/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>

#include <sys/fm/protocol.h>

/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME		"SCSI SSA/FCAL Disk Driver"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else /* !__fibre */
#define	SD_MODULE_NAME		"SCSI Disk Driver"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif /* !__fibre */

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
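
/*
 * Illustrative sketch (not code from this driver) of the kind of check
 * described above.  The INTERCONNECT_* values come from
 * <sys/scsi/impl/services.h>; that scsi_ifgetcap() is the mechanism used
 * to query the HBA is an assumption here, made for concreteness:
 *
 *	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
 *	case INTERCONNECT_FIBRE:
 *	case INTERCONNECT_SSA:
 *	case INTERCONNECT_FABRIC:
 *		-- assume Fibre Channel (old ssd) behaviors
 *		break;
 *	default:
 *		-- includes -1 (property not reported): parallel SCSI
 *		break;
 *	}
 */
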
/*
 * The name of the driver, established from the module name in _init.
 */
static char *sd_label = NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static char *sd_max_xfer_size = "ssd_max_xfer_size";
static char *sd_config_list = "ssd-config-list";
#else
static char *sd_max_xfer_size = "sd_max_xfer_size";
static char *sd_config_list = "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#define	sd_ssc_init			ssd_ssc_init
#define	sd_ssc_send			ssd_ssc_send
#define	sd_ssc_fini			ssd_ssc_fini
#define	sd_ssc_assessment		ssd_ssc_assessment
#define	sd_ssc_post			ssd_ssc_post
#define	sd_ssc_print			ssd_ssc_print
#define	sd_ssc_ereport_post		ssd_ssc_ereport_post
#define	sd_ssc_set_info			ssd_ssc_set_info
#define	sd_ssc_extract_info		ssd_ssc_extract_info

#endif

#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int	sd_qfull_throttle_enable	= TRUE;

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel SCSI devices only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flags to indicate whether the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel SCSI and non-self-identifying devices only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
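
/*
 * Illustrative sketch (not code from this driver) of how the cache is
 * meant to be consulted; the real logic lives in
 * sd_scsi_probe_with_cache(), and the lookup helper and field accesses
 * below are assumptions for illustration only:
 *
 *	cp = lookup_cache_for(pdip);		-- hypothetical helper
 *	if (cp != NULL && cp->cache[tgt] == SCSIPROBE_NORESP)
 *		return (SCSIPROBE_NORESP);	-- skip the slow scsi_probe()
 *	rval = scsi_probe(devp, fn);
 *	cp->cache[tgt] = rval;			-- remember for next time
 *	return (rval);
 */
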
/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))

static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};

#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif
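
/*
 * The sd_tunables initializers above are positional.  Judging from the
 * SD_CONF_BSET_* flags each table is paired with in sd_disk_table below,
 * the field order is: throttle, ctype, not-ready retry count, busy retry
 * count, reset retry count, reserve release time, min throttle, disksort
 * disabled flag, lun reset enabled flag.  (See the sd_tunables definition
 * in sddef.h for the authoritative layout; lsi_oem_properties initializes
 * one field beyond these nine.)
 */
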
/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.)  device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure.  Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure.  Device type is searched as far as the device_id string is
 * defined.  Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of  "NEC     CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-22x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-42x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1818",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000i",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300",     SD_CONF_BSET_THROTTLE,  &lsi_oem_properties },
	{ "LSI",               SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_RSV_REL_TIME|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SUN     SUN_6180", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
				    | SD_CONF_BSET_READSUB_BCD
				    | SD_CONF_BSET_READ_TOC_ADDR_BCD
				    | SD_CONF_BSET_NO_READ_HEADER
				    | SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
				    | SD_CONF_BSET_READSUB_BCD
				    | SD_CONF_BSET_READ_TOC_ADDR_BCD
				    | SD_CONF_BSET_NO_READ_HEADER
				    | SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
				| SD_CONF_BSET_CTYPE
				| SD_CONF_BSET_NRR_COUNT
				| SD_CONF_BSET_FAB_DEVID
				| SD_CONF_BSET_NOCACHE
				| SD_CONF_BSET_BSY_RETRY_COUNT
				| SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_TRK_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4
				| SD_CONF_BSET_RST_RETRIES
				| SD_CONF_BSET_RSV_REL_TIME
				| SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);
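
/*
 * How sd_disk_table matching works in practice (illustrative restatement
 * of the rules documented above; the actual comparison is done by
 * sd_sdconf_id_match()/sd_blank_cmp()):
 *
 *   - "SEAGATE ST34371FC" is compared against the concatenated 8-byte vid
 *     and 16-byte pid returned by INQUIRY, for exactly the length of the
 *     table string.
 *   - "*CSM100_*" has no vendor part; "CSM100_" may appear anywhere
 *     within the 16-byte pid field.
 *   - " NEC CD-ROM DRIVE:260 " begins and ends with a blank, so runs of
 *     consecutive blanks in the device's id string collapse to a single
 *     blank for the comparison.
 */
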
#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_INTERCONNECT_SAS		5

#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	(((un)->un_interconnect_type == SD_INTERCONNECT_SATA) ||\
	((un)->un_interconnect_type == SD_INTERCONNECT_SAS))

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }

static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,        0x1FFFFF,           0xFF,       },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF,         0xFFFF,     },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF,         0xFFFFFFFF, },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};
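
/*
 * Reading sd_cdbtab (descriptive note; see the sd_cdbinfo definition in
 * sddef.h for the authoritative field names): each row gives a CDB size,
 * the corresponding SCSI group code, the largest addressable LBA, and the
 * largest transfer length in blocks for that CDB format.  So 6-byte
 * (group 0) CDBs reach 21-bit LBAs and 255-block transfers; 10-byte
 * (group 1) CDBs reach 32-bit LBAs with 16-bit counts; 12-byte (group 5)
 * and 16-byte (group 4) CDBs allow 32-bit counts, with group 4 extending
 * the LBA to 64 bits.
 */
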
/*
 * Specifies the number of seconds that must have elapsed since the last
 * cmd. has completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_target_change_task		ssd_target_change_task
#define	sd_log_lun_expansion_event	ssd_log_lun_expansion_event
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_get_media_info_ext		ssd_get_media_info_ext
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_nvpair_str_decode		ssd_nvpair_str_decode
#define	sd_strtok_r			ssd_strtok_r
#define	sd_set_properties		ssd_set_properties
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo
#define	sd_rmw_msg_print_handler	ssd_rmw_msg_print_handler

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int  sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int  sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int sd_spin_up_unit(sd_ssc_t *ssc);

/*
 * Use sd_ssc_init() to allocate an sd_ssc_t struct, sd_ssc_send() to issue
 * an internal uscsi command, and sd_ssc_fini() to free the sd_ssc_t struct.
 */
static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
    int flag, enum uio_seg dataspace, int path_flag);
static void sd_ssc_fini(sd_ssc_t *ssc);

/*
 * Use sd_ssc_assessment() to set the correct type of assessment, and
 * sd_ssc_post() to post the ereport & system log.  sd_ssc_post() calls
 * sd_ssc_print() to print the system log and sd_ssc_ereport_post() to
 * post the ereport.
 */
static void sd_ssc_assessment(sd_ssc_t *ssc,
    enum sd_type_assessment tp_assess);

static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
static void sd_ssc_ereport_post(sd_ssc_t *ssc,
    enum sd_driver_assessment drv_assess);
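
/*
 * Illustrative call sequence (a sketch, not code from this file; the
 * SD_FMT_STANDARD assessment value and SD_PATH_DIRECT path flag are
 * assumed here for concreteness):
 *
 *	sd_ssc_t *ssc = sd_ssc_init(un);
 *	rval = sd_ssc_send(ssc, &ucmd, FKIOCTL, UIO_SYSSPACE,
 *	    SD_PATH_DIRECT);
 *	sd_ssc_assessment(ssc, SD_FMT_STANDARD);   -- judge the outcome
 *	sd_ssc_fini(ssc);
 *
 * Per the comments above, an assessment should follow each sd_ssc_send()
 * so that sd_ssc_post() can generate the right ereport/syslog.
 */
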
/*
 * Use sd_ssc_set_info() to mark an un-decodable-data error and
 * sd_ssc_extract_info() to transfer information from internal
 * data structures to the sd_ssc_t.
 */
static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp,
    const char *fmt, ...);
static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

#ifdef _LP64
static void sd_enable_descr_sense(sd_ssc_t *ssc);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(sd_ssc_t *ssc);

static void sd_read_unit_properties(struct sd_lun *un);
static int  sd_process_sdconf_file(struct sd_lun *un);
static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
static void sd_set_properties(struct sd_lun *un, char *name, char *value);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int  sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int  sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int  sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
    int reservation_flag);
static int  sd_get_devid(sd_ssc_t *ssc);
static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
static int  sd_write_deviceid(sd_ssc_t *ssc);
static int  sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int  sd_check_vpd_page_support(sd_ssc_t *ssc);

static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int  sd_ddi_suspend(dev_info_t *devi);
static int  sd_ddi_pm_suspend(struct sd_lun *un);
static int  sd_ddi_resume(dev_info_t *devi);
static int  sd_ddi_pm_resume(struct sd_lun *un);
static int  sdpower(dev_info_t *devi, int component, int level);

static int  sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int  sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int  sd_unit_attach(dev_info_t *devi);
static int  sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int  sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int  sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int  sd_send_polled_RQS(struct sd_lun *un);
static int  sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1
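
/*
 * Flag semantics (sketch): rcd_flag and wce_flag each take one of the
 * values above, so, for example,
 *
 *	sd_cache_control(ssc, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE);
 *
 * would leave the read cache setting (the RCD bit of the caching mode
 * page) untouched while enabling the write cache (the WCE bit).  See
 * sd_cache_control() itself for the authoritative behavior.
 */
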
static int   sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
static int   sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
static void  sd_get_nv_sup(sd_ssc_t *ssc);
static dev_t sd_make_device(dev_info_t *devi);

static void  sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
	uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int  sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int  sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int  sd_ready_and_valid(sd_ssc_t *ssc, int part);

static void sdmin(struct buf *bp);
static int  sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int  sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int  sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int  sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
	struct buf *bp, int flags,
	int (*callback)(caddr_t), caddr_t callback_arg,
	diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
	struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int  sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);
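
/*
 * Pairing note (descriptive, based on how these routines are named and
 * used with the PM framework): sd_pm_entry() brackets command processing
 * that requires the device to be powered up, and each successful
 * sd_pm_entry() is expected to be balanced by a matching sd_pm_exit(),
 * so that the idle-time accounting behind sd_pm_idletimeout_handler()
 * stays consistent.
 */
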
/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
	daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
	uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
	struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
	int retry_check_flag,
	void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	    int c),
	void *user_arg, int failure_code, clock_t retry_delay,
	void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
	clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
	struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
	int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
	struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_target_change_task(void *arg);
static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp,
	uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
*capp, 1469 uint32_t *lbap, uint32_t *psp, int path_flag); 1470 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, 1471 int path_flag); 1472 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, 1473 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp); 1474 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag); 1475 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, 1476 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp); 1477 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, 1478 uchar_t usr_cmd, uchar_t *usr_bufp); 1479 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, 1480 struct dk_callback *dkc); 1481 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp); 1482 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, 1483 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1484 uchar_t *bufaddr, uint_t buflen, int path_flag); 1485 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 1486 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1487 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag); 1488 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, 1489 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag); 1490 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, 1491 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag); 1492 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 1493 size_t buflen, daddr_t start_block, int path_flag); 1494 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \ 1495 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \ 1496 path_flag) 1497 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\ 1498 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\ 1499 path_flag) 1500 1501 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, 1502 uint16_t buflen, uchar_t page_code, uchar_t page_control, 1503 uint16_t param_ptr, int path_flag); 1504 1505 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un); 1506 static void sd_free_rqs(struct sd_lun *un); 1507 1508 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, 1509 uchar_t *data, int len, int fmt); 1510 static void sd_panic_for_res_conflict(struct sd_lun *un); 1511 1512 /* 1513 * Disk Ioctl Function Prototypes 1514 */ 1515 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag); 1516 static int sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag); 1517 static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag); 1518 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag); 1519 1520 /* 1521 * Multi-host Ioctl Prototypes 1522 */ 1523 static int sd_check_mhd(dev_t dev, int interval); 1524 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1525 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1526 static char *sd_sname(uchar_t status); 1527 static void sd_mhd_resvd_recover(void *arg); 1528 static void sd_resv_reclaim_thread(); 1529 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1530 static int sd_reserve_release(dev_t dev, int cmd); 1531 static void sd_rmv_resv_reclaim_req(dev_t dev); 1532 static void sd_mhd_reset_notify_cb(caddr_t arg); 1533 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1534 mhioc_inkeys_t *usrp, int flag); 1535 static int sd_persistent_reservation_in_read_resv(struct 
sd_lun *un, 1536 mhioc_inresvs_t *usrp, int flag); 1537 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1538 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1539 static int sd_mhdioc_release(dev_t dev); 1540 static int sd_mhdioc_register_devid(dev_t dev); 1541 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1542 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1543 1544 /* 1545 * SCSI removable prototypes 1546 */ 1547 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1548 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1549 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1550 static int sr_pause_resume(dev_t dev, int mode); 1551 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1552 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1553 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1554 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1555 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1556 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1557 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1558 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1559 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1560 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1561 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1562 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1563 static int sr_eject(dev_t dev); 1564 static void sr_ejected(register struct sd_lun *un); 1565 static int sr_check_wp(dev_t dev); 1566 static int sd_check_media(dev_t dev, enum dkio_state state); 1567 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1568 static void sd_delayed_cv_broadcast(void *arg); 1569 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1570 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1571 1572 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page); 1573 1574 /* 1575 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1576 */ 1577 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag); 1578 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1579 static void sd_wm_cache_destructor(void *wm, void *un); 1580 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1581 daddr_t endb, ushort_t typ); 1582 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1583 daddr_t endb); 1584 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1585 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1586 static void sd_read_modify_write_task(void * arg); 1587 static int 1588 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1589 struct buf **bpp); 1590 1591 1592 /* 1593 * Function prototypes for failfast support. 
1594 */ 1595 static void sd_failfast_flushq(struct sd_lun *un); 1596 static int sd_failfast_flushq_callback(struct buf *bp); 1597 1598 /* 1599 * Function prototypes to check for lsi devices 1600 */ 1601 static void sd_is_lsi(struct sd_lun *un); 1602 1603 /* 1604 * Function prototypes for partial DMA support 1605 */ 1606 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1607 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1608 1609 1610 /* Function prototypes for cmlb */ 1611 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1612 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1613 1614 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1615 1616 /* 1617 * For printing RMW warning message timely 1618 */ 1619 static void sd_rmw_msg_print_handler(void *arg); 1620 1621 /* 1622 * Constants for failfast support: 1623 * 1624 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1625 * failfast processing being performed. 1626 * 1627 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1628 * failfast processing on all bufs with B_FAILFAST set. 1629 */ 1630 1631 #define SD_FAILFAST_INACTIVE 0 1632 #define SD_FAILFAST_ACTIVE 1 1633 1634 /* 1635 * Bitmask to control behavior of buf(9S) flushes when a transition to 1636 * the failfast state occurs. Optional bits include: 1637 * 1638 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1639 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1640 * be flushed. 1641 * 1642 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1643 * driver, in addition to the regular wait queue. This includes the xbuf 1644 * queues. When clear, only the driver's wait queue will be flushed. 1645 */ 1646 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1647 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1648 1649 /* 1650 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1651 * to flush all queues within the driver. 1652 */ 1653 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1654 1655 1656 /* 1657 * SD Testing Fault Injection 1658 */ 1659 #ifdef SD_FAULT_INJECTION 1660 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1661 static void sd_faultinjection(struct scsi_pkt *pktp); 1662 static void sd_injection_log(char *buf, struct sd_lun *un); 1663 #endif 1664 1665 /* 1666 * Device driver ops vector 1667 */ 1668 static struct cb_ops sd_cb_ops = { 1669 sdopen, /* open */ 1670 sdclose, /* close */ 1671 sdstrategy, /* strategy */ 1672 nodev, /* print */ 1673 sddump, /* dump */ 1674 sdread, /* read */ 1675 sdwrite, /* write */ 1676 sdioctl, /* ioctl */ 1677 nodev, /* devmap */ 1678 nodev, /* mmap */ 1679 nodev, /* segmap */ 1680 nochpoll, /* poll */ 1681 sd_prop_op, /* cb_prop_op */ 1682 0, /* streamtab */ 1683 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1684 CB_REV, /* cb_rev */ 1685 sdaread, /* async I/O read entry point */ 1686 sdawrite /* async I/O write entry point */ 1687 }; 1688 1689 struct dev_ops sd_ops = { 1690 DEVO_REV, /* devo_rev, */ 1691 0, /* refcnt */ 1692 sdinfo, /* info */ 1693 nulldev, /* identify */ 1694 sdprobe, /* probe */ 1695 sdattach, /* attach */ 1696 sddetach, /* detach */ 1697 nodev, /* reset */ 1698 &sd_cb_ops, /* driver operations */ 1699 NULL, /* bus operations */ 1700 sdpower, /* power */ 1701 ddi_quiesce_not_needed, /* quiesce */ 1702 }; 1703 1704 /* 1705 * This is the loadable module wrapper. 
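 * The wrapper is a modldrv around sd_ops in the normal build; the
 * XPV_HVM_DRIVER build packages the same code as a modlmisc instead,
 * as the #ifdef below shows.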
1706 */ 1707 #include <sys/modctl.h> 1708 1709 #ifndef XPV_HVM_DRIVER 1710 static struct modldrv modldrv = { 1711 &mod_driverops, /* Type of module. This one is a driver */ 1712 SD_MODULE_NAME, /* Module name. */ 1713 &sd_ops /* driver ops */ 1714 }; 1715 1716 static struct modlinkage modlinkage = { 1717 MODREV_1, &modldrv, NULL 1718 }; 1719 1720 #else /* XPV_HVM_DRIVER */ 1721 static struct modlmisc modlmisc = { 1722 &mod_miscops, /* Type of module. This one is a misc */ 1723 "HVM " SD_MODULE_NAME, /* Module name. */ 1724 }; 1725 1726 static struct modlinkage modlinkage = { 1727 MODREV_1, &modlmisc, NULL 1728 }; 1729 1730 #endif /* XPV_HVM_DRIVER */ 1731 1732 static cmlb_tg_ops_t sd_tgops = { 1733 TG_DK_OPS_VERSION_1, 1734 sd_tg_rdwr, 1735 sd_tg_getinfo 1736 }; 1737 1738 static struct scsi_asq_key_strings sd_additional_codes[] = { 1739 0x81, 0, "Logical Unit is Reserved", 1740 0x85, 0, "Audio Address Not Valid", 1741 0xb6, 0, "Media Load Mechanism Failed", 1742 0xB9, 0, "Audio Play Operation Aborted", 1743 0xbf, 0, "Buffer Overflow for Read All Subcodes Command", 1744 0x53, 2, "Medium removal prevented", 1745 0x6f, 0, "Authentication failed during key exchange", 1746 0x6f, 1, "Key not present", 1747 0x6f, 2, "Key not established", 1748 0x6f, 3, "Read without proper authentication", 1749 0x6f, 4, "Mismatched region to this logical unit", 1750 0x6f, 5, "Region reset count error", 1751 0xffff, 0x0, NULL 1752 }; 1753 1754 1755 /* 1756 * Struct for passing printing information for sense data messages 1757 */ 1758 struct sd_sense_info { 1759 int ssi_severity; 1760 int ssi_pfa_flag; 1761 }; 1762 1763 /* 1764 * Table of function pointers for iostart-side routines. Separate "chains" 1765 * of layered function calls are formed by placing the function pointers 1766 * sequentially in the desired order. Functions are called according to an 1767 * incrementing table index ordering. The last function in each chain must 1768 * be sd_core_iostart(). The corresponding iodone-side routines are expected 1769 * in the sd_iodone_chain[] array. 1770 * 1771 * Note: It may seem more natural to organize both the iostart and iodone 1772 * functions together, into an array of structures (or some similar 1773 * organization) with a common index, rather than two separate arrays which 1774 * must be maintained in synchronization. The purpose of this division is 1775 * to achieve improved performance: individual arrays allow for more 1776 * effective cache line utilization on certain platforms.
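 *
 * To sketch the mechanism (an illustrative example only, using the index
 * names #defined further below): the plain disk chain with PM enabled
 * runs the iostart side top-down and the iodone side in reverse:
 *
 *	SD_BEGIN_IOSTART(SD_CHAIN_DISK_IOSTART, un, bp)
 *		-> sd_mapblockaddr_iostart(0, un, bp)
 *		-> sd_pm_iostart(1, un, bp)
 *		-> sd_core_iostart(2, un, bp)		issues the command
 *
 *	SD_BEGIN_IODONE(SD_CHAIN_DISK_IODONE, un, bp)
 *		-> sd_pm_iodone(2, un, bp)
 *		-> sd_mapblockaddr_iodone(1, un, bp)
 *		-> sd_buf_iodone(0, un, bp)		completes the buf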
1777 */ 1778 1779 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1780 1781 1782 static sd_chain_t sd_iostart_chain[] = { 1783 1784 /* Chain for buf IO for disk drive targets (PM enabled) */ 1785 sd_mapblockaddr_iostart, /* Index: 0 */ 1786 sd_pm_iostart, /* Index: 1 */ 1787 sd_core_iostart, /* Index: 2 */ 1788 1789 /* Chain for buf IO for disk drive targets (PM disabled) */ 1790 sd_mapblockaddr_iostart, /* Index: 3 */ 1791 sd_core_iostart, /* Index: 4 */ 1792 1793 /* 1794 * Chain for buf IO for removable-media or large sector size 1795 * disk drive targets with RMW needed (PM enabled) 1796 */ 1797 sd_mapblockaddr_iostart, /* Index: 5 */ 1798 sd_mapblocksize_iostart, /* Index: 6 */ 1799 sd_pm_iostart, /* Index: 7 */ 1800 sd_core_iostart, /* Index: 8 */ 1801 1802 /* 1803 * Chain for buf IO for removable-media or large sector size 1804 * disk drive targets with RMW needed (PM disabled) 1805 */ 1806 sd_mapblockaddr_iostart, /* Index: 9 */ 1807 sd_mapblocksize_iostart, /* Index: 10 */ 1808 sd_core_iostart, /* Index: 11 */ 1809 1810 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1811 sd_mapblockaddr_iostart, /* Index: 12 */ 1812 sd_checksum_iostart, /* Index: 13 */ 1813 sd_pm_iostart, /* Index: 14 */ 1814 sd_core_iostart, /* Index: 15 */ 1815 1816 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1817 sd_mapblockaddr_iostart, /* Index: 16 */ 1818 sd_checksum_iostart, /* Index: 17 */ 1819 sd_core_iostart, /* Index: 18 */ 1820 1821 /* Chain for USCSI commands (all targets) */ 1822 sd_pm_iostart, /* Index: 19 */ 1823 sd_core_iostart, /* Index: 20 */ 1824 1825 /* Chain for checksumming USCSI commands (all targets) */ 1826 sd_checksum_uscsi_iostart, /* Index: 21 */ 1827 sd_pm_iostart, /* Index: 22 */ 1828 sd_core_iostart, /* Index: 23 */ 1829 1830 /* Chain for "direct" USCSI commands (all targets) */ 1831 sd_core_iostart, /* Index: 24 */ 1832 1833 /* Chain for "direct priority" USCSI commands (all targets) */ 1834 sd_core_iostart, /* Index: 25 */ 1835 1836 /* 1837 * Chain for buf IO for large sector size disk drive targets 1838 * with RMW needed with checksumming (PM enabled) 1839 */ 1840 sd_mapblockaddr_iostart, /* Index: 26 */ 1841 sd_mapblocksize_iostart, /* Index: 27 */ 1842 sd_checksum_iostart, /* Index: 28 */ 1843 sd_pm_iostart, /* Index: 29 */ 1844 sd_core_iostart, /* Index: 30 */ 1845 1846 /* 1847 * Chain for buf IO for large sector size disk drive targets 1848 * with RMW needed with checksumming (PM disabled) 1849 */ 1850 sd_mapblockaddr_iostart, /* Index: 31 */ 1851 sd_mapblocksize_iostart, /* Index: 32 */ 1852 sd_checksum_iostart, /* Index: 33 */ 1853 sd_core_iostart, /* Index: 34 */ 1854 1855 }; 1856 1857 /* 1858 * Macros to locate the first function of each iostart chain in the 1859 * sd_iostart_chain[] array. These are located by the index in the array. 
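 * The NO_PM variants name otherwise-identical chains that simply omit
 * sd_pm_iostart(); e.g. SD_CHAIN_DISK_IOSTART_NO_PM (3) begins the
 * two-stage chain at indices 3-4 above.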
1860 */ 1861 #define SD_CHAIN_DISK_IOSTART 0 1862 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1863 #define SD_CHAIN_MSS_DISK_IOSTART 5 1864 #define SD_CHAIN_RMMEDIA_IOSTART 5 1865 #define SD_CHAIN_MSS_DISK_IOSTART_NO_PM 9 1866 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1867 #define SD_CHAIN_CHKSUM_IOSTART 12 1868 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1869 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1870 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1871 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1872 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1873 #define SD_CHAIN_MSS_CHKSUM_IOSTART 26 1874 #define SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM 31 1875 1876 1877 /* 1878 * Table of function pointers for the iodone-side routines for the driver- 1879 * internal layering mechanism. The calling sequence for iodone routines 1880 * uses a decrementing table index, so the last routine called in a chain 1881 * must be at the lowest array index location for that chain. The last 1882 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1883 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1884 * of the functions in an iodone side chain must correspond to the ordering 1885 * of the iostart routines for that chain. Note that there is no iodone 1886 * side routine that corresponds to sd_core_iostart(), so there is no 1887 * entry in the table for this. 1888 */ 1889 1890 static sd_chain_t sd_iodone_chain[] = { 1891 1892 /* Chain for buf IO for disk drive targets (PM enabled) */ 1893 sd_buf_iodone, /* Index: 0 */ 1894 sd_mapblockaddr_iodone, /* Index: 1 */ 1895 sd_pm_iodone, /* Index: 2 */ 1896 1897 /* Chain for buf IO for disk drive targets (PM disabled) */ 1898 sd_buf_iodone, /* Index: 3 */ 1899 sd_mapblockaddr_iodone, /* Index: 4 */ 1900 1901 /* 1902 * Chain for buf IO for removable-media or large sector size 1903 * disk drive targets with RMW needed (PM enabled) 1904 */ 1905 sd_buf_iodone, /* Index: 5 */ 1906 sd_mapblockaddr_iodone, /* Index: 6 */ 1907 sd_mapblocksize_iodone, /* Index: 7 */ 1908 sd_pm_iodone, /* Index: 8 */ 1909 1910 /* 1911 * Chain for buf IO for removable-media or large sector size 1912 * disk drive targets with RMW needed (PM disabled) 1913 */ 1914 sd_buf_iodone, /* Index: 9 */ 1915 sd_mapblockaddr_iodone, /* Index: 10 */ 1916 sd_mapblocksize_iodone, /* Index: 11 */ 1917 1918 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1919 sd_buf_iodone, /* Index: 12 */ 1920 sd_mapblockaddr_iodone, /* Index: 13 */ 1921 sd_checksum_iodone, /* Index: 14 */ 1922 sd_pm_iodone, /* Index: 15 */ 1923 1924 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1925 sd_buf_iodone, /* Index: 16 */ 1926 sd_mapblockaddr_iodone, /* Index: 17 */ 1927 sd_checksum_iodone, /* Index: 18 */ 1928 1929 /* Chain for USCSI commands (non-checksum targets) */ 1930 sd_uscsi_iodone, /* Index: 19 */ 1931 sd_pm_iodone, /* Index: 20 */ 1932 1933 /* Chain for USCSI commands (checksum targets) */ 1934 sd_uscsi_iodone, /* Index: 21 */ 1935 sd_checksum_uscsi_iodone, /* Index: 22 */ 1936 sd_pm_iodone, /* Index: 23 */ 1937 1938 /* Chain for "direct" USCSI commands (all targets) */ 1939 sd_uscsi_iodone, /* Index: 24 */ 1940 1941 /* Chain for "direct priority" USCSI commands (all targets) */ 1942 sd_uscsi_iodone, /* Index: 25 */ 1943 1944 /* 1945 * Chain for buf IO for large sector size disk drive targets 1946 * with checksumming (PM enabled) 1947 */ 1948 sd_buf_iodone, /* Index: 26 */ 1949 sd_mapblockaddr_iodone, /* Index: 27 */ 1950 sd_mapblocksize_iodone, /* Index: 28 */ 1951
sd_checksum_iodone, /* Index: 29 */ 1952 sd_pm_iodone, /* Index: 30 */ 1953 1954 /* 1955 * Chain for buf IO for large sector size disk drive targets 1956 * with checksumming (PM disabled) 1957 */ 1958 sd_buf_iodone, /* Index: 31 */ 1959 sd_mapblockaddr_iodone, /* Index: 32 */ 1960 sd_mapblocksize_iodone, /* Index: 33 */ 1961 sd_checksum_iodone, /* Index: 34 */ 1962 }; 1963 1964 1965 /* 1966 * Macros to locate the "first" function in the sd_iodone_chain[] array for 1967 * each iodone-side chain. These are located by the array index, but as the 1968 * iodone side functions are called in a decrementing-index order, the 1969 * highest index number in each chain must be specified (as these correspond 1970 * to the first function in the iodone chain that will be called by the core 1971 * at IO completion time). 1972 */ 1973 1974 #define SD_CHAIN_DISK_IODONE 2 1975 #define SD_CHAIN_DISK_IODONE_NO_PM 4 1976 #define SD_CHAIN_RMMEDIA_IODONE 8 1977 #define SD_CHAIN_MSS_DISK_IODONE 8 1978 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11 1979 #define SD_CHAIN_MSS_DISK_IODONE_NO_PM 11 1980 #define SD_CHAIN_CHKSUM_IODONE 15 1981 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18 1982 #define SD_CHAIN_USCSI_CMD_IODONE 20 1983 #define SD_CHAIN_USCSI_CHKSUM_IODONE 23 1984 #define SD_CHAIN_DIRECT_CMD_IODONE 24 1985 #define SD_CHAIN_PRIORITY_CMD_IODONE 25 1986 #define SD_CHAIN_MSS_CHKSUM_IODONE 30 1987 #define SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM 34 1988 1989 1990 1991 /* 1992 * Array to map a layering chain index to the appropriate initpkt routine. 1993 * The redundant entries are present so that the index used for accessing 1994 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1995 * with this table as well. 1996 */ 1997 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **); 1998 1999 static sd_initpkt_t sd_initpkt_map[] = { 2000 2001 /* Chain for buf IO for disk drive targets (PM enabled) */ 2002 sd_initpkt_for_buf, /* Index: 0 */ 2003 sd_initpkt_for_buf, /* Index: 1 */ 2004 sd_initpkt_for_buf, /* Index: 2 */ 2005 2006 /* Chain for buf IO for disk drive targets (PM disabled) */ 2007 sd_initpkt_for_buf, /* Index: 3 */ 2008 sd_initpkt_for_buf, /* Index: 4 */ 2009 2010 /* 2011 * Chain for buf IO for removable-media or large sector size 2012 * disk drive targets (PM enabled) 2013 */ 2014 sd_initpkt_for_buf, /* Index: 5 */ 2015 sd_initpkt_for_buf, /* Index: 6 */ 2016 sd_initpkt_for_buf, /* Index: 7 */ 2017 sd_initpkt_for_buf, /* Index: 8 */ 2018 2019 /* 2020 * Chain for buf IO for removable-media or large sector size 2021 * disk drive targets (PM disabled) 2022 */ 2023 sd_initpkt_for_buf, /* Index: 9 */ 2024 sd_initpkt_for_buf, /* Index: 10 */ 2025 sd_initpkt_for_buf, /* Index: 11 */ 2026 2027 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 2028 sd_initpkt_for_buf, /* Index: 12 */ 2029 sd_initpkt_for_buf, /* Index: 13 */ 2030 sd_initpkt_for_buf, /* Index: 14 */ 2031 sd_initpkt_for_buf, /* Index: 15 */ 2032 2033 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 2034 sd_initpkt_for_buf, /* Index: 16 */ 2035 sd_initpkt_for_buf, /* Index: 17 */ 2036 sd_initpkt_for_buf, /* Index: 18 */ 2037 2038 /* Chain for USCSI commands (non-checksum targets) */ 2039 sd_initpkt_for_uscsi, /* Index: 19 */ 2040 sd_initpkt_for_uscsi, /* Index: 20 */ 2041 2042 /* Chain for USCSI commands (checksum targets) */ 2043 sd_initpkt_for_uscsi, /* Index: 21 */ 2044 sd_initpkt_for_uscsi, /* Index: 22 */ 2045 sd_initpkt_for_uscsi, /* Index: 23 */ 2046 2047 /* Chain for "direct" USCSI
commands (all targets) */ 2048 sd_initpkt_for_uscsi, /* Index: 24 */ 2049 2050 /* Chain for "direct priority" USCSI commands (all targets) */ 2051 sd_initpkt_for_uscsi, /* Index: 25 */ 2052 2053 /* 2054 * Chain for buf IO for large sector size disk drive targets 2055 * with checksumming (PM enabled) 2056 */ 2057 sd_initpkt_for_buf, /* Index: 26 */ 2058 sd_initpkt_for_buf, /* Index: 27 */ 2059 sd_initpkt_for_buf, /* Index: 28 */ 2060 sd_initpkt_for_buf, /* Index: 29 */ 2061 sd_initpkt_for_buf, /* Index: 30 */ 2062 2063 /* 2064 * Chain for buf IO for large sector size disk drive targets 2065 * with checksumming (PM disabled) 2066 */ 2067 sd_initpkt_for_buf, /* Index: 31 */ 2068 sd_initpkt_for_buf, /* Index: 32 */ 2069 sd_initpkt_for_buf, /* Index: 33 */ 2070 sd_initpkt_for_buf, /* Index: 34 */ 2071 }; 2072 2073 2074 /* 2075 * Array to map a layering chain index to the appropriate destroypkt routine. 2076 * The redundant entries are present so that the index used for accessing 2077 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 2078 * with this table as well. 2079 */ 2080 typedef void (*sd_destroypkt_t)(struct buf *); 2081 2082 static sd_destroypkt_t sd_destroypkt_map[] = { 2083 2084 /* Chain for buf IO for disk drive targets (PM enabled) */ 2085 sd_destroypkt_for_buf, /* Index: 0 */ 2086 sd_destroypkt_for_buf, /* Index: 1 */ 2087 sd_destroypkt_for_buf, /* Index: 2 */ 2088 2089 /* Chain for buf IO for disk drive targets (PM disabled) */ 2090 sd_destroypkt_for_buf, /* Index: 3 */ 2091 sd_destroypkt_for_buf, /* Index: 4 */ 2092 2093 /* 2094 * Chain for buf IO for removable-media or large sector size 2095 * disk drive targets (PM enabled) 2096 */ 2097 sd_destroypkt_for_buf, /* Index: 5 */ 2098 sd_destroypkt_for_buf, /* Index: 6 */ 2099 sd_destroypkt_for_buf, /* Index: 7 */ 2100 sd_destroypkt_for_buf, /* Index: 8 */ 2101 2102 /* 2103 * Chain for buf IO for removable-media or large sector size 2104 * disk drive targets (PM disabled) 2105 */ 2106 sd_destroypkt_for_buf, /* Index: 9 */ 2107 sd_destroypkt_for_buf, /* Index: 10 */ 2108 sd_destroypkt_for_buf, /* Index: 11 */ 2109 2110 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 2111 sd_destroypkt_for_buf, /* Index: 12 */ 2112 sd_destroypkt_for_buf, /* Index: 13 */ 2113 sd_destroypkt_for_buf, /* Index: 14 */ 2114 sd_destroypkt_for_buf, /* Index: 15 */ 2115 2116 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 2117 sd_destroypkt_for_buf, /* Index: 16 */ 2118 sd_destroypkt_for_buf, /* Index: 17 */ 2119 sd_destroypkt_for_buf, /* Index: 18 */ 2120 2121 /* Chain for USCSI commands (non-checksum targets) */ 2122 sd_destroypkt_for_uscsi, /* Index: 19 */ 2123 sd_destroypkt_for_uscsi, /* Index: 20 */ 2124 2125 /* Chain for USCSI commands (checksum targets) */ 2126 sd_destroypkt_for_uscsi, /* Index: 21 */ 2127 sd_destroypkt_for_uscsi, /* Index: 22 */ 2128 sd_destroypkt_for_uscsi, /* Index: 23 */ 2129 2130 /* Chain for "direct" USCSI commands (all targets) */ 2131 sd_destroypkt_for_uscsi, /* Index: 24 */ 2132 2133 /* Chain for "direct priority" USCSI commands (all targets) */ 2134 sd_destroypkt_for_uscsi, /* Index: 25 */ 2135 2136 /* 2137 * Chain for buf IO for large sector size disk drive targets 2138 * with checksumming (PM enabled) 2139 */ 2140 sd_destroypkt_for_buf, /* Index: 26 */ 2141 sd_destroypkt_for_buf, /* Index: 27 */ 2142 sd_destroypkt_for_buf, /* Index: 28 */ 2143 sd_destroypkt_for_buf, /* Index: 29 */ 2144 sd_destroypkt_for_buf, /* Index: 30 */ 2145 2146 /* 2147 * Chain
for buf IO for large sector size disk drive targets 2148 * with checksumming (PM disabled) 2149 */ 2150 sd_destroypkt_for_buf, /* Index: 31 */ 2151 sd_destroypkt_for_buf, /* Index: 32 */ 2152 sd_destroypkt_for_buf, /* Index: 33 */ 2153 sd_destroypkt_for_buf, /* Index: 34 */ 2154 }; 2155 2156 2157 2158 /* 2159 * Array to map a layering chain index to the appropriate chain "type". 2160 * The chain type indicates a specific property/usage of the chain. 2161 * The redundant entries are present so that the index used for accessing 2162 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 2163 * with this table as well. 2164 */ 2165 2166 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */ 2167 #define SD_CHAIN_BUFIO 1 /* regular buf IO */ 2168 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */ 2169 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */ 2170 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */ 2171 /* (for error recovery) */ 2172 2173 static int sd_chain_type_map[] = { 2174 2175 /* Chain for buf IO for disk drive targets (PM enabled) */ 2176 SD_CHAIN_BUFIO, /* Index: 0 */ 2177 SD_CHAIN_BUFIO, /* Index: 1 */ 2178 SD_CHAIN_BUFIO, /* Index: 2 */ 2179 2180 /* Chain for buf IO for disk drive targets (PM disabled) */ 2181 SD_CHAIN_BUFIO, /* Index: 3 */ 2182 SD_CHAIN_BUFIO, /* Index: 4 */ 2183 2184 /* 2185 * Chain for buf IO for removable-media or large sector size 2186 * disk drive targets (PM enabled) 2187 */ 2188 SD_CHAIN_BUFIO, /* Index: 5 */ 2189 SD_CHAIN_BUFIO, /* Index: 6 */ 2190 SD_CHAIN_BUFIO, /* Index: 7 */ 2191 SD_CHAIN_BUFIO, /* Index: 8 */ 2192 2193 /* 2194 * Chain for buf IO for removable-media or large sector size 2195 * disk drive targets (PM disabled) 2196 */ 2197 SD_CHAIN_BUFIO, /* Index: 9 */ 2198 SD_CHAIN_BUFIO, /* Index: 10 */ 2199 SD_CHAIN_BUFIO, /* Index: 11 */ 2200 2201 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 2202 SD_CHAIN_BUFIO, /* Index: 12 */ 2203 SD_CHAIN_BUFIO, /* Index: 13 */ 2204 SD_CHAIN_BUFIO, /* Index: 14 */ 2205 SD_CHAIN_BUFIO, /* Index: 15 */ 2206 2207 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 2208 SD_CHAIN_BUFIO, /* Index: 16 */ 2209 SD_CHAIN_BUFIO, /* Index: 17 */ 2210 SD_CHAIN_BUFIO, /* Index: 18 */ 2211 2212 /* Chain for USCSI commands (non-checksum targets) */ 2213 SD_CHAIN_USCSI, /* Index: 19 */ 2214 SD_CHAIN_USCSI, /* Index: 20 */ 2215 2216 /* Chain for USCSI commands (checksum targets) */ 2217 SD_CHAIN_USCSI, /* Index: 21 */ 2218 SD_CHAIN_USCSI, /* Index: 22 */ 2219 SD_CHAIN_USCSI, /* Index: 23 */ 2220 2221 /* Chain for "direct" USCSI commands (all targets) */ 2222 SD_CHAIN_DIRECT, /* Index: 24 */ 2223 2224 /* Chain for "direct priority" USCSI commands (all targets) */ 2225 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */ 2226 2227 /* 2228 * Chain for buf IO for large sector size disk drive targets 2229 * with checksumming (PM enabled) 2230 */ 2231 SD_CHAIN_BUFIO, /* Index: 26 */ 2232 SD_CHAIN_BUFIO, /* Index: 27 */ 2233 SD_CHAIN_BUFIO, /* Index: 28 */ 2234 SD_CHAIN_BUFIO, /* Index: 29 */ 2235 SD_CHAIN_BUFIO, /* Index: 30 */ 2236 2237 /* 2238 * Chain for buf IO for large sector size disk drive targets 2239 * with checksumming (PM disabled) 2240 */ 2241 SD_CHAIN_BUFIO, /* Index: 31 */ 2242 SD_CHAIN_BUFIO, /* Index: 32 */ 2243 SD_CHAIN_BUFIO, /* Index: 33 */ 2244 SD_CHAIN_BUFIO, /* Index: 34 */ 2245 }; 2246 2247 2248 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain.
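 * (i.e. the xbuf's xb_chain_iostart index maps to SD_CHAIN_BUFIO in
 * sd_chain_type_map[] above)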
*/ 2249 #define SD_IS_BUFIO(xp) \ 2250 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO) 2251 2252 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */ 2253 #define SD_IS_DIRECT_PRIORITY(xp) \ 2254 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY) 2255 2256 2257 2258 /* 2259 * Struct, array, and macros to map a specific chain to the appropriate 2260 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays. 2261 * 2262 * The sd_chain_index_map[] array is used at attach time to set the various 2263 * un_xxx_chain type members of the sd_lun softstate to the specific layering 2264 * chain to be used with the instance. This allows different instances to use 2265 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart 2266 * and xb_chain_iodone index values in the sd_xbuf are initialized to these 2267 * values at sd_xbuf init time, this allows (1) layering chains to be changed 2268 * dynamically and without the use of locking; and (2) a layer to update the 2269 * xb_chain_io[start|done] member in a given xbuf with its current index value, 2270 * to allow for deferred processing of an IO within the same chain from a 2271 * different execution context. 2272 */ 2273 2274 struct sd_chain_index { 2275 int sci_iostart_index; 2276 int sci_iodone_index; 2277 }; 2278 2279 static struct sd_chain_index sd_chain_index_map[] = { 2280 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2281 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2282 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2283 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2284 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2285 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2286 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2287 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2288 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2289 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2290 { SD_CHAIN_MSS_CHKSUM_IOSTART, SD_CHAIN_MSS_CHKSUM_IODONE }, 2291 { SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM, SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM }, 2292 2293 }; 2294 2295 2296 /* 2297 * The following are indexes into the sd_chain_index_map[] array. 2298 */ 2299 2300 /* un->un_buf_chain_type must be set to one of these */ 2301 #define SD_CHAIN_INFO_DISK 0 2302 #define SD_CHAIN_INFO_DISK_NO_PM 1 2303 #define SD_CHAIN_INFO_RMMEDIA 2 2304 #define SD_CHAIN_INFO_MSS_DISK 2 2305 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2306 #define SD_CHAIN_INFO_MSS_DSK_NO_PM 3 2307 #define SD_CHAIN_INFO_CHKSUM 4 2308 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2309 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM 10 2310 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM_NO_PM 11 2311 2312 /* un->un_uscsi_chain_type must be set to one of these */ 2313 #define SD_CHAIN_INFO_USCSI_CMD 6 2314 /* USCSI with PM disabled is the same as DIRECT */ 2315 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2316 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2317 2318 /* un->un_direct_chain_type must be set to one of these */ 2319 #define SD_CHAIN_INFO_DIRECT_CMD 8 2320 2321 /* un->un_priority_chain_type must be set to one of these */ 2322 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2323 2324 /* size for devid inquiries */ 2325 #define MAX_INQUIRY_SIZE 0xF0 2326 2327 /* 2328 * Macros used by functions to pass a given buf(9S) struct along to the 2329 * next function in the layering chain for further processing.
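 *
 * As an illustrative note, an iostart-side routine at position "index"
 * hands the buf to the next layer with SD_NEXT_IOSTART(index, un, bp),
 * and the matching iodone-side routine passes completion back up with
 * SD_NEXT_IODONE(index, un, bp); the +1/-1 stepping is built into the
 * macros below.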
2330 * 2331 * In the following macros, passing more than three arguments to the called 2332 * routines causes the optimizer for the SPARC compiler to stop doing tail 2333 * call elimination which results in significant performance degradation. 2334 */ 2335 #define SD_BEGIN_IOSTART(index, un, bp) \ 2336 ((*(sd_iostart_chain[index]))(index, un, bp)) 2337 2338 #define SD_BEGIN_IODONE(index, un, bp) \ 2339 ((*(sd_iodone_chain[index]))(index, un, bp)) 2340 2341 #define SD_NEXT_IOSTART(index, un, bp) \ 2342 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2343 2344 #define SD_NEXT_IODONE(index, un, bp) \ 2345 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2346 2347 /* 2348 * Function: _init 2349 * 2350 * Description: This is the driver _init(9E) entry point. 2351 * 2352 * Return Code: Returns the value from mod_install(9F) or 2353 * ddi_soft_state_init(9F) as appropriate. 2354 * 2355 * Context: Called when driver module loaded. 2356 */ 2357 2358 int 2359 _init(void) 2360 { 2361 int err; 2362 2363 /* establish driver name from module name */ 2364 sd_label = (char *)mod_modname(&modlinkage); 2365 2366 #ifndef XPV_HVM_DRIVER 2367 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2368 SD_MAXUNIT); 2369 if (err != 0) { 2370 return (err); 2371 } 2372 2373 #else /* XPV_HVM_DRIVER */ 2374 /* Remove the leading "hvm_" from the module name */ 2375 ASSERT(strncmp(sd_label, "hvm_", strlen("hvm_")) == 0); 2376 sd_label += strlen("hvm_"); 2377 2378 #endif /* XPV_HVM_DRIVER */ 2379 2380 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2381 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2382 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2383 2384 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2385 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2386 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2387 2388 /* 2389 * it's ok to init here even for fibre device 2390 */ 2391 sd_scsi_probe_cache_init(); 2392 2393 sd_scsi_target_lun_init(); 2394 2395 /* 2396 * Creating taskq before mod_install ensures that all callers (threads) 2397 * that enter the module after a successful mod_install encounter 2398 * a valid taskq. 2399 */ 2400 sd_taskq_create(); 2401 2402 err = mod_install(&modlinkage); 2403 if (err != 0) { 2404 /* delete taskq if install fails */ 2405 sd_taskq_delete(); 2406 2407 mutex_destroy(&sd_detach_mutex); 2408 mutex_destroy(&sd_log_mutex); 2409 mutex_destroy(&sd_label_mutex); 2410 2411 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2412 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2413 cv_destroy(&sd_tr.srq_inprocess_cv); 2414 2415 sd_scsi_probe_cache_fini(); 2416 2417 sd_scsi_target_lun_fini(); 2418 2419 #ifndef XPV_HVM_DRIVER 2420 ddi_soft_state_fini(&sd_state); 2421 #endif /* !XPV_HVM_DRIVER */ 2422 return (err); 2423 } 2424 2425 return (err); 2426 } 2427 2428 2429 /* 2430 * Function: _fini 2431 * 2432 * Description: This is the driver _fini(9E) entry point. 2433 * 2434 * Return Code: Returns the value from mod_remove(9F) 2435 * 2436 * Context: Called when driver module is unloaded. 
2437 */ 2438 2439 int 2440 _fini(void) 2441 { 2442 int err; 2443 2444 if ((err = mod_remove(&modlinkage)) != 0) { 2445 return (err); 2446 } 2447 2448 sd_taskq_delete(); 2449 2450 mutex_destroy(&sd_detach_mutex); 2451 mutex_destroy(&sd_log_mutex); 2452 mutex_destroy(&sd_label_mutex); 2453 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2454 2455 sd_scsi_probe_cache_fini(); 2456 2457 sd_scsi_target_lun_fini(); 2458 2459 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2460 cv_destroy(&sd_tr.srq_inprocess_cv); 2461 2462 #ifndef XPV_HVM_DRIVER 2463 ddi_soft_state_fini(&sd_state); 2464 #endif /* !XPV_HVM_DRIVER */ 2465 2466 return (err); 2467 } 2468 2469 2470 /* 2471 * Function: _info 2472 * 2473 * Description: This is the driver _info(9E) entry point. 2474 * 2475 * Arguments: modinfop - pointer to the driver modinfo structure 2476 * 2477 * Return Code: Returns the value from mod_info(9F). 2478 * 2479 * Context: Kernel thread context 2480 */ 2481 2482 int 2483 _info(struct modinfo *modinfop) 2484 { 2485 return (mod_info(&modlinkage, modinfop)); 2486 } 2487 2488 2489 /* 2490 * The following routines implement the driver message logging facility. 2491 * They provide component- and level-based debug output filtering. 2492 * Output may also be restricted to messages for a single instance by 2493 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set 2494 * to NULL, then messages for all instances are printed. 2495 * 2496 * These routines have been cloned from each other due to the language 2497 * constraints of macros and variable argument list processing. 2498 */ 2499 2500 2501 /* 2502 * Function: sd_log_err 2503 * 2504 * Description: This routine is called by the SD_ERROR macro for debug 2505 * logging of error conditions. 2506 * 2507 * Arguments: comp - driver component being logged 2508 * un - pointer to driver soft state (unit) structure 2509 * fmt - error string and format to be logged 2510 */ 2511 2512 static void 2513 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...) 2514 { 2515 va_list ap; 2516 dev_info_t *dev; 2517 2518 ASSERT(un != NULL); 2519 dev = SD_DEVINFO(un); 2520 ASSERT(dev != NULL); 2521 2522 /* 2523 * Filter messages based on the global component and level masks. 2524 * Also print if un matches the value of sd_debug_un, or if 2525 * sd_debug_un is set to NULL. 2526 */ 2527 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) && 2528 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2529 mutex_enter(&sd_log_mutex); 2530 va_start(ap, fmt); 2531 (void) vsprintf(sd_log_buf, fmt, ap); 2532 va_end(ap); 2533 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2534 mutex_exit(&sd_log_mutex); 2535 } 2536 #ifdef SD_FAULT_INJECTION 2537 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2538 if (un->sd_injection_mask & comp) { 2539 mutex_enter(&sd_log_mutex); 2540 va_start(ap, fmt); 2541 (void) vsprintf(sd_log_buf, fmt, ap); 2542 va_end(ap); 2543 sd_injection_log(sd_log_buf, un); 2544 mutex_exit(&sd_log_mutex); 2545 } 2546 #endif 2547 } 2548 2549 2550 /* 2551 * Function: sd_log_info 2552 * 2553 * Description: This routine is called by the SD_INFO macro for debug 2554 * logging of general purpose informational conditions. 2555 * 2556 * Arguments: component - driver component being logged 2557 * un - pointer to driver soft state (unit) structure 2558 * fmt - info string and format to be logged 2559 */ 2560 2561 static void 2562 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2563 { 2564 va_list ap; 2565 dev_info_t *dev; 2566 2567 ASSERT(un != NULL); 2568 dev = SD_DEVINFO(un); 2569 ASSERT(dev != NULL); 2570 2571 /* 2572 * Filter messages based on the global component and level masks. 2573 * Also print if un matches the value of sd_debug_un, or if 2574 * sd_debug_un is set to NULL. 2575 */ 2576 if ((sd_component_mask & component) && 2577 (sd_level_mask & SD_LOGMASK_INFO) && 2578 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2579 mutex_enter(&sd_log_mutex); 2580 va_start(ap, fmt); 2581 (void) vsprintf(sd_log_buf, fmt, ap); 2582 va_end(ap); 2583 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2584 mutex_exit(&sd_log_mutex); 2585 } 2586 #ifdef SD_FAULT_INJECTION 2587 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2588 if (un->sd_injection_mask & component) { 2589 mutex_enter(&sd_log_mutex); 2590 va_start(ap, fmt); 2591 (void) vsprintf(sd_log_buf, fmt, ap); 2592 va_end(ap); 2593 sd_injection_log(sd_log_buf, un); 2594 mutex_exit(&sd_log_mutex); 2595 } 2596 #endif 2597 } 2598 2599 2600 /* 2601 * Function: sd_log_trace 2602 * 2603 * Description: This routine is called by the SD_TRACE macro for debug 2604 * logging of trace conditions (i.e. function entry/exit). 2605 * 2606 * Arguments: component - driver component being logged 2607 * un - pointer to driver soft state (unit) structure 2608 * fmt - trace string and format to be logged 2609 */ 2610 2611 static void 2612 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...) 2613 { 2614 va_list ap; 2615 dev_info_t *dev; 2616 2617 ASSERT(un != NULL); 2618 dev = SD_DEVINFO(un); 2619 ASSERT(dev != NULL); 2620 2621 /* 2622 * Filter messages based on the global component and level masks. 2623 * Also print if un matches the value of sd_debug_un, or if 2624 * sd_debug_un is set to NULL. 2625 */ 2626 if ((sd_component_mask & component) && 2627 (sd_level_mask & SD_LOGMASK_TRACE) && 2628 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2629 mutex_enter(&sd_log_mutex); 2630 va_start(ap, fmt); 2631 (void) vsprintf(sd_log_buf, fmt, ap); 2632 va_end(ap); 2633 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2634 mutex_exit(&sd_log_mutex); 2635 } 2636 #ifdef SD_FAULT_INJECTION 2637 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2638 if (un->sd_injection_mask & component) { 2639 mutex_enter(&sd_log_mutex); 2640 va_start(ap, fmt); 2641 (void) vsprintf(sd_log_buf, fmt, ap); 2642 va_end(ap); 2643 sd_injection_log(sd_log_buf, un); 2644 mutex_exit(&sd_log_mutex); 2645 } 2646 #endif 2647 } 2648 2649 2650 /* 2651 * Function: sdprobe 2652 * 2653 * Description: This is the driver probe(9e) entry point function. 2654 * 2655 * Arguments: devi - opaque device info handle 2656 * 2657 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2658 * DDI_PROBE_FAILURE: If the probe failed. 2659 * DDI_PROBE_PARTIAL: If the instance is not present now, 2660 * but may be present in the future. 2661 */ 2662 2663 static int 2664 sdprobe(dev_info_t *devi) 2665 { 2666 struct scsi_device *devp; 2667 int rval; 2668 #ifndef XPV_HVM_DRIVER 2669 int instance = ddi_get_instance(devi); 2670 #endif /* !XPV_HVM_DRIVER */ 2671 2672 /* 2673 * if it wasn't for pln, sdprobe could actually be nulldev 2674 * in the "__fibre" case. 2675 */ 2676 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2677 return (DDI_PROBE_DONTCARE); 2678 } 2679 2680 devp = ddi_get_driver_private(devi); 2681 2682 if (devp == NULL) { 2683 /* Ooops... nexus driver is mis-configured...
*/ 2684 return (DDI_PROBE_FAILURE); 2685 } 2686 2687 #ifndef XPV_HVM_DRIVER 2688 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2689 return (DDI_PROBE_PARTIAL); 2690 } 2691 #endif /* !XPV_HVM_DRIVER */ 2692 2693 /* 2694 * Call the SCSA utility probe routine to see if we actually 2695 * have a target at this SCSI nexus. 2696 */ 2697 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2698 case SCSIPROBE_EXISTS: 2699 switch (devp->sd_inq->inq_dtype) { 2700 case DTYPE_DIRECT: 2701 rval = DDI_PROBE_SUCCESS; 2702 break; 2703 case DTYPE_RODIRECT: 2704 /* CDs etc. Can be removable media */ 2705 rval = DDI_PROBE_SUCCESS; 2706 break; 2707 case DTYPE_OPTICAL: 2708 /* 2709 * Rewritable optical driver HP115AA 2710 * Can also be removable media 2711 */ 2712 2713 /* 2714 * Do not attempt to bind to DTYPE_OPTICAL if 2715 * pre solaris 9 sparc sd behavior is required 2716 * 2717 * If first time through and sd_dtype_optical_bind 2718 * has not been set in /etc/system check properties 2719 */ 2720 2721 if (sd_dtype_optical_bind < 0) { 2722 sd_dtype_optical_bind = ddi_prop_get_int 2723 (DDI_DEV_T_ANY, devi, 0, 2724 "optical-device-bind", 1); 2725 } 2726 2727 if (sd_dtype_optical_bind == 0) { 2728 rval = DDI_PROBE_FAILURE; 2729 } else { 2730 rval = DDI_PROBE_SUCCESS; 2731 } 2732 break; 2733 2734 case DTYPE_NOTPRESENT: 2735 default: 2736 rval = DDI_PROBE_FAILURE; 2737 break; 2738 } 2739 break; 2740 default: 2741 rval = DDI_PROBE_PARTIAL; 2742 break; 2743 } 2744 2745 /* 2746 * This routine checks for resource allocation prior to freeing, 2747 * so it will take care of the "smart probing" case where a 2748 * scsi_probe() may or may not have been issued and will *not* 2749 * free previously-freed resources. 2750 */ 2751 scsi_unprobe(devp); 2752 return (rval); 2753 } 2754 2755 2756 /* 2757 * Function: sdinfo 2758 * 2759 * Description: This is the driver getinfo(9e) entry point function. 2760 * Given the device number, return the devinfo pointer from 2761 * the scsi_device structure or the instance number 2762 * associated with the dev_t. 2763 * 2764 * Arguments: dip - pointer to device info structure 2765 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2766 * DDI_INFO_DEVT2INSTANCE) 2767 * arg - driver dev_t 2768 * resultp - user buffer for request response 2769 * 2770 * Return Code: DDI_SUCCESS 2771 * DDI_FAILURE 2772 */ 2773 /* ARGSUSED */ 2774 static int 2775 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2776 { 2777 struct sd_lun *un; 2778 dev_t dev; 2779 int instance; 2780 int error; 2781 2782 switch (infocmd) { 2783 case DDI_INFO_DEVT2DEVINFO: 2784 dev = (dev_t)arg; 2785 instance = SDUNIT(dev); 2786 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2787 return (DDI_FAILURE); 2788 } 2789 *result = (void *) SD_DEVINFO(un); 2790 error = DDI_SUCCESS; 2791 break; 2792 case DDI_INFO_DEVT2INSTANCE: 2793 dev = (dev_t)arg; 2794 instance = SDUNIT(dev); 2795 *result = (void *)(uintptr_t)instance; 2796 error = DDI_SUCCESS; 2797 break; 2798 default: 2799 error = DDI_FAILURE; 2800 } 2801 return (error); 2802 } 2803 2804 /* 2805 * Function: sd_prop_op 2806 * 2807 * Description: This is the driver prop_op(9e) entry point function. 2808 * Return the number of blocks for the partition in question 2809 * or forward the request to the property facilities. 
2810 * 2811 * Arguments: dev - device number 2812 * dip - pointer to device info structure 2813 * prop_op - property operator 2814 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2815 * name - pointer to property name 2816 * valuep - pointer or address of the user buffer 2817 * lengthp - property length 2818 * 2819 * Return Code: DDI_PROP_SUCCESS 2820 * DDI_PROP_NOT_FOUND 2821 * DDI_PROP_UNDEFINED 2822 * DDI_PROP_NO_MEMORY 2823 * DDI_PROP_BUF_TOO_SMALL 2824 */ 2825 2826 static int 2827 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2828 char *name, caddr_t valuep, int *lengthp) 2829 { 2830 struct sd_lun *un; 2831 2832 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2833 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2834 name, valuep, lengthp)); 2835 2836 return (cmlb_prop_op(un->un_cmlbhandle, 2837 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2838 SDPART(dev), (void *)SD_PATH_DIRECT)); 2839 } 2840 2841 /* 2842 * The following functions are for smart probing: 2843 * sd_scsi_probe_cache_init() 2844 * sd_scsi_probe_cache_fini() 2845 * sd_scsi_clear_probe_cache() 2846 * sd_scsi_probe_with_cache() 2847 */ 2848 2849 /* 2850 * Function: sd_scsi_probe_cache_init 2851 * 2852 * Description: Initializes the probe response cache mutex and head pointer. 2853 * 2854 * Context: Kernel thread context 2855 */ 2856 2857 static void 2858 sd_scsi_probe_cache_init(void) 2859 { 2860 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2861 sd_scsi_probe_cache_head = NULL; 2862 } 2863 2864 2865 /* 2866 * Function: sd_scsi_probe_cache_fini 2867 * 2868 * Description: Frees all resources associated with the probe response cache. 2869 * 2870 * Context: Kernel thread context 2871 */ 2872 2873 static void 2874 sd_scsi_probe_cache_fini(void) 2875 { 2876 struct sd_scsi_probe_cache *cp; 2877 struct sd_scsi_probe_cache *ncp; 2878 2879 /* Clean up our smart probing linked list */ 2880 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2881 ncp = cp->next; 2882 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2883 } 2884 sd_scsi_probe_cache_head = NULL; 2885 mutex_destroy(&sd_scsi_probe_cache_mutex); 2886 } 2887 2888 2889 /* 2890 * Function: sd_scsi_clear_probe_cache 2891 * 2892 * Description: This routine clears the probe response cache. This is 2893 * done when open() returns ENXIO so that when deferred 2894 * attach is attempted (possibly after a device has been 2895 * turned on) we will retry the probe. Since we don't know 2896 * which target we failed to open, we just clear the 2897 * entire cache. 2898 * 2899 * Context: Kernel thread context 2900 */ 2901 2902 static void 2903 sd_scsi_clear_probe_cache(void) 2904 { 2905 struct sd_scsi_probe_cache *cp; 2906 int i; 2907 2908 mutex_enter(&sd_scsi_probe_cache_mutex); 2909 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2910 /* 2911 * Reset all entries to SCSIPROBE_EXISTS. This will 2912 * force probing to be performed the next time 2913 * sd_scsi_probe_with_cache is called. 2914 */ 2915 for (i = 0; i < NTARGETS_WIDE; i++) { 2916 cp->cache[i] = SCSIPROBE_EXISTS; 2917 } 2918 } 2919 mutex_exit(&sd_scsi_probe_cache_mutex); 2920 } 2921 2922 2923 /* 2924 * Function: sd_scsi_probe_with_cache 2925 * 2926 * Description: This routine implements support for a scsi device probe 2927 * with cache. The driver maintains a cache of the target 2928 * responses to scsi probes. 
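Each cache entry is keyed by the parent (HBA) dev_info node and holds one response slot per target ID.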
If we get no response from a 2929 * target during a probe inquiry, we remember that, and we 2930 * avoid additional calls to scsi_probe on non-zero LUNs 2931 * on the same target until the cache is cleared. By doing 2932 * so we avoid the 1/4 sec selection timeout for nonzero 2933 * LUNs. lun0 of a target is always probed. 2934 * 2935 * Arguments: devp - Pointer to a scsi_device(9S) structure 2936 * waitfunc - indicates what the allocator routines should 2937 * do when resources are not available. This value 2938 * is passed on to scsi_probe() when that routine 2939 * is called. 2940 * 2941 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2942 * otherwise the value returned by scsi_probe(9F). 2943 * 2944 * Context: Kernel thread context 2945 */ 2946 2947 static int 2948 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2949 { 2950 struct sd_scsi_probe_cache *cp; 2951 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2952 int lun, tgt; 2953 2954 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2955 SCSI_ADDR_PROP_LUN, 0); 2956 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2957 SCSI_ADDR_PROP_TARGET, -1); 2958 2959 /* Make sure caching enabled and target in range */ 2960 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2961 /* do it the old way (no cache) */ 2962 return (scsi_probe(devp, waitfn)); 2963 } 2964 2965 mutex_enter(&sd_scsi_probe_cache_mutex); 2966 2967 /* Find the cache for this scsi bus instance */ 2968 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2969 if (cp->pdip == pdip) { 2970 break; 2971 } 2972 } 2973 2974 /* If we can't find a cache for this pdip, create one */ 2975 if (cp == NULL) { 2976 int i; 2977 2978 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2979 KM_SLEEP); 2980 cp->pdip = pdip; 2981 cp->next = sd_scsi_probe_cache_head; 2982 sd_scsi_probe_cache_head = cp; 2983 for (i = 0; i < NTARGETS_WIDE; i++) { 2984 cp->cache[i] = SCSIPROBE_EXISTS; 2985 } 2986 } 2987 2988 mutex_exit(&sd_scsi_probe_cache_mutex); 2989 2990 /* Recompute the cache for this target if LUN zero */ 2991 if (lun == 0) { 2992 cp->cache[tgt] = SCSIPROBE_EXISTS; 2993 } 2994 2995 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2996 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2997 return (SCSIPROBE_NORESP); 2998 } 2999 3000 /* Do the actual probe; save & return the result */ 3001 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 3002 } 3003 3004 3005 /* 3006 * Function: sd_scsi_target_lun_init 3007 * 3008 * Description: Initializes the attached lun chain mutex and head pointer. 
3009 * 3010 * Context: Kernel thread context 3011 */ 3012 3013 static void 3014 sd_scsi_target_lun_init(void) 3015 { 3016 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 3017 sd_scsi_target_lun_head = NULL; 3018 } 3019 3020 3021 /* 3022 * Function: sd_scsi_target_lun_fini 3023 * 3024 * Description: Frees all resources associated with the attached lun 3025 * chain 3026 * 3027 * Context: Kernel thread context 3028 */ 3029 3030 static void 3031 sd_scsi_target_lun_fini(void) 3032 { 3033 struct sd_scsi_hba_tgt_lun *cp; 3034 struct sd_scsi_hba_tgt_lun *ncp; 3035 3036 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 3037 ncp = cp->next; 3038 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 3039 } 3040 sd_scsi_target_lun_head = NULL; 3041 mutex_destroy(&sd_scsi_target_lun_mutex); 3042 } 3043 3044 3045 /* 3046 * Function: sd_scsi_get_target_lun_count 3047 * 3048 * Description: This routine will check in the attached lun chain to see 3049 * how many luns are attached on the required SCSI controller 3050 * and target. Currently, some capabilities like tagged queueing 3051 * are supported per target by the HBA, so all luns in a 3052 * target have the same capabilities. Based on this assumption, 3053 * sd should only set these capabilities once per target. This 3054 * function is called when sd needs to decide how many luns 3055 * are already attached on a target. 3056 * 3057 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 3058 * controller device. 3059 * target - The target ID on the controller's SCSI bus. 3060 * 3061 * Return Code: The number of luns attached on the required target and 3062 * controller. 3063 * -1 if target ID is not in parallel SCSI scope or the given 3064 * dip is not in the chain. 3065 * 3066 * Context: Kernel thread context 3067 */ 3068 3069 static int 3070 sd_scsi_get_target_lun_count(dev_info_t *dip, int target) 3071 { 3072 struct sd_scsi_hba_tgt_lun *cp; 3073 3074 if ((target < 0) || (target >= NTARGETS_WIDE)) { 3075 return (-1); 3076 } 3077 3078 mutex_enter(&sd_scsi_target_lun_mutex); 3079 3080 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 3081 if (cp->pdip == dip) { 3082 break; 3083 } 3084 } 3085 3086 mutex_exit(&sd_scsi_target_lun_mutex); 3087 3088 if (cp == NULL) { 3089 return (-1); 3090 } 3091 3092 return (cp->nlun[target]); 3093 } 3094 3095 3096 /* 3097 * Function: sd_scsi_update_lun_on_target 3098 * 3099 * Description: This routine is used to update the attached lun chain when a 3100 * lun is attached or detached on a target. 3101 * 3102 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 3103 * controller device. 3104 * target - The target ID on the controller's SCSI bus. 3105 * flag - Indicates whether the lun is attached or detached.
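(SD_SCSI_LUN_ATTACH increments the per-target lun count; any other value decrements it.)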
3106 * 3107 * Context: Kernel thread context 3108 */ 3109 3110 static void 3111 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 3112 { 3113 struct sd_scsi_hba_tgt_lun *cp; 3114 3115 mutex_enter(&sd_scsi_target_lun_mutex); 3116 3117 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 3118 if (cp->pdip == dip) { 3119 break; 3120 } 3121 } 3122 3123 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 3124 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 3125 KM_SLEEP); 3126 cp->pdip = dip; 3127 cp->next = sd_scsi_target_lun_head; 3128 sd_scsi_target_lun_head = cp; 3129 } 3130 3131 mutex_exit(&sd_scsi_target_lun_mutex); 3132 3133 if (cp != NULL) { 3134 if (flag == SD_SCSI_LUN_ATTACH) { 3135 cp->nlun[target]++; 3136 } else { 3137 cp->nlun[target]--; 3138 } 3139 } 3140 } 3141 3142 3143 /* 3144 * Function: sd_spin_up_unit 3145 * 3146 * Description: Issues the following commands to spin up the device: 3147 * START STOP UNIT, and INQUIRY. 3148 * 3149 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3150 * structure for this target. 3151 * 3152 * Return Code: 0 - success 3153 * EIO - failure 3154 * EACCES - reservation conflict 3155 * 3156 * Context: Kernel thread context 3157 */ 3158 3159 static int 3160 sd_spin_up_unit(sd_ssc_t *ssc) 3161 { 3162 size_t resid = 0; 3163 int has_conflict = FALSE; 3164 uchar_t *bufaddr; 3165 int status; 3166 struct sd_lun *un; 3167 3168 ASSERT(ssc != NULL); 3169 un = ssc->ssc_un; 3170 ASSERT(un != NULL); 3171 3172 /* 3173 * Send a throwaway START UNIT command. 3174 * 3175 * If we fail on this, we don't care presently what precisely 3176 * is wrong. EMC's arrays will also fail this with a check 3177 * condition (0x2/0x4/0x3) if the device is "inactive," but 3178 * we don't want to fail the attach because it may become 3179 * "active" later. 3180 */ 3181 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 3182 SD_PATH_DIRECT); 3183 3184 if (status != 0) { 3185 if (status == EACCES) 3186 has_conflict = TRUE; 3187 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3188 } 3189 3190 /* 3191 * Send another INQUIRY command to the target. This is necessary for 3192 * non-removable media direct access devices because their INQUIRY data 3193 * may not be fully qualified until they are spun up (perhaps via the 3194 * START command above). Note: This seems to be needed for some 3195 * legacy devices only. The INQUIRY command should succeed even if a 3196 * Reservation Conflict is present. 3197 */ 3198 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 3199 3200 if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid) 3201 != 0) { 3202 kmem_free(bufaddr, SUN_INQSIZE); 3203 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 3204 return (EIO); 3205 } 3206 3207 /* 3208 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 3209 * Note that this routine does not return a failure here even if the 3210 * INQUIRY command did not return any data. This is a legacy behavior. 3211 */ 3212 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 3213 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 3214 } 3215 3216 kmem_free(bufaddr, SUN_INQSIZE); 3217 3218 /* If we hit a reservation conflict above, tell the caller. */ 3219 if (has_conflict == TRUE) { 3220 return (EACCES); 3221 } 3222 3223 return (0); 3224 } 3225 3226 #ifdef _LP64 3227 /* 3228 * Function: sd_enable_descr_sense 3229 * 3230 * Description: This routine attempts to select descriptor sense format 3231 * using the Control mode page.
 * Devices that support 64 bit
3232 * LBAs (for >2TB luns) should also implement descriptor
3233 * sense data, so we call this function whenever we see
3234 * a lun larger than 2TB. If for some reason the device
3235 * supports 64 bit LBAs but doesn't support descriptor
3236 * sense, the mode select will presumably fail. Everything will
3237 * continue to work normally except that we will not get
3238 * complete sense data for commands that fail with an LBA
3239 * larger than 32 bits.
3240 *
3241 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3242 * structure for this target.
3243 *
3244 * Context: Kernel thread context only
3245 */
3246
3247 static void
3248 sd_enable_descr_sense(sd_ssc_t *ssc)
3249 {
3250 uchar_t *header;
3251 struct mode_control_scsi3 *ctrl_bufp;
3252 size_t buflen;
3253 size_t bd_len;
3254 int status;
3255 struct sd_lun *un;
3256
3257 ASSERT(ssc != NULL);
3258 un = ssc->ssc_un;
3259 ASSERT(un != NULL);
3260
3261 /*
3262 * Read MODE SENSE page 0xA, Control Mode Page
3263 */
3264 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
3265 sizeof (struct mode_control_scsi3);
3266 header = kmem_zalloc(buflen, KM_SLEEP);
3267
3268 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
3269 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT);
3270
3271 if (status != 0) {
3272 SD_ERROR(SD_LOG_COMMON, un,
3273 "sd_enable_descr_sense: mode sense ctrl page failed\n");
3274 goto eds_exit;
3275 }
3276
3277 /*
3278 * Determine size of Block Descriptors in order to locate
3279 * the mode page data. ATAPI devices return 0, SCSI devices
3280 * should return MODE_BLK_DESC_LENGTH.
3281 */
3282 bd_len = ((struct mode_header *)header)->bdesc_length;
3283
3284 /* Clear the mode data length field for MODE SELECT */
3285 ((struct mode_header *)header)->length = 0;
3286
3287 ctrl_bufp = (struct mode_control_scsi3 *)
3288 (header + MODE_HEADER_LENGTH + bd_len);
3289
3290 /*
3291 * If the page length is smaller than the expected value,
3292 * the target device doesn't support D_SENSE. Bail out here.
3293 */
3294 if (ctrl_bufp->mode_page.length <
3295 sizeof (struct mode_control_scsi3) - 2) {
3296 SD_ERROR(SD_LOG_COMMON, un,
3297 "sd_enable_descr_sense: enable D_SENSE failed\n");
3298 goto eds_exit;
3299 }
3300
3301 /*
3302 * Clear PS bit for MODE SELECT
3303 */
3304 ctrl_bufp->mode_page.ps = 0;
3305
3306 /*
3307 * Set D_SENSE to enable descriptor sense format.
3308 */
3309 ctrl_bufp->d_sense = 1;
3310
3311 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3312
3313 /*
3314 * Use MODE SELECT to commit the change to the D_SENSE bit
3315 */
3316 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header,
3317 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT);
3318
3319 if (status != 0) {
3320 SD_INFO(SD_LOG_COMMON, un,
3321 "sd_enable_descr_sense: mode select ctrl page failed\n");
3322 } else {
3323 kmem_free(header, buflen);
3324 return;
3325 }
3326
3327 eds_exit:
3328 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3329 kmem_free(header, buflen);
3330 }
3331
3332 /*
3333 * Function: sd_reenable_dsense_task
3334 *
3335 * Description: Re-enable descriptor sense after device or bus reset
3336 *
3337 * Context: Executes in a taskq() thread context
3338 */
3339 static void
3340 sd_reenable_dsense_task(void *arg)
3341 {
3342 struct sd_lun *un = arg;
3343 sd_ssc_t *ssc;
3344
3345 ASSERT(un != NULL);
3346
3347 ssc = sd_ssc_init(un);
3348 sd_enable_descr_sense(ssc);
3349 sd_ssc_fini(ssc);
3350 }
3351 #endif /* _LP64 */
3352
3353 /*
3354 * Function: sd_set_mmc_caps
3355 *
3356 * Description: This routine determines if the device is MMC compliant and if
3357 * the device supports CDDA via a mode sense of the CD/DVD
3358 * capabilities mode page. Also checks if the device is a
3359 * dvdram writable device.
3360 *
3361 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3362 * structure for this target.
3363 *
3364 * Context: Kernel thread context only
3365 */
3366
3367 static void
3368 sd_set_mmc_caps(sd_ssc_t *ssc)
3369 {
3370 struct mode_header_grp2 *sense_mhp;
3371 uchar_t *sense_page;
3372 caddr_t buf;
3373 int bd_len;
3374 int status;
3375 struct uscsi_cmd com;
3376 int rtn;
3377 uchar_t *out_data_rw, *out_data_hd;
3378 uchar_t *rqbuf_rw, *rqbuf_hd;
3379 struct sd_lun *un;
3380
3381 ASSERT(ssc != NULL);
3382 un = ssc->ssc_un;
3383 ASSERT(un != NULL);
3384
3385 /*
3386 * The flags set in this function are: mmc compliant, dvdram
3387 * writable device, and cdda support. Initialize them to FALSE;
3388 * each is set to TRUE when the corresponding capability is detected.
3389 */
3390 un->un_f_mmc_cap = FALSE;
3391 un->un_f_dvdram_writable_device = FALSE;
3392 un->un_f_cfg_cdda = FALSE;
3393
3394 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3395 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
3396 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);
3397
3398 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3399
3400 if (status != 0) {
3401 /* command failed; just return */
3402 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3403 return;
3404 }
3405 /*
3406 * If the mode sense request for the CDROM CAPABILITIES
3407 * page (0x2A) succeeds, the device is assumed to be MMC.
3408 */
3409 un->un_f_mmc_cap = TRUE;
3410
3411 /* Get to the page data */
3412 sense_mhp = (struct mode_header_grp2 *)buf;
3413 bd_len = (sense_mhp->bdesc_length_hi << 8) |
3414 sense_mhp->bdesc_length_lo;
3415 if (bd_len > MODE_BLK_DESC_LENGTH) {
3416 /*
3417 * We did not get back the expected block descriptor
3418 * length, so we cannot determine if the device supports
3419 * CDDA. However, we still indicate the device is MMC
3420 * according to the successful response to the page
3421 * 0x2A mode sense request.
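 *
 * (When the descriptor length does check out, the capability
 * bits consulted below sit at fixed offsets in the 0x2A page,
 * per the MMC page layout: byte 5 bit 0 is CD-DA read and
 * byte 3 bit 5 is DVD-RAM write.)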
3422 */
3423 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3424 "sd_set_mmc_caps: Mode Sense returned "
3425 "invalid block descriptor length\n");
3426 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3427 return;
3428 }
3429
3430 /* See if read CDDA is supported */
3431 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 +
3432 bd_len);
3433 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE;
3434
3435 /* See if writing DVD RAM is supported. */
3436 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE;
3437 if (un->un_f_dvdram_writable_device == TRUE) {
3438 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3439 return;
3440 }
3441
3442 /*
3443 * If the device presents DVD or CD capabilities in the mode
3444 * page, we can return here since an RRD will not have
3445 * these capabilities.
3446 */
3447 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3448 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3449 return;
3450 }
3451 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3452
3453 /*
3454 * If un->un_f_dvdram_writable_device is still FALSE,
3455 * check for a Removable Rigid Disk (RRD). An RRD
3456 * device is identified by the features RANDOM_WRITABLE and
3457 * HARDWARE_DEFECT_MANAGEMENT.
3458 */
3459 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3460 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3461
3462 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
3463 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3464 RANDOM_WRITABLE, SD_PATH_STANDARD);
3465
3466 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3467
3468 if (rtn != 0) {
3469 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3470 kmem_free(rqbuf_rw, SENSE_LENGTH);
3471 return;
3472 }
3473
3474 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3475 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3476
3477 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
3478 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3479 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD);
3480
3481 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3482
3483 if (rtn == 0) {
3484 /*
3485 * We have good information, check for random writable
3486 * and hardware defect features.
3487 */
3488 if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3489 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) {
3490 un->un_f_dvdram_writable_device = TRUE;
3491 }
3492 }
3493
3494 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3495 kmem_free(rqbuf_rw, SENSE_LENGTH);
3496 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3497 kmem_free(rqbuf_hd, SENSE_LENGTH);
3498 }
3499
3500 /*
3501 * Function: sd_check_for_writable_cd
3502 *
3503 * Description: This routine determines if the media in the device is
3504 * writable. It uses the get configuration command (0x46)
3505 * to determine if the media is writable.
3506 *
3507 * Arguments: ssc - ssc contains pointer to driver soft state (unit) structure
3508 * path_flag - SD_PATH_DIRECT to use the USCSI "direct"
3509 * chain and the normal command waitq, or
3510 * SD_PATH_DIRECT_PRIORITY to use the USCSI
3511 * "direct" chain and bypass the normal command
3512 * waitq.
3513 *
3514 * Context: Never called at interrupt context.
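 *
 * The writable-DVD test below checks the current profile
 * returned by GET CONFIGURATION: bytes 6-7 of the returned
 * header hold the current profile number, and 0x0012 is the
 * DVD-RAM profile defined by MMC (hence the out_data[6]/[7]
 * comparison in the code).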
3515 */
3516
3517 static void
3518 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag)
3519 {
3520 struct uscsi_cmd com;
3521 uchar_t *out_data;
3522 uchar_t *rqbuf;
3523 int rtn;
3524 uchar_t *out_data_rw, *out_data_hd;
3525 uchar_t *rqbuf_rw, *rqbuf_hd;
3526 struct mode_header_grp2 *sense_mhp;
3527 uchar_t *sense_page;
3528 caddr_t buf;
3529 int bd_len;
3530 int status;
3531 struct sd_lun *un;
3532
3533 ASSERT(ssc != NULL);
3534 un = ssc->ssc_un;
3535 ASSERT(un != NULL);
3536 ASSERT(mutex_owned(SD_MUTEX(un)));
3537
3538 /*
3539 * Initialize writable media to FALSE; it is set to TRUE only if
3540 * the configuration info tells us otherwise.
3541 */
3542 un->un_f_mmc_writable_media = FALSE;
3543 mutex_exit(SD_MUTEX(un));
3544
3545 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
3546 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3547
3548 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH,
3549 out_data, SD_PROFILE_HEADER_LEN, path_flag);
3550
3551 if (rtn != 0)
3552 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3553
3554 mutex_enter(SD_MUTEX(un));
3555 if (rtn == 0) {
3556 /*
3557 * We have good information, check for writable DVD.
3558 */
3559 if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
3560 un->un_f_mmc_writable_media = TRUE;
3561 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3562 kmem_free(rqbuf, SENSE_LENGTH);
3563 return;
3564 }
3565 }
3566
3567 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3568 kmem_free(rqbuf, SENSE_LENGTH);
3569
3570 /*
3571 * Determine if this is an RRD type device.
3572 */
3573 mutex_exit(SD_MUTEX(un));
3574 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3575 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
3576 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag);
3577
3578 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3579
3580 mutex_enter(SD_MUTEX(un));
3581 if (status != 0) {
3582 /* command failed; just return */
3583 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3584 return;
3585 }
3586
3587 /* Get to the page data */
3588 sense_mhp = (struct mode_header_grp2 *)buf;
3589 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
3590 if (bd_len > MODE_BLK_DESC_LENGTH) {
3591 /*
3592 * We did not get back the expected block descriptor length so
3593 * we cannot check the mode page.
3594 */
3595 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3596 "sd_check_for_writable_cd: Mode Sense returned "
3597 "invalid block descriptor length\n");
3598 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3599 return;
3600 }
3601
3602 /*
3603 * If the device presents DVD or CD capabilities in the mode
3604 * page, we can return here since an RRD device will not have
3605 * these capabilities.
3606 */
3607 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
3608 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3609 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3610 return;
3611 }
3612 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3613
3614 /*
3615 * If un->un_f_mmc_writable_media is still FALSE,
3616 * check for RRD type media. An RRD device is identified
3617 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
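 *
 * In outline, the check coded below is:
 *
 *     GET CONFIGURATION, feature RANDOM_WRITABLE          -> out_data_rw
 *     GET CONFIGURATION, feature HARDWARE_DEFECT_MANAGEMENT -> out_data_hd
 *     writable media = both features reported (byte 9) AND both
 *         marked current (byte 10, bit 0)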
3618 */
3619 mutex_exit(SD_MUTEX(un));
3620 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3621 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3622
3623 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
3624 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3625 RANDOM_WRITABLE, path_flag);
3626
3627 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3628 if (rtn != 0) {
3629 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3630 kmem_free(rqbuf_rw, SENSE_LENGTH);
3631 mutex_enter(SD_MUTEX(un));
3632 return;
3633 }
3634
3635 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3636 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3637
3638 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
3639 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3640 HARDWARE_DEFECT_MANAGEMENT, path_flag);
3641
3642 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3643 mutex_enter(SD_MUTEX(un));
3644 if (rtn == 0) {
3645 /*
3646 * We have good information, check for random writable
3647 * and hardware defect features as current.
3648 */
3649 if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3650 (out_data_rw[10] & 0x1) &&
3651 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
3652 (out_data_hd[10] & 0x1)) {
3653 un->un_f_mmc_writable_media = TRUE;
3654 }
3655 }
3656
3657 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3658 kmem_free(rqbuf_rw, SENSE_LENGTH);
3659 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3660 kmem_free(rqbuf_hd, SENSE_LENGTH);
3661 }
3662
3663 /*
3664 * Function: sd_read_unit_properties
3665 *
3666 * Description: The following implements a property lookup mechanism.
3667 * Properties for particular disks (keyed on vendor, model
3668 * and rev numbers) are sought in the sd.conf file via
3669 * sd_process_sdconf_file(), and if not found there, are
3670 * looked for in a list hardcoded in this driver via
3671 * sd_process_sdconf_table(). Once located, the properties
3672 * are used to update the driver unit structure.
3673 *
3674 * Arguments: un - driver soft state (unit) structure
3675 */
3676
3677 static void
3678 sd_read_unit_properties(struct sd_lun *un)
3679 {
3680 /*
3681 * sd_process_sdconf_file returns SD_FAILURE if it cannot find
3682 * the "sd-config-list" property (from the sd.conf file) or if
3683 * there was not a match for the inquiry vid/pid. If this event
3684 * occurs the static driver configuration table is searched for
3685 * a match.
3686 */
3687 ASSERT(un != NULL);
3688 if (sd_process_sdconf_file(un) == SD_FAILURE) {
3689 sd_process_sdconf_table(un);
3690 }
3691
3692 /* check for LSI device */
3693 sd_is_lsi(un);
3694
3695
3696 }
3697
3698
3699 /*
3700 * Function: sd_process_sdconf_file
3701 *
3702 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the
3703 * driver's config file (i.e., sd.conf) and update the driver
3704 * soft state structure accordingly.
3705 *
3706 * Arguments: un - driver soft state (unit) structure
3707 *
3708 * Return Code: SD_SUCCESS - The properties were successfully set according
3709 * to the driver configuration file.
3710 * SD_FAILURE - The driver config list was not obtained or
3711 * there was no vid/pid match. This indicates that
3712 * the static config table should be used.
3713 *
3714 * The config file has a property, "sd-config-list". Currently we support
3715 * two kinds of formats.
 * For both formats, the value of this property
3716 * is a list of duplets:
3717 *
3718 * sd-config-list=
3719 * <duplet>,
3720 * [,<duplet>]*;
3721 *
3722 * For the improved format, the structure of each duplet is as follows:
3723 *
3724 * <duplet>:= "<vid+pid>","<tunable-list>"
3725 *
3726 * and
3727 *
3728 * <tunable-list>:= <tunable> [, <tunable> ]*;
3729 * <tunable>:= <name> : <value>
3730 *
3731 * The <vid+pid> is the string that is returned by the target device on a
3732 * SCSI inquiry command, and the <tunable-list> contains one or more tunables
3733 * to apply to all target devices with the specified <vid+pid>.
3734 *
3735 * Each <tunable> is a "<name> : <value>" pair.
3736 *
3737 * For the old format, the structure of each duplet is as follows:
3738 *
3739 * <duplet>:= "<vid+pid>","<data-property-name_list>"
3740 *
3741 * The first entry of the duplet is the device ID string (the concatenated
3742 * vid & pid; not to be confused with a device_id). This is defined in
3743 * the same way as in the sd_disk_table.
3744 *
3745 * The second part of the duplet is a string that identifies a
3746 * data-property-name-list. The data-property-name-list is defined as
3747 * follows:
3748 *
3749 * <data-property-name-list>:=<data-property-name> [<data-property-name>]
3750 *
3751 * The syntax of <data-property-name> depends on the <version> field.
3752 *
3753 * If version = SD_CONF_VERSION_1 we have the following syntax:
3754 *
3755 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
3756 *
3757 * where the prop0 value will be used to set prop0 if bit0 is set in the
3758 * flags, prop1 if bit1 is set, etc., and N = SD_CONF_MAX_ITEMS - 1
3759 *
3760 */
3761
3762 static int
3763 sd_process_sdconf_file(struct sd_lun *un)
3764 {
3765 char **config_list = NULL;
3766 uint_t nelements;
3767 char *vidptr;
3768 int vidlen;
3769 char *dnlist_ptr;
3770 char *dataname_ptr;
3771 char *dataname_lasts;
3772 int *data_list = NULL;
3773 uint_t data_list_len;
3774 int rval = SD_FAILURE;
3775 int i;
3776
3777 ASSERT(un != NULL);
3778
3779 /* Obtain the configuration list associated with the .conf file */
3780 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un),
3781 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list,
3782 &config_list, &nelements) != DDI_PROP_SUCCESS) {
3783 return (SD_FAILURE);
3784 }
3785
3786 /*
3787 * Compare vids in each duplet to the inquiry vid - if a match is
3788 * made, get the data value and update the soft state structure
3789 * accordingly.
3790 *
3791 * Each duplet should appear as a pair of strings; return SD_FAILURE
3792 * otherwise.
3793 */
3794 if (nelements & 1) {
3795 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3796 "sd-config-list should show as pairs of strings.\n");
3797 if (config_list)
3798 ddi_prop_free(config_list);
3799 return (SD_FAILURE);
3800 }
3801
3802 for (i = 0; i < nelements; i += 2) {
3803 /*
3804 * Note: The assumption here is that each vid entry is on
3805 * a unique line from its associated duplet.
3806 */
3807 vidptr = config_list[i];
3808 vidlen = (int)strlen(vidptr);
3809 if ((vidlen == 0) ||
3810 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) {
3811 continue;
3812 }
3813
3814 /*
3815 * dnlist contains 1 or more blank separated
3816 * data-property-name entries
3817 */
3818 dnlist_ptr = config_list[i + 1];
3819
3820 if (strchr(dnlist_ptr, ':') != NULL) {
3821 /*
3822 * Decode the improved format sd-config-list.
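 * For example, a duplet such as (hypothetical values):
 *
 *     "SUN     T4", "delay-busy:600000000, retries-timeout:6"
 *
 * would be decoded here into the delay-busy and retries-timeout
 * tunables for every target whose inquiry data matches the
 * "SUN     T4" vid/pid string.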
3823 */
3824 sd_nvpair_str_decode(un, dnlist_ptr);
3825 } else {
3826 /*
3827 * This is the old format sd-config-list: loop
3828 * through all data-property-name entries in the
3829 * data-property-name-list,
3830 * setting the properties for each.
3831 */
3832 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t",
3833 &dataname_lasts); dataname_ptr != NULL;
3834 dataname_ptr = sd_strtok_r(NULL, " \t",
3835 &dataname_lasts)) {
3836 int version;
3837
3838 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3839 "sd_process_sdconf_file: disk:%s, "
3840 "data:%s\n", vidptr, dataname_ptr);
3841
3842 /* Get the data list */
3843 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
3844 SD_DEVINFO(un), 0, dataname_ptr, &data_list,
3845 &data_list_len) != DDI_PROP_SUCCESS) {
3846 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3847 "sd_process_sdconf_file: data "
3848 "property (%s) has no value\n",
3849 dataname_ptr);
3850 continue;
3851 }
3852
3853 version = data_list[0];
3854
3855 if (version == SD_CONF_VERSION_1) {
3856 sd_tunables values;
3857
3858 /* Set the properties */
3859 if (sd_chk_vers1_data(un, data_list[1],
3860 &data_list[2], data_list_len,
3861 dataname_ptr) == SD_SUCCESS) {
3862 sd_get_tunables_from_conf(un,
3863 data_list[1], &data_list[2],
3864 &values);
3865 sd_set_vers1_properties(un,
3866 data_list[1], &values);
3867 rval = SD_SUCCESS;
3868 } else {
3869 rval = SD_FAILURE;
3870 }
3871 } else {
3872 scsi_log(SD_DEVINFO(un), sd_label,
3873 CE_WARN, "data property %s version "
3874 "0x%x is invalid.",
3875 dataname_ptr, version);
3876 rval = SD_FAILURE;
3877 }
3878 if (data_list)
3879 ddi_prop_free(data_list);
3880 }
3881 }
3882 }
3883
3884 /* free up the memory allocated by ddi_prop_lookup_string_array(). */
3885 if (config_list) {
3886 ddi_prop_free(config_list);
3887 }
3888
3889 return (rval);
3890 }
3891
3892 /*
3893 * Function: sd_nvpair_str_decode()
3894 *
3895 * Description: Parse the improved format sd-config-list to get
3896 * each tunable entry, which is a name-value pair,
3897 * then call sd_set_properties() to set each property.
3898 *
3899 * Arguments: un - driver soft state (unit) structure
3900 * nvpair_str - the tunable list
3901 */
3902 static void
3903 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str)
3904 {
3905 char *nv, *name, *value, *token;
3906 char *nv_lasts, *v_lasts, *x_lasts;
3907
3908 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL;
3909 nv = sd_strtok_r(NULL, ",", &nv_lasts)) {
3910 token = sd_strtok_r(nv, ":", &v_lasts);
3911 name = sd_strtok_r(token, " \t", &x_lasts);
3912 token = sd_strtok_r(NULL, ":", &v_lasts);
3913 value = sd_strtok_r(token, " \t", &x_lasts);
3914 if (name == NULL || value == NULL) {
3915 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3916 "sd_nvpair_str_decode: "
3917 "name or value is not valid!\n");
3918 } else {
3919 sd_set_properties(un, name, value);
3920 }
3921 }
3922 }
3923
3924 /*
3925 * Function: sd_strtok_r()
3926 *
3927 * Description: This function uses strpbrk and strspn to break the
3928 * string into tokens on successive calls; it returns
3929 * NULL when no non-separator characters remain. The first
3930 * argument is NULL for all calls after the first.
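 *
 * A minimal usage sketch (mirrors strtok_r(3C); the buffer
 * contents here are hypothetical):
 *
 *     char buf[] = "throttle-max : 32 , disksort : false";
 *     char *lasts, *tok;
 *
 *     for (tok = sd_strtok_r(buf, ",", &lasts); tok != NULL;
 *         tok = sd_strtok_r(NULL, ",", &lasts)) {
 *             ... tok now points at one "name : value" pair ...
 *     }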
3931 */ 3932 static char * 3933 sd_strtok_r(char *string, const char *sepset, char **lasts) 3934 { 3935 char *q, *r; 3936 3937 /* First or subsequent call */ 3938 if (string == NULL) 3939 string = *lasts; 3940 3941 if (string == NULL) 3942 return (NULL); 3943 3944 /* Skip leading separators */ 3945 q = string + strspn(string, sepset); 3946 3947 if (*q == '\0') 3948 return (NULL); 3949 3950 if ((r = strpbrk(q, sepset)) == NULL) 3951 *lasts = NULL; 3952 else { 3953 *r = '\0'; 3954 *lasts = r + 1; 3955 } 3956 return (q); 3957 } 3958 3959 /* 3960 * Function: sd_set_properties() 3961 * 3962 * Description: Set device properties based on the improved 3963 * format sd-config-list. 3964 * 3965 * Arguments: un - driver soft state (unit) structure 3966 * name - supported tunable name 3967 * value - tunable value 3968 */ 3969 static void 3970 sd_set_properties(struct sd_lun *un, char *name, char *value) 3971 { 3972 char *endptr = NULL; 3973 long val = 0; 3974 3975 if (strcasecmp(name, "cache-nonvolatile") == 0) { 3976 if (strcasecmp(value, "true") == 0) { 3977 un->un_f_suppress_cache_flush = TRUE; 3978 } else if (strcasecmp(value, "false") == 0) { 3979 un->un_f_suppress_cache_flush = FALSE; 3980 } else { 3981 goto value_invalid; 3982 } 3983 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3984 "suppress_cache_flush flag set to %d\n", 3985 un->un_f_suppress_cache_flush); 3986 return; 3987 } 3988 3989 if (strcasecmp(name, "controller-type") == 0) { 3990 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3991 un->un_ctype = val; 3992 } else { 3993 goto value_invalid; 3994 } 3995 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3996 "ctype set to %d\n", un->un_ctype); 3997 return; 3998 } 3999 4000 if (strcasecmp(name, "delay-busy") == 0) { 4001 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4002 un->un_busy_timeout = drv_usectohz(val / 1000); 4003 } else { 4004 goto value_invalid; 4005 } 4006 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4007 "busy_timeout set to %d\n", un->un_busy_timeout); 4008 return; 4009 } 4010 4011 if (strcasecmp(name, "disksort") == 0) { 4012 if (strcasecmp(value, "true") == 0) { 4013 un->un_f_disksort_disabled = FALSE; 4014 } else if (strcasecmp(value, "false") == 0) { 4015 un->un_f_disksort_disabled = TRUE; 4016 } else { 4017 goto value_invalid; 4018 } 4019 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4020 "disksort disabled flag set to %d\n", 4021 un->un_f_disksort_disabled); 4022 return; 4023 } 4024 4025 if (strcasecmp(name, "timeout-releasereservation") == 0) { 4026 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4027 un->un_reserve_release_time = val; 4028 } else { 4029 goto value_invalid; 4030 } 4031 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4032 "reservation release timeout set to %d\n", 4033 un->un_reserve_release_time); 4034 return; 4035 } 4036 4037 if (strcasecmp(name, "reset-lun") == 0) { 4038 if (strcasecmp(value, "true") == 0) { 4039 un->un_f_lun_reset_enabled = TRUE; 4040 } else if (strcasecmp(value, "false") == 0) { 4041 un->un_f_lun_reset_enabled = FALSE; 4042 } else { 4043 goto value_invalid; 4044 } 4045 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4046 "lun reset enabled flag set to %d\n", 4047 un->un_f_lun_reset_enabled); 4048 return; 4049 } 4050 4051 if (strcasecmp(name, "retries-busy") == 0) { 4052 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4053 un->un_busy_retry_count = val; 4054 } else { 4055 goto value_invalid; 4056 } 4057 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4058 "busy retry 
count set to %d\n", un->un_busy_retry_count);
4059 return;
4060 }
4061
4062 if (strcasecmp(name, "retries-timeout") == 0) {
4063 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4064 un->un_retry_count = val;
4065 } else {
4066 goto value_invalid;
4067 }
4068 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4069 "timeout retry count set to %d\n", un->un_retry_count);
4070 return;
4071 }
4072
4073 if (strcasecmp(name, "retries-notready") == 0) {
4074 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4075 un->un_notready_retry_count = val;
4076 } else {
4077 goto value_invalid;
4078 }
4079 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4080 "notready retry count set to %d\n",
4081 un->un_notready_retry_count);
4082 return;
4083 }
4084
4085 if (strcasecmp(name, "retries-reset") == 0) {
4086 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4087 un->un_reset_retry_count = val;
4088 } else {
4089 goto value_invalid;
4090 }
4091 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4092 "reset retry count set to %d\n",
4093 un->un_reset_retry_count);
4094 return;
4095 }
4096
4097 if (strcasecmp(name, "throttle-max") == 0) {
4098 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4099 un->un_saved_throttle = un->un_throttle = val;
4100 } else {
4101 goto value_invalid;
4102 }
4103 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4104 "throttle set to %d\n", un->un_throttle);
4105 }
4106
4107 if (strcasecmp(name, "throttle-min") == 0) {
4108 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4109 un->un_min_throttle = val;
4110 } else {
4111 goto value_invalid;
4112 }
4113 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4114 "min throttle set to %d\n", un->un_min_throttle);
4115 }
4116
4117 if (strcasecmp(name, "rmw-type") == 0) {
4118 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4119 un->un_f_rmw_type = val;
4120 } else {
4121 goto value_invalid;
4122 }
4123 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4124 "RMW type set to %d\n", un->un_f_rmw_type);
4125 }
4126
4127 /*
4128 * Validate the throttle values.
4129 * If any of the numbers are invalid, set everything to defaults.
4130 */
4131 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
4132 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
4133 (un->un_min_throttle > un->un_throttle)) {
4134 un->un_saved_throttle = un->un_throttle = sd_max_throttle;
4135 un->un_min_throttle = sd_min_throttle;
4136 }
4137 return;
4138
4139 value_invalid:
4140 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4141 "value of prop %s is invalid\n", name);
4142 }
4143
4144 /*
4145 * Function: sd_get_tunables_from_conf()
4146 *
4147 *
4148 * Description: This function reads the data list from the sd.conf file and
4149 * pulls the values that can have numeric values as arguments,
4150 * placing the values in the appropriate sd_tunables member.
4151 * Since the order of the data list members varies across platforms,
4152 * this function reads them from the data list in a platform-specific
4153 * order and places them into the correct sd_tunables member that is
4154 * consistent across all platforms.
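 *
 * For example (hypothetical, and assuming SD_CONF_BSET_THROTTLE is
 * bit 0 of the flag word): flags = 0x1 with data_list = { 32 } would
 * set values->sdt_throttle to 32 and leave every other member zeroed.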
4155 */
4156 static void
4157 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
4158 sd_tunables *values)
4159 {
4160 int i;
4161 int mask;
4162
4163 bzero(values, sizeof (sd_tunables));
4164
4165 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
4166
4167 mask = 1 << i;
4168 if (mask > flags) {
4169 break;
4170 }
4171
4172 switch (mask & flags) {
4173 case 0: /* This mask bit not set in flags */
4174 continue;
4175 case SD_CONF_BSET_THROTTLE:
4176 values->sdt_throttle = data_list[i];
4177 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4178 "sd_get_tunables_from_conf: throttle = %d\n",
4179 values->sdt_throttle);
4180 break;
4181 case SD_CONF_BSET_CTYPE:
4182 values->sdt_ctype = data_list[i];
4183 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4184 "sd_get_tunables_from_conf: ctype = %d\n",
4185 values->sdt_ctype);
4186 break;
4187 case SD_CONF_BSET_NRR_COUNT:
4188 values->sdt_not_rdy_retries = data_list[i];
4189 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4190 "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
4191 values->sdt_not_rdy_retries);
4192 break;
4193 case SD_CONF_BSET_BSY_RETRY_COUNT:
4194 values->sdt_busy_retries = data_list[i];
4195 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4196 "sd_get_tunables_from_conf: busy_retries = %d\n",
4197 values->sdt_busy_retries);
4198 break;
4199 case SD_CONF_BSET_RST_RETRIES:
4200 values->sdt_reset_retries = data_list[i];
4201 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4202 "sd_get_tunables_from_conf: reset_retries = %d\n",
4203 values->sdt_reset_retries);
4204 break;
4205 case SD_CONF_BSET_RSV_REL_TIME:
4206 values->sdt_reserv_rel_time = data_list[i];
4207 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4208 "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
4209 values->sdt_reserv_rel_time);
4210 break;
4211 case SD_CONF_BSET_MIN_THROTTLE:
4212 values->sdt_min_throttle = data_list[i];
4213 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4214 "sd_get_tunables_from_conf: min_throttle = %d\n",
4215 values->sdt_min_throttle);
4216 break;
4217 case SD_CONF_BSET_DISKSORT_DISABLED:
4218 values->sdt_disk_sort_dis = data_list[i];
4219 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4220 "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
4221 values->sdt_disk_sort_dis);
4222 break;
4223 case SD_CONF_BSET_LUN_RESET_ENABLED:
4224 values->sdt_lun_reset_enable = data_list[i];
4225 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4226 "sd_get_tunables_from_conf: lun_reset_enable = %d"
4227 "\n", values->sdt_lun_reset_enable);
4228 break;
4229 case SD_CONF_BSET_CACHE_IS_NV:
4230 values->sdt_suppress_cache_flush = data_list[i];
4231 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4232 "sd_get_tunables_from_conf: "
4233 "suppress_cache_flush = %d"
4234 "\n", values->sdt_suppress_cache_flush);
4235 break;
4236 }
4237 }
4238 }
4239
4240 /*
4241 * Function: sd_process_sdconf_table
4242 *
4243 * Description: Search the static configuration table for a match on the
4244 * inquiry vid/pid and update the driver soft state structure
4245 * according to the table property values for the device.
4246 *
4247 * The form of a configuration table entry is:
4248 * <vid+pid>,<flags>,<property-data>
4249 * "SEAGATE ST42400N",1,0x40000,
4250 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1;
4251 *
4252 * Arguments: un - driver soft state (unit) structure
4253 */
4254
4255 static void
4256 sd_process_sdconf_table(struct sd_lun *un)
4257 {
4258 char *id = NULL;
4259 int table_index;
4260 int idlen;
4261
4262 ASSERT(un != NULL);
4263 for (table_index = 0; table_index < sd_disk_table_size;
4264 table_index++) {
4265 id = sd_disk_table[table_index].device_id;
4266 idlen = strlen(id);
4267 if (idlen == 0) {
4268 continue;
4269 }
4270
4271 /*
4272 * The static configuration table currently does not
4273 * implement version 10 properties. Additionally,
4274 * multiple data-property-name entries are not
4275 * implemented in the static configuration table.
4276 */
4277 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
4278 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4279 "sd_process_sdconf_table: disk %s\n", id);
4280 sd_set_vers1_properties(un,
4281 sd_disk_table[table_index].flags,
4282 sd_disk_table[table_index].properties);
4283 break;
4284 }
4285 }
4286 }
4287
4288
4289 /*
4290 * Function: sd_sdconf_id_match
4291 *
4292 * Description: This local function implements a case insensitive vid/pid
4293 * comparison as well as the boundary cases of wild card and
4294 * multiple blanks.
4295 *
4296 * Note: An implicit assumption made here is that the scsi
4297 * inquiry structure will always keep the vid, pid and
4298 * revision strings in consecutive sequence, so they can be
4299 * read as a single string. If this assumption is not the
4300 * case, a separate string, to be used for the check, needs
4301 * to be built with these strings concatenated.
4302 *
4303 * Arguments: un - driver soft state (unit) structure
4304 * id - table or config file vid/pid
4305 * idlen - length of the vid/pid (bytes)
4306 *
4307 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
4308 * SD_FAILURE - Indicates no match with the inquiry vid/pid
4309 */
4310
4311 static int
4312 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
4313 {
4314 struct scsi_inquiry *sd_inq;
4315 int rval = SD_SUCCESS;
4316
4317 ASSERT(un != NULL);
4318 sd_inq = un->un_sd->sd_inq;
4319 ASSERT(id != NULL);
4320
4321 /*
4322 * We use the inq_vid as a pointer to a buffer containing the
4323 * vid and pid and use the entire vid/pid length of the table
4324 * entry for the comparison. This works because the inq_pid
4325 * data member follows inq_vid in the scsi_inquiry structure.
4326 */
4327 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) {
4328 /*
4329 * The user id string is compared to the inquiry vid/pid
4330 * using a case insensitive comparison and ignoring
4331 * multiple spaces.
4332 */
4333 rval = sd_blank_cmp(un, id, idlen);
4334 if (rval != SD_SUCCESS) {
4335 /*
4336 * User id strings that start and end with a "*"
4337 * are a special case. These do not have a
4338 * specific vendor, and the product string can
4339 * appear anywhere in the 16 byte PID portion of
4340 * the inquiry data. This is a simple strstr()
4341 * type search for the user id in the inquiry data.
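 *
 * For example, a hypothetical table entry of "*ST31200N*"
 * would match any device whose 16 byte inquiry PID field
 * contains the substring "ST31200N", regardless of the
 * vendor reported in the vid field.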
4342 */ 4343 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4344 char *pidptr = &id[1]; 4345 int i; 4346 int j; 4347 int pidstrlen = idlen - 2; 4348 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4349 pidstrlen; 4350 4351 if (j < 0) { 4352 return (SD_FAILURE); 4353 } 4354 for (i = 0; i < j; i++) { 4355 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4356 pidptr, pidstrlen) == 0) { 4357 rval = SD_SUCCESS; 4358 break; 4359 } 4360 } 4361 } 4362 } 4363 } 4364 return (rval); 4365 } 4366 4367 4368 /* 4369 * Function: sd_blank_cmp 4370 * 4371 * Description: If the id string starts and ends with a space, treat 4372 * multiple consecutive spaces as equivalent to a single 4373 * space. For example, this causes a sd_disk_table entry 4374 * of " NEC CDROM " to match a device's id string of 4375 * "NEC CDROM". 4376 * 4377 * Note: The success exit condition for this routine is if 4378 * the pointer to the table entry is '\0' and the cnt of 4379 * the inquiry length is zero. This will happen if the inquiry 4380 * string returned by the device is padded with spaces to be 4381 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4382 * SCSI spec states that the inquiry string is to be padded with 4383 * spaces. 4384 * 4385 * Arguments: un - driver soft state (unit) structure 4386 * id - table or config file vid/pid 4387 * idlen - length of the vid/pid (bytes) 4388 * 4389 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4390 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4391 */ 4392 4393 static int 4394 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4395 { 4396 char *p1; 4397 char *p2; 4398 int cnt; 4399 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4400 sizeof (SD_INQUIRY(un)->inq_pid); 4401 4402 ASSERT(un != NULL); 4403 p2 = un->un_sd->sd_inq->inq_vid; 4404 ASSERT(id != NULL); 4405 p1 = id; 4406 4407 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4408 /* 4409 * Note: string p1 is terminated by a NUL but string p2 4410 * isn't. The end of p2 is determined by cnt. 4411 */ 4412 for (;;) { 4413 /* skip over any extra blanks in both strings */ 4414 while ((*p1 != '\0') && (*p1 == ' ')) { 4415 p1++; 4416 } 4417 while ((cnt != 0) && (*p2 == ' ')) { 4418 p2++; 4419 cnt--; 4420 } 4421 4422 /* compare the two strings */ 4423 if ((cnt == 0) || 4424 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4425 break; 4426 } 4427 while ((cnt > 0) && 4428 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4429 p1++; 4430 p2++; 4431 cnt--; 4432 } 4433 } 4434 } 4435 4436 /* return SD_SUCCESS if both strings match */ 4437 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE);
4438 }
4439
4440
4441 /*
4442 * Function: sd_chk_vers1_data
4443 *
4444 * Description: Verify the version 1 device properties provided by the
4445 * user via the configuration file
4446 *
4447 * Arguments: un - driver soft state (unit) structure
4448 * flags - integer mask indicating properties to be set
4449 * prop_list - integer list of property values
4450 * list_len - number of elements in the property list
4451 *
4452 * Return Code: SD_SUCCESS - Indicates the user provided data is valid
4453 * SD_FAILURE - Indicates the user provided data is invalid
4454 */
4455
4456 static int
4457 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
4458 int list_len, char *dataname_ptr)
4459 {
4460 int i;
4461 int mask = 1;
4462 int index = 0;
4463
4464 ASSERT(un != NULL);
4465
4466 /* Check for a NULL property name and list */
4467 if (dataname_ptr == NULL) {
4468 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4469 "sd_chk_vers1_data: NULL data property name.");
4470 return (SD_FAILURE);
4471 }
4472 if (prop_list == NULL) {
4473 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4474 "sd_chk_vers1_data: %s NULL data property list.",
4475 dataname_ptr);
4476 return (SD_FAILURE);
4477 }
4478
4479 /* Display a warning if undefined bits are set in the flags */
4480 if (flags & ~SD_CONF_BIT_MASK) {
4481 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4482 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. "
4483 "Properties not set.",
4484 (flags & ~SD_CONF_BIT_MASK), dataname_ptr);
4485 return (SD_FAILURE);
4486 }
4487
4488 /*
4489 * Verify the length of the list by identifying the highest bit set
4490 * in the flags and validating that the property list has a length
4491 * up to the index of this bit.
4492 */
4493 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
4494 if (flags & mask) {
4495 index = i + 1;
4496 }
4497 mask = 1 << (i + 1);
4498 }
4499 if (list_len < (index + 2)) {
4500 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4501 "sd_chk_vers1_data: "
4502 "Data property list %s size is incorrect. "
4503 "Properties not set.", dataname_ptr);
4504 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: "
4505 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS);
4506 return (SD_FAILURE);
4507 }
4508 return (SD_SUCCESS);
4509 }
4510
4511
4512 /*
4513 * Function: sd_set_vers1_properties
4514 *
4515 * Description: Set version 1 device properties based on a property list
4516 * retrieved from the driver configuration file or static
4517 * configuration table. Version 1 properties have the format:
4518 *
4519 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
4520 *
4521 * where the prop0 value will be used to set prop0 if bit0
4522 * is set in the flags
4523 *
4524 * Arguments: un - driver soft state (unit) structure
4525 * flags - integer mask indicating properties to be set
4526 * prop_list - integer list of property values
4527 */
4528
4529 static void
4530 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list)
4531 {
4532 ASSERT(un != NULL);
4533
4534 /*
4535 * Set the flag to indicate cache is to be disabled. An attempt
4536 * to disable the cache via sd_cache_control() will be made
4537 * later during attach once the basic initialization is complete.
4538 */ 4539 if (flags & SD_CONF_BSET_NOCACHE) { 4540 un->un_f_opt_disable_cache = TRUE; 4541 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4542 "sd_set_vers1_properties: caching disabled flag set\n"); 4543 } 4544 4545 /* CD-specific configuration parameters */ 4546 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4547 un->un_f_cfg_playmsf_bcd = TRUE; 4548 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4549 "sd_set_vers1_properties: playmsf_bcd set\n"); 4550 } 4551 if (flags & SD_CONF_BSET_READSUB_BCD) { 4552 un->un_f_cfg_readsub_bcd = TRUE; 4553 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4554 "sd_set_vers1_properties: readsub_bcd set\n"); 4555 } 4556 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4557 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4558 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4559 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4560 } 4561 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4562 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4563 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4564 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4565 } 4566 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4567 un->un_f_cfg_no_read_header = TRUE; 4568 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4569 "sd_set_vers1_properties: no_read_header set\n"); 4570 } 4571 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4572 un->un_f_cfg_read_cd_xd4 = TRUE; 4573 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4574 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4575 } 4576 4577 /* Support for devices which do not have valid/unique serial numbers */ 4578 if (flags & SD_CONF_BSET_FAB_DEVID) { 4579 un->un_f_opt_fab_devid = TRUE; 4580 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4581 "sd_set_vers1_properties: fab_devid bit set\n"); 4582 } 4583 4584 /* Support for user throttle configuration */ 4585 if (flags & SD_CONF_BSET_THROTTLE) { 4586 ASSERT(prop_list != NULL); 4587 un->un_saved_throttle = un->un_throttle = 4588 prop_list->sdt_throttle; 4589 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4590 "sd_set_vers1_properties: throttle set to %d\n", 4591 prop_list->sdt_throttle); 4592 } 4593 4594 /* Set the per disk retry count according to the conf file or table. 
*/ 4595 if (flags & SD_CONF_BSET_NRR_COUNT) { 4596 ASSERT(prop_list != NULL); 4597 if (prop_list->sdt_not_rdy_retries) { 4598 un->un_notready_retry_count = 4599 prop_list->sdt_not_rdy_retries; 4600 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4601 "sd_set_vers1_properties: not ready retry count" 4602 " set to %d\n", un->un_notready_retry_count); 4603 } 4604 } 4605 4606 /* The controller type is reported for generic disk driver ioctls */ 4607 if (flags & SD_CONF_BSET_CTYPE) { 4608 ASSERT(prop_list != NULL); 4609 switch (prop_list->sdt_ctype) { 4610 case CTYPE_CDROM: 4611 un->un_ctype = prop_list->sdt_ctype; 4612 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4613 "sd_set_vers1_properties: ctype set to " 4614 "CTYPE_CDROM\n"); 4615 break; 4616 case CTYPE_CCS: 4617 un->un_ctype = prop_list->sdt_ctype; 4618 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4619 "sd_set_vers1_properties: ctype set to " 4620 "CTYPE_CCS\n"); 4621 break; 4622 case CTYPE_ROD: /* RW optical */ 4623 un->un_ctype = prop_list->sdt_ctype; 4624 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4625 "sd_set_vers1_properties: ctype set to " 4626 "CTYPE_ROD\n"); 4627 break; 4628 default: 4629 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4630 "sd_set_vers1_properties: Could not set " 4631 "invalid ctype value (%d)", 4632 prop_list->sdt_ctype); 4633 } 4634 } 4635 4636 /* Purple failover timeout */ 4637 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4638 ASSERT(prop_list != NULL); 4639 un->un_busy_retry_count = 4640 prop_list->sdt_busy_retries; 4641 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4642 "sd_set_vers1_properties: " 4643 "busy retry count set to %d\n", 4644 un->un_busy_retry_count); 4645 } 4646 4647 /* Purple reset retry count */ 4648 if (flags & SD_CONF_BSET_RST_RETRIES) { 4649 ASSERT(prop_list != NULL); 4650 un->un_reset_retry_count = 4651 prop_list->sdt_reset_retries; 4652 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4653 "sd_set_vers1_properties: " 4654 "reset retry count set to %d\n", 4655 un->un_reset_retry_count); 4656 } 4657 4658 /* Purple reservation release timeout */ 4659 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4660 ASSERT(prop_list != NULL); 4661 un->un_reserve_release_time = 4662 prop_list->sdt_reserv_rel_time; 4663 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4664 "sd_set_vers1_properties: " 4665 "reservation release timeout set to %d\n", 4666 un->un_reserve_release_time); 4667 } 4668 4669 /* 4670 * Driver flag telling the driver to verify that no commands are pending 4671 * for a device before issuing a Test Unit Ready. This is a workaround 4672 * for a firmware bug in some Seagate eliteI drives. 4673 */ 4674 if (flags & SD_CONF_BSET_TUR_CHECK) { 4675 un->un_f_cfg_tur_check = TRUE; 4676 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4677 "sd_set_vers1_properties: tur queue check set\n"); 4678 } 4679 4680 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4681 un->un_min_throttle = prop_list->sdt_min_throttle; 4682 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4683 "sd_set_vers1_properties: min throttle set to %d\n", 4684 un->un_min_throttle); 4685 } 4686 4687 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4688 un->un_f_disksort_disabled = 4689 (prop_list->sdt_disk_sort_dis != 0) ? 4690 TRUE : FALSE; 4691 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4692 "sd_set_vers1_properties: disksort disabled " 4693 "flag set to %d\n", 4694 prop_list->sdt_disk_sort_dis); 4695 } 4696 4697 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4698 un->un_f_lun_reset_enabled = 4699 (prop_list->sdt_lun_reset_enable != 0) ? 
4700 TRUE : FALSE;
4701 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4702 "sd_set_vers1_properties: lun reset enabled "
4703 "flag set to %d\n",
4704 prop_list->sdt_lun_reset_enable);
4705 }
4706
4707 if (flags & SD_CONF_BSET_CACHE_IS_NV) {
4708 un->un_f_suppress_cache_flush =
4709 (prop_list->sdt_suppress_cache_flush != 0) ?
4710 TRUE : FALSE;
4711 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4712 "sd_set_vers1_properties: suppress_cache_flush "
4713 "flag set to %d\n",
4714 prop_list->sdt_suppress_cache_flush);
4715 }
4716
4717 /*
4718 * Validate the throttle values.
4719 * If any of the numbers are invalid, set everything to defaults.
4720 */
4721 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
4722 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
4723 (un->un_min_throttle > un->un_throttle)) {
4724 un->un_saved_throttle = un->un_throttle = sd_max_throttle;
4725 un->un_min_throttle = sd_min_throttle;
4726 }
4727 }
4728
4729 /*
4730 * Function: sd_is_lsi()
4731 *
4732 * Description: Check for LSI devices by stepping through the static
4733 * device table to match vid/pid.
4734 *
4735 * Args: un - ptr to sd_lun
4736 *
4737 * Notes: When creating a new LSI property, it must also be added
4738 * to this function.
4739 */
4740 static void
4741 sd_is_lsi(struct sd_lun *un)
4742 {
4743 char *id = NULL;
4744 int table_index;
4745 int idlen;
4746 void *prop;
4747
4748 ASSERT(un != NULL);
4749 for (table_index = 0; table_index < sd_disk_table_size;
4750 table_index++) {
4751 id = sd_disk_table[table_index].device_id;
4752 idlen = strlen(id);
4753 if (idlen == 0) {
4754 continue;
4755 }
4756
4757 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
4758 prop = sd_disk_table[table_index].properties;
4759 if (prop == &lsi_properties ||
4760 prop == &lsi_oem_properties ||
4761 prop == &lsi_properties_scsi ||
4762 prop == &symbios_properties) {
4763 un->un_f_cfg_is_lsi = TRUE;
4764 }
4765 break;
4766 }
4767 }
4768 }
4769
4770 /*
4771 * Function: sd_get_physical_geometry
4772 *
4773 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and
4774 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the
4775 * target, and use this information to initialize the physical
4776 * geometry cache specified by pgeom_p.
4777 *
4778 * MODE SENSE is an optional command, so failure in this case
4779 * does not necessarily denote an error. We want to use the
4780 * MODE SENSE commands to derive the physical geometry of the
4781 * device, but if either command fails, the logical geometry is
4782 * used as the fallback for disk label geometry in cmlb.
4783 *
4784 * This requires that un->un_blockcount and un->un_tgt_blocksize
4785 * have already been initialized for the current target and
4786 * that the current values be passed as args so that we don't
4787 * end up ever trying to use -1 as a valid value. This could
4788 * happen if either value is reset while we're not holding
4789 * the mutex.
4790 *
4791 * Arguments: un - driver soft state (unit) structure
4792 * pgeom_p - physical geometry cache to be initialized
4793 * capacity - disk capacity in #blocks; lbasize - block size in bytes
4794 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and the
4795 * normal command waitq, or SD_PATH_DIRECT_PRIORITY to bypass the waitq.
4796 *
4797 * Context: Kernel thread only (can sleep).
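 *
 * The geometry derived below comes from the two pages as follows:
 *
 *     nsect              = page 3 sectors/track
 *     nhead              = page 4 heads
 *     ncyl               = page 4 cylinder count (3 bytes, big-endian)
 *     spc                = nhead * nsect
 *     modesense_capacity = spc * ncyl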
4798 */ 4799 4800 static int 4801 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4802 diskaddr_t capacity, int lbasize, int path_flag) 4803 { 4804 struct mode_format *page3p; 4805 struct mode_geometry *page4p; 4806 struct mode_header *headerp; 4807 int sector_size; 4808 int nsect; 4809 int nhead; 4810 int ncyl; 4811 int intrlv; 4812 int spc; 4813 diskaddr_t modesense_capacity; 4814 int rpm; 4815 int bd_len; 4816 int mode_header_length; 4817 uchar_t *p3bufp; 4818 uchar_t *p4bufp; 4819 int cdbsize; 4820 int ret = EIO; 4821 sd_ssc_t *ssc; 4822 int status; 4823 4824 ASSERT(un != NULL); 4825 4826 if (lbasize == 0) { 4827 if (ISCD(un)) { 4828 lbasize = 2048; 4829 } else { 4830 lbasize = un->un_sys_blocksize; 4831 } 4832 } 4833 pgeom_p->g_secsize = (unsigned short)lbasize; 4834 4835 /* 4836 * If the unit is a cd/dvd drive MODE SENSE page three 4837 * and MODE SENSE page four are reserved (see SBC spec 4838 * and MMC spec). To prevent soft errors just return 4839 * using the default LBA size. 4840 */ 4841 if (ISCD(un)) 4842 return (ret); 4843 4844 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4845 4846 /* 4847 * Retrieve MODE SENSE page 3 - Format Device Page 4848 */ 4849 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4850 ssc = sd_ssc_init(un); 4851 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp, 4852 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag); 4853 if (status != 0) { 4854 SD_ERROR(SD_LOG_COMMON, un, 4855 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4856 goto page3_exit; 4857 } 4858 4859 /* 4860 * Determine size of Block Descriptors in order to locate the mode 4861 * page data. ATAPI devices return 0, SCSI devices should return 4862 * MODE_BLK_DESC_LENGTH. 4863 */ 4864 headerp = (struct mode_header *)p3bufp; 4865 if (un->un_f_cfg_is_atapi == TRUE) { 4866 struct mode_header_grp2 *mhp = 4867 (struct mode_header_grp2 *)headerp; 4868 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4869 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4870 } else { 4871 mode_header_length = MODE_HEADER_LENGTH; 4872 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4873 } 4874 4875 if (bd_len > MODE_BLK_DESC_LENGTH) { 4876 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4877 "sd_get_physical_geometry: received unexpected bd_len " 4878 "of %d, page3\n", bd_len); 4879 status = EIO; 4880 goto page3_exit; 4881 } 4882 4883 page3p = (struct mode_format *) 4884 ((caddr_t)headerp + mode_header_length + bd_len); 4885 4886 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4887 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4888 "sd_get_physical_geometry: mode sense pg3 code mismatch " 4889 "%d\n", page3p->mode_page.code); 4890 status = EIO; 4891 goto page3_exit; 4892 } 4893 4894 /* 4895 * Use this physical geometry data only if BOTH MODE SENSE commands 4896 * complete successfully; otherwise, revert to the logical geometry. 4897 * So, we need to save everything in temporary variables. 
4898 */ 4899 sector_size = BE_16(page3p->data_bytes_sect); 4900 4901 /* 4902 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4903 */ 4904 if (sector_size == 0) { 4905 sector_size = un->un_sys_blocksize; 4906 } else { 4907 sector_size &= ~(un->un_sys_blocksize - 1); 4908 } 4909 4910 nsect = BE_16(page3p->sect_track); 4911 intrlv = BE_16(page3p->interleave); 4912 4913 SD_INFO(SD_LOG_COMMON, un, 4914 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4915 SD_INFO(SD_LOG_COMMON, un, 4916 " mode page: %d; nsect: %d; sector size: %d;\n", 4917 page3p->mode_page.code, nsect, sector_size); 4918 SD_INFO(SD_LOG_COMMON, un, 4919 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4920 BE_16(page3p->track_skew), 4921 BE_16(page3p->cylinder_skew)); 4922 4923 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 4924 4925 /* 4926 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4927 */ 4928 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4929 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp, 4930 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag); 4931 if (status != 0) { 4932 SD_ERROR(SD_LOG_COMMON, un, 4933 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4934 goto page4_exit; 4935 } 4936 4937 /* 4938 * Determine size of Block Descriptors in order to locate the mode 4939 * page data. ATAPI devices return 0, SCSI devices should return 4940 * MODE_BLK_DESC_LENGTH. 4941 */ 4942 headerp = (struct mode_header *)p4bufp; 4943 if (un->un_f_cfg_is_atapi == TRUE) { 4944 struct mode_header_grp2 *mhp = 4945 (struct mode_header_grp2 *)headerp; 4946 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4947 } else { 4948 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4949 } 4950 4951 if (bd_len > MODE_BLK_DESC_LENGTH) { 4952 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4953 "sd_get_physical_geometry: received unexpected bd_len of " 4954 "%d, page4\n", bd_len); 4955 status = EIO; 4956 goto page4_exit; 4957 } 4958 4959 page4p = (struct mode_geometry *) 4960 ((caddr_t)headerp + mode_header_length + bd_len); 4961 4962 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4963 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4964 "sd_get_physical_geometry: mode sense pg4 code mismatch " 4965 "%d\n", page4p->mode_page.code); 4966 status = EIO; 4967 goto page4_exit; 4968 } 4969 4970 /* 4971 * Stash the data now, after we know that both commands completed. 4972 */ 4973 4974 4975 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4976 spc = nhead * nsect; 4977 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4978 rpm = BE_16(page4p->rpm); 4979 4980 modesense_capacity = spc * ncyl; 4981 4982 SD_INFO(SD_LOG_COMMON, un, 4983 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4984 SD_INFO(SD_LOG_COMMON, un, 4985 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4986 SD_INFO(SD_LOG_COMMON, un, 4987 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4988 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4989 (void *)pgeom_p, capacity); 4990 4991 /* 4992 * Compensate if the drive's geometry is not rectangular, i.e., 4993 * the product of C * H * S returned by MODE SENSE >= that returned 4994 * by read capacity. This is an idiosyncrasy of the original x86 4995 * disk subsystem. 
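 *
 * The alternate cylinder count computed below is a rounded-up
 * quotient: for example, if MODE SENSE accounts for 1000 more
 * blocks than READ CAPACITY and spc is 600, acyl becomes
 * (1000 + 600 - 1) / 600 = 2.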
4996 */ 4997 if (modesense_capacity >= capacity) { 4998 SD_INFO(SD_LOG_COMMON, un, 4999 "sd_get_physical_geometry: adjusting acyl; " 5000 "old: %d; new: %d\n", pgeom_p->g_acyl, 5001 (modesense_capacity - capacity + spc - 1) / spc); 5002 if (sector_size != 0) { 5003 /* 1243403: NEC D38x7 drives don't support sec size */ 5004 pgeom_p->g_secsize = (unsigned short)sector_size; 5005 } 5006 pgeom_p->g_nsect = (unsigned short)nsect; 5007 pgeom_p->g_nhead = (unsigned short)nhead; 5008 pgeom_p->g_capacity = capacity; 5009 pgeom_p->g_acyl = 5010 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 5011 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 5012 } 5013 5014 pgeom_p->g_rpm = (unsigned short)rpm; 5015 pgeom_p->g_intrlv = (unsigned short)intrlv; 5016 ret = 0; 5017 5018 SD_INFO(SD_LOG_COMMON, un, 5019 "sd_get_physical_geometry: mode sense geometry:\n"); 5020 SD_INFO(SD_LOG_COMMON, un, 5021 " nsect: %d; sector size: %d; interlv: %d\n", 5022 nsect, sector_size, intrlv); 5023 SD_INFO(SD_LOG_COMMON, un, 5024 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 5025 nhead, ncyl, rpm, modesense_capacity); 5026 SD_INFO(SD_LOG_COMMON, un, 5027 "sd_get_physical_geometry: (cached)\n"); 5028 SD_INFO(SD_LOG_COMMON, un, 5029 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 5030 pgeom_p->g_ncyl, pgeom_p->g_acyl, 5031 pgeom_p->g_nhead, pgeom_p->g_nsect); 5032 SD_INFO(SD_LOG_COMMON, un, 5033 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 5034 pgeom_p->g_secsize, pgeom_p->g_capacity, 5035 pgeom_p->g_intrlv, pgeom_p->g_rpm); 5036 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 5037 5038 page4_exit: 5039 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 5040 5041 page3_exit: 5042 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 5043 5044 if (status != 0) { 5045 if (status == EIO) { 5046 /* 5047 * Some disks do not support mode sense(6), we 5048 * should ignore this kind of error(sense key is 5049 * 0x5 - illegal request). 5050 */ 5051 uint8_t *sensep; 5052 int senlen; 5053 5054 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 5055 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 5056 ssc->ssc_uscsi_cmd->uscsi_rqresid); 5057 5058 if (senlen > 0 && 5059 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 5060 sd_ssc_assessment(ssc, 5061 SD_FMT_IGNORE_COMPROMISE); 5062 } else { 5063 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 5064 } 5065 } else { 5066 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5067 } 5068 } 5069 sd_ssc_fini(ssc); 5070 return (ret); 5071 } 5072 5073 /* 5074 * Function: sd_get_virtual_geometry 5075 * 5076 * Description: Ask the controller to tell us about the target device. 5077 * 5078 * Arguments: un - pointer to softstate 5079 * capacity - disk capacity in #blocks 5080 * lbasize - disk block size in bytes 5081 * 5082 * Context: Kernel thread only 5083 */ 5084 5085 static int 5086 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 5087 diskaddr_t capacity, int lbasize) 5088 { 5089 uint_t geombuf; 5090 int spc; 5091 5092 ASSERT(un != NULL); 5093 5094 /* Set sector size, and total number of sectors */ 5095 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 5096 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 5097 5098 /* Let the HBA tell us its geometry */ 5099 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 5100 5101 /* A value of -1 indicates an undefined "geometry" property */ 5102 if (geombuf == (-1)) { 5103 return (EINVAL); 5104 } 5105 5106 /* Initialize the logical geometry cache. 
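 *
 * The "geometry" capability packs the head count into the upper
 * 16 bits of the returned word and the sectors/track count into
 * the lower 16 bits, so it is unpacked below as:
 *
 *     nhead = (geombuf >> 16) & 0xffff
 *     nsect = geombuf & 0xffff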
*/ 5107 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 5108 lgeom_p->g_nsect = geombuf & 0xffff; 5109 lgeom_p->g_secsize = un->un_sys_blocksize; 5110 5111 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 5112 5113 /* 5114 * Note: The driver originally converted the capacity value from 5115 * target blocks to system blocks. However, the capacity value passed 5116 * to this routine is already in terms of system blocks (this scaling 5117 * is done when the READ CAPACITY command is issued and processed). 5118 * This 'error' may have gone undetected because the usage of g_ncyl 5119 * (which is based upon g_capacity) is very limited within the driver 5120 */ 5121 lgeom_p->g_capacity = capacity; 5122 5123 /* 5124 * Set ncyl to zero if the hba returned a zero nhead or nsect value. The 5125 * hba may return zero values if the device has been removed. 5126 */ 5127 if (spc == 0) { 5128 lgeom_p->g_ncyl = 0; 5129 } else { 5130 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 5131 } 5132 lgeom_p->g_acyl = 0; 5133 5134 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 5135 return (0); 5136 5137 } 5138 /* 5139 * Function: sd_update_block_info 5140 * 5141 * Description: Calculate a byte count to sector count bitshift value 5142 * from sector size. 5143 * 5144 * Arguments: un: unit struct. 5145 * lbasize: new target sector size 5146 * capacity: new target capacity, ie. block count 5147 * 5148 * Context: Kernel thread context 5149 */ 5150 5151 static void 5152 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 5153 { 5154 if (lbasize != 0) { 5155 un->un_tgt_blocksize = lbasize; 5156 un->un_f_tgt_blocksize_is_valid = TRUE; 5157 if (!un->un_f_has_removable_media) { 5158 un->un_sys_blocksize = lbasize; 5159 } 5160 } 5161 5162 if (capacity != 0) { 5163 un->un_blockcount = capacity; 5164 un->un_f_blockcount_is_valid = TRUE; 5165 } 5166 } 5167 5168 5169 /* 5170 * Function: sd_register_devid 5171 * 5172 * Description: This routine will obtain the device id information from the 5173 * target, obtain the serial number, and register the device 5174 * id with the ddi framework. 5175 * 5176 * Arguments: devi - the system's dev_info_t for the device. 5177 * un - driver soft state (unit) structure 5178 * reservation_flag - indicates if a reservation conflict 5179 * occurred during attach 5180 * 5181 * Context: Kernel Thread 5182 */ 5183 static void 5184 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag) 5185 { 5186 int rval = 0; 5187 uchar_t *inq80 = NULL; 5188 size_t inq80_len = MAX_INQUIRY_SIZE; 5189 size_t inq80_resid = 0; 5190 uchar_t *inq83 = NULL; 5191 size_t inq83_len = MAX_INQUIRY_SIZE; 5192 size_t inq83_resid = 0; 5193 int dlen, len; 5194 char *sn; 5195 struct sd_lun *un; 5196 5197 ASSERT(ssc != NULL); 5198 un = ssc->ssc_un; 5199 ASSERT(un != NULL); 5200 ASSERT(mutex_owned(SD_MUTEX(un))); 5201 ASSERT((SD_DEVINFO(un)) == devi); 5202 5203 5204 /* 5205 * We check the availability of the World Wide Name (0x83) and Unit 5206 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 5207 * un_vpd_page_mask from them, we decide which way to get the WWN. If 5208 * 0x83 is available, that is the best choice. Our next choice is 5209 * 0x80. If neither are available, we munge the devid from the device 5210 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 5211 * to fabricate a devid for non-Sun qualified disks. 
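 *
 * In outline (illustrative pseudo-code only; the real flow below also
 * honors a devid already registered by the transport):
 *
 *	if (VPD page 0x83 is available)
 *		encode the devid from the device identification page;
 *	else if (VPD page 0x80 is available)
 *		encode the devid from the unit serial number page;
 *	else if (Sun qualified disk)
 *		munge the devid from the vid/pid/serial number;
 *	else
 *		fabricate a devid via ddi_devid_init(..., DEVID_FAB, ...);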
5212 */ 5213 if (sd_check_vpd_page_support(ssc) == 0) { 5214 /* collect page 80 data if available */ 5215 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 5216 5217 mutex_exit(SD_MUTEX(un)); 5218 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 5219 5220 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len, 5221 0x01, 0x80, &inq80_resid); 5222 5223 if (rval != 0) { 5224 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5225 kmem_free(inq80, inq80_len); 5226 inq80 = NULL; 5227 inq80_len = 0; 5228 } else if (ddi_prop_exists( 5229 DDI_DEV_T_NONE, SD_DEVINFO(un), 5230 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 5231 INQUIRY_SERIAL_NO) == 0) { 5232 /* 5233 * If we don't already have a serial number 5234 * property, do quick verify of data returned 5235 * and define property. 5236 */ 5237 dlen = inq80_len - inq80_resid; 5238 len = (size_t)inq80[3]; 5239 if ((dlen >= 4) && ((len + 4) <= dlen)) { 5240 /* 5241 * Ensure sn termination, skip leading 5242 * blanks, and create property 5243 * 'inquiry-serial-no'. 5244 */ 5245 sn = (char *)&inq80[4]; 5246 sn[len] = 0; 5247 while (*sn && (*sn == ' ')) 5248 sn++; 5249 if (*sn) { 5250 (void) ddi_prop_update_string( 5251 DDI_DEV_T_NONE, 5252 SD_DEVINFO(un), 5253 INQUIRY_SERIAL_NO, sn); 5254 } 5255 } 5256 } 5257 mutex_enter(SD_MUTEX(un)); 5258 } 5259 5260 /* collect page 83 data if available */ 5261 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 5262 mutex_exit(SD_MUTEX(un)); 5263 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 5264 5265 rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len, 5266 0x01, 0x83, &inq83_resid); 5267 5268 if (rval != 0) { 5269 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5270 kmem_free(inq83, inq83_len); 5271 inq83 = NULL; 5272 inq83_len = 0; 5273 } 5274 mutex_enter(SD_MUTEX(un)); 5275 } 5276 } 5277 5278 /* 5279 * If transport has already registered a devid for this target 5280 * then that takes precedence over the driver's determination 5281 * of the devid. 5282 * 5283 * NOTE: The reason this check is done here instead of at the beginning 5284 * of the function is to allow the code above to create the 5285 * 'inquiry-serial-no' property. 5286 */ 5287 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) { 5288 ASSERT(un->un_devid); 5289 un->un_f_devid_transport_defined = TRUE; 5290 goto cleanup; /* use devid registered by the transport */ 5291 } 5292 5293 /* 5294 * This is the case of antiquated Sun disk drives that have the 5295 * FAB_DEVID property set in the disk_table. These drives 5296 * manage the devid's by storing them in last 2 available sectors 5297 * on the drive and have them fabricated by the ddi layer by calling 5298 * ddi_devid_init and passing the DEVID_FAB flag. 5299 */ 5300 if (un->un_f_opt_fab_devid == TRUE) { 5301 /* 5302 * Depending on EINVAL isn't reliable, since a reserved disk 5303 * may result in invalid geometry, so check to make sure a 5304 * reservation conflict did not occur during attach. 5305 */ 5306 if ((sd_get_devid(ssc) == EINVAL) && 5307 (reservation_flag != SD_TARGET_IS_RESERVED)) { 5308 /* 5309 * The devid is invalid AND there is no reservation 5310 * conflict. Fabricate a new devid. 
5311 */ 5312 (void) sd_create_devid(ssc); 5313 } 5314 5315 /* Register the devid if it exists */ 5316 if (un->un_devid != NULL) { 5317 (void) ddi_devid_register(SD_DEVINFO(un), 5318 un->un_devid); 5319 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5320 "sd_register_devid: Devid Fabricated\n"); 5321 } 5322 goto cleanup; 5323 } 5324 5325 /* encode best devid possible based on data available */ 5326 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 5327 (char *)ddi_driver_name(SD_DEVINFO(un)), 5328 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 5329 inq80, inq80_len - inq80_resid, inq83, inq83_len - 5330 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 5331 5332 /* devid successfully encoded, register devid */ 5333 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 5334 5335 } else { 5336 /* 5337 * Unable to encode a devid based on data available. 5338 * This is not a Sun qualified disk. Older Sun disk 5339 * drives that have the SD_FAB_DEVID property 5340 * set in the disk_table and non-Sun qualified 5341 * disks are treated in the same manner. These 5342 * drives manage the devid's by storing them in 5343 * last 2 available sectors on the drive and 5344 * have them fabricated by the ddi layer by 5345 * calling ddi_devid_init and passing the 5346 * DEVID_FAB flag. 5347 * Create a fabricated devid only if no 5348 * fabricated devid exists yet. 5349 */ 5350 if (sd_get_devid(ssc) == EINVAL) { 5351 (void) sd_create_devid(ssc); 5352 } 5353 un->un_f_opt_fab_devid = TRUE; 5354 5355 /* Register the devid if it exists */ 5356 if (un->un_devid != NULL) { 5357 (void) ddi_devid_register(SD_DEVINFO(un), 5358 un->un_devid); 5359 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5360 "sd_register_devid: devid fabricated using " 5361 "ddi framework\n"); 5362 } 5363 } 5364 5365 cleanup: 5366 /* clean up resources */ 5367 if (inq80 != NULL) { 5368 kmem_free(inq80, inq80_len); 5369 } 5370 if (inq83 != NULL) { 5371 kmem_free(inq83, inq83_len); 5372 } 5373 } 5374 5375 5376 5377 /* 5378 * Function: sd_get_devid 5379 * 5380 * Description: This routine will return 0 if a valid device id has been 5381 * obtained from the target and stored in the soft state. If a 5382 * valid device id has not been previously read and stored, a 5383 * read attempt will be made. 5384 * 5385 * Arguments: un - driver soft state (unit) structure 5386 * 5387 * Return Code: 0 if we successfully get the device id 5388 * 5389 * Context: Kernel Thread 5390 */ 5391 5392 static int 5393 sd_get_devid(sd_ssc_t *ssc) 5394 { 5395 struct dk_devid *dkdevid; 5396 ddi_devid_t tmpid; 5397 uint_t *ip; 5398 size_t sz; 5399 diskaddr_t blk; 5400 int status; 5401 int chksum; 5402 int i; 5403 size_t buffer_size; 5404 struct sd_lun *un; 5405 5406 ASSERT(ssc != NULL); 5407 un = ssc->ssc_un; 5408 ASSERT(un != NULL); 5409 ASSERT(mutex_owned(SD_MUTEX(un))); 5410 5411 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 5412 un); 5413 5414 if (un->un_devid != NULL) { 5415 return (0); 5416 } 5417 5418 mutex_exit(SD_MUTEX(un)); 5419 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5420 (void *)SD_PATH_DIRECT) != 0) { 5421 mutex_enter(SD_MUTEX(un)); 5422 return (EINVAL); 5423 } 5424 5425 /* 5426 * Read and verify device id, stored in the reserved cylinders at the 5427 * end of the disk. Backup label is on the odd sectors of the last 5428 * track of the last cylinder. Device id will be on the track of the 5429 * next-to-last cylinder.
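 *
 * The sector read below is validated in three steps: the dk_devid
 * revision bytes, a simple XOR checksum over the sector, and
 * ddi_devid_valid() on the embedded devid. The checksum fold can be
 * sketched as (assuming the dk_devid occupies one DEV_BSIZE sector):
 *
 *	uint_t sum = 0;
 *	uint_t *ip = (uint_t *)dkdevid;
 *	for (i = 0; i < (DEV_BSIZE - sizeof (int)) / sizeof (int); i++)
 *		sum ^= ip[i];	(XOR of all words except the checksum)
 *	ok = (DKD_GETCHKSUM(dkdevid) == sum);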
5430 */ 5431 mutex_enter(SD_MUTEX(un)); 5432 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 5433 mutex_exit(SD_MUTEX(un)); 5434 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 5435 status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk, 5436 SD_PATH_DIRECT); 5437 5438 if (status != 0) { 5439 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5440 goto error; 5441 } 5442 5443 /* Validate the revision */ 5444 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 5445 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 5446 status = EINVAL; 5447 goto error; 5448 } 5449 5450 /* Calculate the checksum */ 5451 chksum = 0; 5452 ip = (uint_t *)dkdevid; 5453 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int)); 5454 i++) { 5455 chksum ^= ip[i]; 5456 } 5457 5458 /* Compare the checksums */ 5459 if (DKD_GETCHKSUM(dkdevid) != chksum) { 5460 status = EINVAL; 5461 goto error; 5462 } 5463 5464 /* Validate the device id */ 5465 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 5466 status = EINVAL; 5467 goto error; 5468 } 5469 5470 /* 5471 * Store the device id in the driver soft state 5472 */ 5473 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 5474 tmpid = kmem_alloc(sz, KM_SLEEP); 5475 5476 mutex_enter(SD_MUTEX(un)); 5477 5478 un->un_devid = tmpid; 5479 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 5480 5481 kmem_free(dkdevid, buffer_size); 5482 5483 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 5484 5485 return (status); 5486 error: 5487 mutex_enter(SD_MUTEX(un)); 5488 kmem_free(dkdevid, buffer_size); 5489 return (status); 5490 } 5491 5492 5493 /* 5494 * Function: sd_create_devid 5495 * 5496 * Description: This routine will fabricate the device id and write it 5497 * to the disk. 5498 * 5499 * Arguments: un - driver soft state (unit) structure 5500 * 5501 * Return Code: value of the fabricated device id 5502 * 5503 * Context: Kernel Thread 5504 */ 5505 5506 static ddi_devid_t 5507 sd_create_devid(sd_ssc_t *ssc) 5508 { 5509 struct sd_lun *un; 5510 5511 ASSERT(ssc != NULL); 5512 un = ssc->ssc_un; 5513 ASSERT(un != NULL); 5514 5515 /* Fabricate the devid */ 5516 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 5517 == DDI_FAILURE) { 5518 return (NULL); 5519 } 5520 5521 /* Write the devid to disk */ 5522 if (sd_write_deviceid(ssc) != 0) { 5523 ddi_devid_free(un->un_devid); 5524 un->un_devid = NULL; 5525 } 5526 5527 return (un->un_devid); 5528 } 5529 5530 5531 /* 5532 * Function: sd_write_deviceid 5533 * 5534 * Description: This routine will write the device id to the disk 5535 * reserved sector. 
5536 * 5537 * Arguments: un - driver soft state (unit) structure 5538 * 5539 * Return Code: EINVAL 5540 * value returned by sd_send_scsi_cmd 5541 * 5542 * Context: Kernel Thread 5543 */ 5544 5545 static int 5546 sd_write_deviceid(sd_ssc_t *ssc) 5547 { 5548 struct dk_devid *dkdevid; 5549 uchar_t *buf; 5550 diskaddr_t blk; 5551 uint_t *ip, chksum; 5552 int status; 5553 int i; 5554 struct sd_lun *un; 5555 5556 ASSERT(ssc != NULL); 5557 un = ssc->ssc_un; 5558 ASSERT(un != NULL); 5559 ASSERT(mutex_owned(SD_MUTEX(un))); 5560 5561 mutex_exit(SD_MUTEX(un)); 5562 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5563 (void *)SD_PATH_DIRECT) != 0) { 5564 mutex_enter(SD_MUTEX(un)); 5565 return (-1); 5566 } 5567 5568 5569 /* Allocate the buffer */ 5570 buf = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 5571 dkdevid = (struct dk_devid *)buf; 5572 5573 /* Fill in the revision */ 5574 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 5575 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 5576 5577 /* Copy in the device id */ 5578 mutex_enter(SD_MUTEX(un)); 5579 bcopy(un->un_devid, &dkdevid->dkd_devid, 5580 ddi_devid_sizeof(un->un_devid)); 5581 mutex_exit(SD_MUTEX(un)); 5582 5583 /* Calculate the checksum */ 5584 chksum = 0; 5585 ip = (uint_t *)dkdevid; 5586 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int)); 5587 i++) { 5588 chksum ^= ip[i]; 5589 } 5590 5591 /* Fill-in checksum */ 5592 DKD_FORMCHKSUM(chksum, dkdevid); 5593 5594 /* Write the reserved sector */ 5595 status = sd_send_scsi_WRITE(ssc, buf, un->un_sys_blocksize, blk, 5596 SD_PATH_DIRECT); 5597 if (status != 0) 5598 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5599 5600 kmem_free(buf, un->un_sys_blocksize); 5601 5602 mutex_enter(SD_MUTEX(un)); 5603 return (status); 5604 } 5605 5606 5607 /* 5608 * Function: sd_check_vpd_page_support 5609 * 5610 * Description: This routine sends an inquiry command with the EVPD bit set and 5611 * a page code of 0x00 to the device. It is used to determine which 5612 * vital product pages are available to find the devid. We are 5613 * looking for pages 0x83 or 0x80. If we return a negative 1, the 5614 * device does not support that command. 5615 * 5616 * Arguments: un - driver soft state (unit) structure 5617 * 5618 * Return Code: 0 - success 5619 * 1 - check condition 5620 * 5621 * Context: This routine can sleep. 5622 */ 5623 5624 static int 5625 sd_check_vpd_page_support(sd_ssc_t *ssc) 5626 { 5627 uchar_t *page_list = NULL; 5628 uchar_t page_length = 0xff; /* Use max possible length */ 5629 uchar_t evpd = 0x01; /* Set the EVPD bit */ 5630 uchar_t page_code = 0x00; /* Supported VPD Pages */ 5631 int rval = 0; 5632 int counter; 5633 struct sd_lun *un; 5634 5635 ASSERT(ssc != NULL); 5636 un = ssc->ssc_un; 5637 ASSERT(un != NULL); 5638 ASSERT(mutex_owned(SD_MUTEX(un))); 5639 5640 mutex_exit(SD_MUTEX(un)); 5641 5642 /* 5643 * We'll set the page length to the maximum to save figuring it out 5644 * with an additional call. 5645 */ 5646 page_list = kmem_zalloc(page_length, KM_SLEEP); 5647 5648 rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd, 5649 page_code, NULL); 5650 5651 if (rval != 0) 5652 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5653 5654 mutex_enter(SD_MUTEX(un)); 5655 5656 /* 5657 * Now we must validate that the device accepted the command, as some 5658 * drives do not support it. If the drive does support it, we will 5659 * return 0, and the supported pages will be in un_vpd_page_mask. If 5660 * not, we return -1. 
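 *
 * For reference, a VPD page 0x00 (Supported VPD Pages) response is
 * laid out as follows (page codes shown are only an illustration):
 *
 *	byte 0:       peripheral qualifier/device type
 *	byte 1:       page code (0x00)
 *	byte 3:       page length n
 *	bytes 4..n+3: supported page codes in ascending order,
 *	              e.g. 0x00 0x80 0x83
 *
 * which is why the scan below starts at offset 4 and walks the list
 * while the codes remain in range.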
5661 */ 5662 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5663 /* Loop to find one of the 2 pages we need */ 5664 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5665 5666 /* 5667 * Pages are returned in ascending order, and 0x83 is what we 5668 * are hoping for. 5669 */ 5670 while ((page_list[counter] <= 0x86) && 5671 (counter <= (page_list[VPD_PAGE_LENGTH] + 5672 VPD_HEAD_OFFSET))) { 5673 /* 5674 * Add 3 because page_list[3] is the number of 5675 * pages minus 3 5676 */ 5677 5678 switch (page_list[counter]) { 5679 case 0x00: 5680 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5681 break; 5682 case 0x80: 5683 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5684 break; 5685 case 0x81: 5686 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5687 break; 5688 case 0x82: 5689 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5690 break; 5691 case 0x83: 5692 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5693 break; 5694 case 0x86: 5695 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5696 break; 5697 } 5698 counter++; 5699 } 5700 5701 } else { 5702 rval = -1; 5703 5704 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5705 "sd_check_vpd_page_support: This drive does not implement " 5706 "VPD pages.\n"); 5707 } 5708 5709 kmem_free(page_list, page_length); 5710 5711 return (rval); 5712 } 5713 5714 5715 /* 5716 * Function: sd_setup_pm 5717 * 5718 * Description: Initialize Power Management on the device 5719 * 5720 * Context: Kernel Thread 5721 */ 5722 5723 static void 5724 sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi) 5725 { 5726 uint_t log_page_size; 5727 uchar_t *log_page_data; 5728 int rval = 0; 5729 struct sd_lun *un; 5730 5731 ASSERT(ssc != NULL); 5732 un = ssc->ssc_un; 5733 ASSERT(un != NULL); 5734 5735 /* 5736 * Since we are called from attach, holding a mutex for 5737 * un is unnecessary. Because some of the routines called 5738 * from here require SD_MUTEX to not be held, assert this 5739 * right up front. 5740 */ 5741 ASSERT(!mutex_owned(SD_MUTEX(un))); 5742 /* 5743 * Since the sd device does not have the 'reg' property, 5744 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5745 * The following code is to tell cpr that this device 5746 * DOES need to be suspended and resumed. 5747 */ 5748 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5749 "pm-hardware-state", "needs-suspend-resume"); 5750 5751 /* 5752 * This complies with the new power management framework 5753 * for certain desktop machines. Create the pm_components 5754 * property as a string array property. 5755 */ 5756 if (un->un_f_pm_supported) { 5757 /* 5758 * not all devices have a motor, try it first. 5759 * some devices may return ILLEGAL REQUEST, some 5760 * will hang 5761 * The following START_STOP_UNIT is used to check if target 5762 * device has a motor. 
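 *
 * The probe is simply the START command issued below:
 *
 *	rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START,
 *	    SD_PATH_DIRECT);
 *
 * A nonzero return (e.g. ILLEGAL REQUEST from a motorless device)
 * clears un_f_start_stop_supported.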
5763 */ 5764 un->un_f_start_stop_supported = TRUE; 5765 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 5766 SD_PATH_DIRECT); 5767 5768 if (rval != 0) { 5769 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5770 un->un_f_start_stop_supported = FALSE; 5771 } 5772 5773 /* 5774 * create pm properties anyways otherwise the parent can't 5775 * go to sleep 5776 */ 5777 (void) sd_create_pm_components(devi, un); 5778 un->un_f_pm_is_enabled = TRUE; 5779 return; 5780 } 5781 5782 if (!un->un_f_log_sense_supported) { 5783 un->un_power_level = SD_SPINDLE_ON; 5784 un->un_f_pm_is_enabled = FALSE; 5785 return; 5786 } 5787 5788 rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE); 5789 5790 #ifdef SDDEBUG 5791 if (sd_force_pm_supported) { 5792 /* Force a successful result */ 5793 rval = 1; 5794 } 5795 #endif 5796 5797 /* 5798 * If the start-stop cycle counter log page is not supported 5799 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 5800 * then we should not create the pm_components property. 5801 */ 5802 if (rval == -1) { 5803 /* 5804 * Error. 5805 * Reading log sense failed, most likely this is 5806 * an older drive that does not support log sense. 5807 * If this fails auto-pm is not supported. 5808 */ 5809 un->un_power_level = SD_SPINDLE_ON; 5810 un->un_f_pm_is_enabled = FALSE; 5811 5812 } else if (rval == 0) { 5813 /* 5814 * Page not found. 5815 * The start stop cycle counter is implemented as page 5816 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For 5817 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 5818 */ 5819 if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) { 5820 /* 5821 * Page found, use this one. 5822 */ 5823 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 5824 un->un_f_pm_is_enabled = TRUE; 5825 } else { 5826 /* 5827 * Error or page not found. 5828 * auto-pm is not supported for this device. 5829 */ 5830 un->un_power_level = SD_SPINDLE_ON; 5831 un->un_f_pm_is_enabled = FALSE; 5832 } 5833 } else { 5834 /* 5835 * Page found, use it. 5836 */ 5837 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 5838 un->un_f_pm_is_enabled = TRUE; 5839 } 5840 5841 5842 if (un->un_f_pm_is_enabled == TRUE) { 5843 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5844 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5845 5846 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 5847 log_page_size, un->un_start_stop_cycle_page, 5848 0x01, 0, SD_PATH_DIRECT); 5849 5850 if (rval != 0) { 5851 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5852 } 5853 5854 #ifdef SDDEBUG 5855 if (sd_force_pm_supported) { 5856 /* Force a successful result */ 5857 rval = 0; 5858 } 5859 #endif 5860 5861 /* 5862 * If the Log sense for Page( Start/stop cycle counter page) 5863 * succeeds, then power management is supported and we can 5864 * enable auto-pm. 5865 */ 5866 if (rval == 0) { 5867 (void) sd_create_pm_components(devi, un); 5868 } else { 5869 un->un_power_level = SD_SPINDLE_ON; 5870 un->un_f_pm_is_enabled = FALSE; 5871 } 5872 5873 kmem_free(log_page_data, log_page_size); 5874 } 5875 } 5876 5877 5878 /* 5879 * Function: sd_create_pm_components 5880 * 5881 * Description: Initialize PM property. 
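 *
 *		The property created below is the string-array form the
 *		PM framework expects; written out as a driver.conf entry
 *		it would read (illustration only):
 *
 *		pm-components="NAME=spindle-motor", "0=off", "1=on";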
5882 * 5883 * Context: Kernel thread context 5884 */ 5885 5886 static void 5887 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 5888 { 5889 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 5890 5891 ASSERT(!mutex_owned(SD_MUTEX(un))); 5892 5893 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 5894 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 5895 /* 5896 * When components are initially created they are idle, 5897 * power up any non-removables. 5898 * Note: the return value of pm_raise_power can't be used 5899 * for determining if PM should be enabled for this device. 5900 * Even if you check the return values and remove this 5901 * property created above, the PM framework will not honor the 5902 * change after the first call to pm_raise_power. Hence, 5903 * removal of that property does not help if pm_raise_power 5904 * fails. In the case of removable media, the start/stop 5905 * will fail if the media is not present. 5906 */ 5907 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5908 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5909 mutex_enter(SD_MUTEX(un)); 5910 un->un_power_level = SD_SPINDLE_ON; 5911 mutex_enter(&un->un_pm_mutex); 5912 /* Set to on and not busy. */ 5913 un->un_pm_count = 0; 5914 } else { 5915 mutex_enter(SD_MUTEX(un)); 5916 un->un_power_level = SD_SPINDLE_OFF; 5917 mutex_enter(&un->un_pm_mutex); 5918 /* Set to off. */ 5919 un->un_pm_count = -1; 5920 } 5921 mutex_exit(&un->un_pm_mutex); 5922 mutex_exit(SD_MUTEX(un)); 5923 } else { 5924 un->un_power_level = SD_SPINDLE_ON; 5925 un->un_f_pm_is_enabled = FALSE; 5926 } 5927 } 5928 5929 5930 /* 5931 * Function: sd_ddi_suspend 5932 * 5933 * Description: Performs system power-down operations. This includes 5934 * setting the drive state to indicate its suspended so 5935 * that no new commands will be accepted. Also, wait for 5936 * all commands that are in transport or queued to a timer 5937 * for retry to complete. All timeout threads are cancelled. 5938 * 5939 * Return Code: DDI_FAILURE or DDI_SUCCESS 5940 * 5941 * Context: Kernel thread context 5942 */ 5943 5944 static int 5945 sd_ddi_suspend(dev_info_t *devi) 5946 { 5947 struct sd_lun *un; 5948 clock_t wait_cmds_complete; 5949 5950 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5951 if (un == NULL) { 5952 return (DDI_FAILURE); 5953 } 5954 5955 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 5956 5957 mutex_enter(SD_MUTEX(un)); 5958 5959 /* Return success if the device is already suspended. */ 5960 if (un->un_state == SD_STATE_SUSPENDED) { 5961 mutex_exit(SD_MUTEX(un)); 5962 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5963 "device already suspended, exiting\n"); 5964 return (DDI_SUCCESS); 5965 } 5966 5967 /* Return failure if the device is being used by HA */ 5968 if (un->un_resvd_status & 5969 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 5970 mutex_exit(SD_MUTEX(un)); 5971 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5972 "device in use by HA, exiting\n"); 5973 return (DDI_FAILURE); 5974 } 5975 5976 /* 5977 * Return failure if the device is in a resource wait 5978 * or power changing state. 
5979 */ 5980 if ((un->un_state == SD_STATE_RWAIT) || 5981 (un->un_state == SD_STATE_PM_CHANGING)) { 5982 mutex_exit(SD_MUTEX(un)); 5983 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5984 "device in resource wait state, exiting\n"); 5985 return (DDI_FAILURE); 5986 } 5987 5988 5989 un->un_save_state = un->un_last_state; 5990 New_state(un, SD_STATE_SUSPENDED); 5991 5992 /* 5993 * Wait for all commands that are in transport or queued to a timer 5994 * for retry to complete. 5995 * 5996 * While waiting, no new commands will be accepted or sent because of 5997 * the new state we set above. 5998 * 5999 * Wait till current operation has completed. If we are in the resource 6000 * wait state (with an intr outstanding) then we need to wait till the 6001 * intr completes and starts the next cmd. We want to wait for 6002 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 6003 */ 6004 wait_cmds_complete = ddi_get_lbolt() + 6005 (sd_wait_cmds_complete * drv_usectohz(1000000)); 6006 6007 while (un->un_ncmds_in_transport != 0) { 6008 /* 6009 * Fail if commands do not finish in the specified time. 6010 */ 6011 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 6012 wait_cmds_complete) == -1) { 6013 /* 6014 * Undo the state changes made above. Everything 6015 * must go back to it's original value. 6016 */ 6017 Restore_state(un); 6018 un->un_last_state = un->un_save_state; 6019 /* Wake up any threads that might be waiting. */ 6020 cv_broadcast(&un->un_suspend_cv); 6021 mutex_exit(SD_MUTEX(un)); 6022 SD_ERROR(SD_LOG_IO_PM, un, 6023 "sd_ddi_suspend: failed due to outstanding cmds\n"); 6024 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 6025 return (DDI_FAILURE); 6026 } 6027 } 6028 6029 /* 6030 * Cancel SCSI watch thread and timeouts, if any are active 6031 */ 6032 6033 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 6034 opaque_t temp_token = un->un_swr_token; 6035 mutex_exit(SD_MUTEX(un)); 6036 scsi_watch_suspend(temp_token); 6037 mutex_enter(SD_MUTEX(un)); 6038 } 6039 6040 if (un->un_reset_throttle_timeid != NULL) { 6041 timeout_id_t temp_id = un->un_reset_throttle_timeid; 6042 un->un_reset_throttle_timeid = NULL; 6043 mutex_exit(SD_MUTEX(un)); 6044 (void) untimeout(temp_id); 6045 mutex_enter(SD_MUTEX(un)); 6046 } 6047 6048 if (un->un_dcvb_timeid != NULL) { 6049 timeout_id_t temp_id = un->un_dcvb_timeid; 6050 un->un_dcvb_timeid = NULL; 6051 mutex_exit(SD_MUTEX(un)); 6052 (void) untimeout(temp_id); 6053 mutex_enter(SD_MUTEX(un)); 6054 } 6055 6056 mutex_enter(&un->un_pm_mutex); 6057 if (un->un_pm_timeid != NULL) { 6058 timeout_id_t temp_id = un->un_pm_timeid; 6059 un->un_pm_timeid = NULL; 6060 mutex_exit(&un->un_pm_mutex); 6061 mutex_exit(SD_MUTEX(un)); 6062 (void) untimeout(temp_id); 6063 mutex_enter(SD_MUTEX(un)); 6064 } else { 6065 mutex_exit(&un->un_pm_mutex); 6066 } 6067 6068 if (un->un_rmw_msg_timeid != NULL) { 6069 timeout_id_t temp_id = un->un_rmw_msg_timeid; 6070 un->un_rmw_msg_timeid = NULL; 6071 mutex_exit(SD_MUTEX(un)); 6072 (void) untimeout(temp_id); 6073 mutex_enter(SD_MUTEX(un)); 6074 } 6075 6076 if (un->un_retry_timeid != NULL) { 6077 timeout_id_t temp_id = un->un_retry_timeid; 6078 un->un_retry_timeid = NULL; 6079 mutex_exit(SD_MUTEX(un)); 6080 (void) untimeout(temp_id); 6081 mutex_enter(SD_MUTEX(un)); 6082 6083 if (un->un_retry_bp != NULL) { 6084 un->un_retry_bp->av_forw = un->un_waitq_headp; 6085 un->un_waitq_headp = un->un_retry_bp; 6086 if (un->un_waitq_tailp == NULL) { 6087 un->un_waitq_tailp = un->un_retry_bp; 6088 } 6089 un->un_retry_bp = NULL; 6090 un->un_retry_statp 
= NULL; 6091 } 6092 } 6093 6094 if (un->un_direct_priority_timeid != NULL) { 6095 timeout_id_t temp_id = un->un_direct_priority_timeid; 6096 un->un_direct_priority_timeid = NULL; 6097 mutex_exit(SD_MUTEX(un)); 6098 (void) untimeout(temp_id); 6099 mutex_enter(SD_MUTEX(un)); 6100 } 6101 6102 if (un->un_f_is_fibre == TRUE) { 6103 /* 6104 * Remove callbacks for insert and remove events 6105 */ 6106 if (un->un_insert_event != NULL) { 6107 mutex_exit(SD_MUTEX(un)); 6108 (void) ddi_remove_event_handler(un->un_insert_cb_id); 6109 mutex_enter(SD_MUTEX(un)); 6110 un->un_insert_event = NULL; 6111 } 6112 6113 if (un->un_remove_event != NULL) { 6114 mutex_exit(SD_MUTEX(un)); 6115 (void) ddi_remove_event_handler(un->un_remove_cb_id); 6116 mutex_enter(SD_MUTEX(un)); 6117 un->un_remove_event = NULL; 6118 } 6119 } 6120 6121 mutex_exit(SD_MUTEX(un)); 6122 6123 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 6124 6125 return (DDI_SUCCESS); 6126 } 6127 6128 6129 /* 6130 * Function: sd_ddi_pm_suspend 6131 * 6132 * Description: Set the drive state to low power. 6133 * Someone else is required to actually change the drive 6134 * power level. 6135 * 6136 * Arguments: un - driver soft state (unit) structure 6137 * 6138 * Return Code: DDI_FAILURE or DDI_SUCCESS 6139 * 6140 * Context: Kernel thread context 6141 */ 6142 6143 static int 6144 sd_ddi_pm_suspend(struct sd_lun *un) 6145 { 6146 ASSERT(un != NULL); 6147 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n"); 6148 6149 ASSERT(!mutex_owned(SD_MUTEX(un))); 6150 mutex_enter(SD_MUTEX(un)); 6151 6152 /* 6153 * Exit if power management is not enabled for this device, or if 6154 * the device is being used by HA. 6155 */ 6156 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 6157 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 6158 mutex_exit(SD_MUTEX(un)); 6159 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n"); 6160 return (DDI_SUCCESS); 6161 } 6162 6163 SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n", 6164 un->un_ncmds_in_driver); 6165 6166 /* 6167 * See if the device is not busy, i.e.: 6168 * - we have no commands in the driver for this device 6169 * - not waiting for resources 6170 */ 6171 if ((un->un_ncmds_in_driver == 0) && 6172 (un->un_state != SD_STATE_RWAIT)) { 6173 /* 6174 * The device is not busy, so it is OK to go to low power state. 6175 * Indicate low power, but rely on someone else to actually 6176 * change it. 6177 */ 6178 mutex_enter(&un->un_pm_mutex); 6179 un->un_pm_count = -1; 6180 mutex_exit(&un->un_pm_mutex); 6181 un->un_power_level = SD_SPINDLE_OFF; 6182 } 6183 6184 mutex_exit(SD_MUTEX(un)); 6185 6186 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n"); 6187 6188 return (DDI_SUCCESS); 6189 } 6190 6191 6192 /* 6193 * Function: sd_ddi_resume 6194 * 6195 * Description: Performs system power-up operations. 6196 * 6197 * Return Code: DDI_SUCCESS 6198 * DDI_FAILURE 6199 * 6200 * Context: Kernel thread context 6201 */ 6202 6203 static int 6204 sd_ddi_resume(dev_info_t *devi) 6205 { 6206 struct sd_lun *un; 6207 6208 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 6209 if (un == NULL) { 6210 return (DDI_FAILURE); 6211 } 6212 6213 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n"); 6214 6215 mutex_enter(SD_MUTEX(un)); 6216 Restore_state(un); 6217 6218 /* 6219 * Restore the state which was saved to give the 6220 * right state in un_last_state 6221 */ 6222 un->un_last_state = un->un_save_state; 6223 /* 6224 * Note: throttle comes back at full.
6225 * Also note: this MUST be done before calling pm_raise_power 6226 * otherwise the system can get hung in biowait. The scenario where 6227 * this'll happen is under cpr suspend. Writing of the system 6228 * state goes through sddump, which writes 0 to un_throttle. If 6229 * writing the system state then fails, example if the partition is 6230 * too small, then cpr attempts a resume. If throttle isn't restored 6231 * from the saved value until after calling pm_raise_power then 6232 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 6233 * in biowait. 6234 */ 6235 un->un_throttle = un->un_saved_throttle; 6236 6237 /* 6238 * The chance of failure is very rare as the only command done in power 6239 * entry point is START command when you transition from 0->1 or 6240 * unknown->1. Put it to SPINDLE ON state irrespective of the state at 6241 * which suspend was done. Ignore the return value as the resume should 6242 * not be failed. In the case of removable media the media need not be 6243 * inserted and hence there is a chance that raise power will fail with 6244 * media not present. 6245 */ 6246 if (un->un_f_attach_spinup) { 6247 mutex_exit(SD_MUTEX(un)); 6248 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 6249 mutex_enter(SD_MUTEX(un)); 6250 } 6251 6252 /* 6253 * Don't broadcast to the suspend cv and therefore possibly 6254 * start I/O until after power has been restored. 6255 */ 6256 cv_broadcast(&un->un_suspend_cv); 6257 cv_broadcast(&un->un_state_cv); 6258 6259 /* restart thread */ 6260 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 6261 scsi_watch_resume(un->un_swr_token); 6262 } 6263 6264 #if (defined(__fibre)) 6265 if (un->un_f_is_fibre == TRUE) { 6266 /* 6267 * Add callbacks for insert and remove events 6268 */ 6269 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 6270 sd_init_event_callbacks(un); 6271 } 6272 } 6273 #endif 6274 6275 /* 6276 * Transport any pending commands to the target. 6277 * 6278 * If this is a low-activity device commands in queue will have to wait 6279 * until new commands come in, which may take awhile. Also, we 6280 * specifically don't check un_ncmds_in_transport because we know that 6281 * there really are no commands in progress after the unit was 6282 * suspended and we could have reached the throttle level, been 6283 * suspended, and have no new commands coming in for awhile. Highly 6284 * unlikely, but so is the low-activity disk scenario. 6285 */ 6286 ddi_xbuf_dispatch(un->un_xbuf_attr); 6287 6288 sd_start_cmds(un, NULL); 6289 mutex_exit(SD_MUTEX(un)); 6290 6291 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 6292 6293 return (DDI_SUCCESS); 6294 } 6295 6296 6297 /* 6298 * Function: sd_ddi_pm_resume 6299 * 6300 * Description: Set the drive state to powered on. 6301 * Someone else is required to actually change the drive 6302 * power level. 6303 * 6304 * Arguments: un - driver soft state (unit) structure 6305 * 6306 * Return Code: DDI_SUCCESS 6307 * 6308 * Context: Kernel thread context 6309 */ 6310 6311 static int 6312 sd_ddi_pm_resume(struct sd_lun *un) 6313 { 6314 ASSERT(un != NULL); 6315 6316 ASSERT(!mutex_owned(SD_MUTEX(un))); 6317 mutex_enter(SD_MUTEX(un)); 6318 un->un_power_level = SD_SPINDLE_ON; 6319 6320 ASSERT(!mutex_owned(&un->un_pm_mutex)); 6321 mutex_enter(&un->un_pm_mutex); 6322 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 6323 un->un_pm_count++; 6324 ASSERT(un->un_pm_count == 0); 6325 /* 6326 * Note: no longer do the cv_broadcast on un_suspend_cv. 
The 6327 * un_suspend_cv is for a system resume, not a power management 6328 * device resume. (4297749) 6329 * cv_broadcast(&un->un_suspend_cv); 6330 */ 6331 } 6332 mutex_exit(&un->un_pm_mutex); 6333 mutex_exit(SD_MUTEX(un)); 6334 6335 return (DDI_SUCCESS); 6336 } 6337 6338 6339 /* 6340 * Function: sd_pm_idletimeout_handler 6341 * 6342 * Description: A timer routine that's active only while a device is busy. 6343 * The purpose is to extend slightly the pm framework's busy 6344 * view of the device to prevent busy/idle thrashing for 6345 * back-to-back commands. Do this by comparing the current time 6346 * to the time at which the last command completed and when the 6347 * difference is greater than sd_pm_idletime, call 6348 * pm_idle_component. In addition to indicating idle to the pm 6349 * framework, update the chain type to again use the internal pm 6350 * layers of the driver. 6351 * 6352 * Arguments: arg - driver soft state (unit) structure 6353 * 6354 * Context: Executes in a timeout(9F) thread context 6355 */ 6356 6357 static void 6358 sd_pm_idletimeout_handler(void *arg) 6359 { 6360 struct sd_lun *un = arg; 6361 6362 time_t now; 6363 6364 mutex_enter(&sd_detach_mutex); 6365 if (un->un_detach_count != 0) { 6366 /* Abort if the instance is detaching */ 6367 mutex_exit(&sd_detach_mutex); 6368 return; 6369 } 6370 mutex_exit(&sd_detach_mutex); 6371 6372 now = ddi_get_time(); 6373 /* 6374 * Grab both mutexes, in the proper order, since we're accessing 6375 * both PM and softstate variables. 6376 */ 6377 mutex_enter(SD_MUTEX(un)); 6378 mutex_enter(&un->un_pm_mutex); 6379 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 6380 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 6381 /* 6382 * Update the chain types. 6383 * This takes effect on the next new command received. 6384 */ 6385 if (un->un_f_non_devbsize_supported) { 6386 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 6387 } else { 6388 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 6389 } 6390 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 6391 6392 SD_TRACE(SD_LOG_IO_PM, un, 6393 "sd_pm_idletimeout_handler: idling device\n"); 6394 (void) pm_idle_component(SD_DEVINFO(un), 0); 6395 un->un_pm_idle_timeid = NULL; 6396 } else { 6397 un->un_pm_idle_timeid = 6398 timeout(sd_pm_idletimeout_handler, un, 6399 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 6400 } 6401 mutex_exit(&un->un_pm_mutex); 6402 mutex_exit(SD_MUTEX(un)); 6403 } 6404 6405 6406 /* 6407 * Function: sd_pm_timeout_handler 6408 * 6409 * Description: Callback to tell framework we are idle. 6410 * 6411 * Context: timeout(9F) thread context. 6412 */ 6413 6414 static void 6415 sd_pm_timeout_handler(void *arg) 6416 { 6417 struct sd_lun *un = arg; 6418 6419 (void) pm_idle_component(SD_DEVINFO(un), 0); 6420 mutex_enter(&un->un_pm_mutex); 6421 un->un_pm_timeid = NULL; 6422 mutex_exit(&un->un_pm_mutex); 6423 } 6424 6425 6426 /* 6427 * Function: sdpower 6428 * 6429 * Description: PM entry point.
6430 * 6431 * Return Code: DDI_SUCCESS 6432 * DDI_FAILURE 6433 * 6434 * Context: Kernel thread context 6435 */ 6436 6437 static int 6438 sdpower(dev_info_t *devi, int component, int level) 6439 { 6440 struct sd_lun *un; 6441 int instance; 6442 int rval = DDI_SUCCESS; 6443 uint_t i, log_page_size, maxcycles, ncycles; 6444 uchar_t *log_page_data; 6445 int log_sense_page; 6446 int medium_present; 6447 time_t intvlp; 6448 dev_t dev; 6449 struct pm_trans_data sd_pm_tran_data; 6450 uchar_t save_state; 6451 int sval; 6452 uchar_t state_before_pm; 6453 int got_semaphore_here; 6454 sd_ssc_t *ssc; 6455 6456 instance = ddi_get_instance(devi); 6457 6458 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 6459 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 6460 component != 0) { 6461 return (DDI_FAILURE); 6462 } 6463 6464 dev = sd_make_device(SD_DEVINFO(un)); 6465 ssc = sd_ssc_init(un); 6466 6467 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 6468 6469 /* 6470 * Must synchronize power down with close. 6471 * Attempt to decrement/acquire the open/close semaphore, 6472 * but do NOT wait on it. If it's not greater than zero, 6473 * i.e. it can't be decremented without waiting, then 6474 * someone else, either open or close, already has it 6475 * and the try returns 0. Use that knowledge here to determine 6476 * if it's OK to change the device power level. 6477 * Also, only increment it on exit if it was decremented, i.e. gotten, 6478 * here. 6479 */ 6480 got_semaphore_here = sema_tryp(&un->un_semoclose); 6481 6482 mutex_enter(SD_MUTEX(un)); 6483 6484 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n", 6485 un->un_ncmds_in_driver); 6486 6487 /* 6488 * If un_ncmds_in_driver is non-zero it indicates commands are 6489 * already being processed in the driver, or if the semaphore was 6490 * not gotten here it indicates an open or close is being processed. 6491 * At the same time somebody is requesting to go low power which 6492 * can't happen, therefore we need to return failure. 6493 */ 6494 if ((level == SD_SPINDLE_OFF) && 6495 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) { 6496 mutex_exit(SD_MUTEX(un)); 6497 6498 if (got_semaphore_here != 0) { 6499 sema_v(&un->un_semoclose); 6500 } 6501 SD_TRACE(SD_LOG_IO_PM, un, 6502 "sdpower: exit, device has queued cmds.\n"); 6503 6504 goto sdpower_failed; 6505 } 6506 6507 /* 6508 * If the device is OFFLINE the disk is effectively dead; changing 6509 * its power level would require sending it commands, which would 6510 * fail anyway, so just return here. 6511 * 6512 * Power changes to a device that's OFFLINE or SUSPENDED 6513 * are not allowed. 6514 */ 6515 if ((un->un_state == SD_STATE_OFFLINE) || 6516 (un->un_state == SD_STATE_SUSPENDED)) { 6517 mutex_exit(SD_MUTEX(un)); 6518 6519 if (got_semaphore_here != 0) { 6520 sema_v(&un->un_semoclose); 6521 } 6522 SD_TRACE(SD_LOG_IO_PM, un, 6523 "sdpower: exit, device is off-line.\n"); 6524 6525 goto sdpower_failed; 6526 } 6527 6528 /* 6529 * Change the device's state to indicate its power level 6530 * is being changed. Do this to prevent a power off in the 6531 * middle of commands, which is especially bad on devices 6532 * that are really powered off instead of just spun down.
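 *
 * The save/restore pattern used on every exit path below is, in
 * outline:
 *
 *	state_before_pm = un->un_state;
 *	un->un_state = SD_STATE_PM_CHANGING;
 *	... attempt the power level change ...
 *	un->un_state = state_before_pm;
 *	cv_broadcast(&un->un_suspend_cv);	(wake any waiters)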
6533 */ 6534 state_before_pm = un->un_state; 6535 un->un_state = SD_STATE_PM_CHANGING; 6536 6537 mutex_exit(SD_MUTEX(un)); 6538 6539 /* 6540 * If "pm-capable" property is set to TRUE by HBA drivers, 6541 * bypass the following checking, otherwise, check the log 6542 * sense information for this device. 6543 */ 6544 if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) { 6545 /* 6546 * Get the log sense information to understand whether the 6547 * power-cycle counts have gone beyond the threshold. 6548 */ 6549 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 6550 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 6551 6552 mutex_enter(SD_MUTEX(un)); 6553 log_sense_page = un->un_start_stop_cycle_page; 6554 mutex_exit(SD_MUTEX(un)); 6555 6556 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 6557 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT); 6558 6559 if (rval != 0) { 6560 if (rval == EIO) 6561 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 6562 else 6563 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6564 } 6565 6566 #ifdef SDDEBUG 6567 if (sd_force_pm_supported) { 6568 /* Force a successful result */ 6569 rval = 0; 6570 } 6571 #endif 6572 if (rval != 0) { 6573 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 6574 "Log Sense Failed\n"); 6575 6576 kmem_free(log_page_data, log_page_size); 6577 /* Cannot support power management on those drives */ 6578 6579 if (got_semaphore_here != 0) { 6580 sema_v(&un->un_semoclose); 6581 } 6582 /* 6583 * On exit put the state back to its original value 6584 * and broadcast to anyone waiting for the power 6585 * change completion. 6586 */ 6587 mutex_enter(SD_MUTEX(un)); 6588 un->un_state = state_before_pm; 6589 cv_broadcast(&un->un_suspend_cv); 6590 mutex_exit(SD_MUTEX(un)); 6591 SD_TRACE(SD_LOG_IO_PM, un, 6592 "sdpower: exit, Log Sense Failed.\n"); 6593 6594 goto sdpower_failed; 6595 } 6596 6597 /* 6598 * From the page data - Convert the essential information to 6599 * pm_trans_data 6600 */ 6601 maxcycles = 6602 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) | 6603 (log_page_data[0x1E] << 8) | log_page_data[0x1F]; 6604 6605 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles; 6606 6607 ncycles = 6608 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) | 6609 (log_page_data[0x26] << 8) | log_page_data[0x27]; 6610 6611 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles; 6612 6613 for (i = 0; i < DC_SCSI_MFR_LEN; i++) { 6614 sd_pm_tran_data.un.scsi_cycles.svc_date[i] = 6615 log_page_data[8+i]; 6616 } 6617 6618 kmem_free(log_page_data, log_page_size); 6619 6620 /* 6621 * Call pm_trans_check routine to get the Ok from 6622 * the global policy 6623 */ 6624 6625 sd_pm_tran_data.format = DC_SCSI_FORMAT; 6626 sd_pm_tran_data.un.scsi_cycles.flag = 0; 6627 6628 rval = pm_trans_check(&sd_pm_tran_data, &intvlp); 6629 #ifdef SDDEBUG 6630 if (sd_force_pm_supported) { 6631 /* Force a successful result */ 6632 rval = 1; 6633 } 6634 #endif 6635 switch (rval) { 6636 case 0: 6637 /* 6638 * Not OK to power cycle, or an error in the parameters 6639 * passed; pm_trans_check() has given, in intvlp, the advised 6640 * time to wait before the next power cycle. Based on the new intvlp parameter we are 6641 * supposed to pretend we are busy so that pm framework 6642 * will never call our power entry point. Because of 6643 * that install a timeout handler and wait for the 6644 * recommended time to elapse so that power management 6645 * can be effective again. 6646 * 6647 * To effect this behavior, call pm_busy_component to 6648 * indicate to the framework this device is busy.
6649 * By not adjusting un_pm_count, the rest of PM in 6650 * the driver will function normally and independently 6651 * of this; but because the framework is told the device 6652 * is busy it won't attempt powering down until it gets 6653 * a matching idle. The timeout handler sends this. 6654 * Note: sd_pm_entry can't be called here to do this 6655 * because sdpower may have been called as a result 6656 * of a call to pm_raise_power from within sd_pm_entry. 6657 * 6658 * If a timeout handler is already active then 6659 * don't install another. 6660 */ 6661 mutex_enter(&un->un_pm_mutex); 6662 if (un->un_pm_timeid == NULL) { 6663 un->un_pm_timeid = 6664 timeout(sd_pm_timeout_handler, 6665 un, intvlp * drv_usectohz(1000000)); 6666 mutex_exit(&un->un_pm_mutex); 6667 (void) pm_busy_component(SD_DEVINFO(un), 0); 6668 } else { 6669 mutex_exit(&un->un_pm_mutex); 6670 } 6671 if (got_semaphore_here != 0) { 6672 sema_v(&un->un_semoclose); 6673 } 6674 /* 6675 * On exit put the state back to its original value 6676 * and broadcast to anyone waiting for the power 6677 * change completion. 6678 */ 6679 mutex_enter(SD_MUTEX(un)); 6680 un->un_state = state_before_pm; 6681 cv_broadcast(&un->un_suspend_cv); 6682 mutex_exit(SD_MUTEX(un)); 6683 6684 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, " 6685 "trans check Failed, not ok to power cycle.\n"); 6686 6687 goto sdpower_failed; 6688 case -1: 6689 if (got_semaphore_here != 0) { 6690 sema_v(&un->un_semoclose); 6691 } 6692 /* 6693 * On exit put the state back to its original value 6694 * and broadcast to anyone waiting for the power 6695 * change completion. 6696 */ 6697 mutex_enter(SD_MUTEX(un)); 6698 un->un_state = state_before_pm; 6699 cv_broadcast(&un->un_suspend_cv); 6700 mutex_exit(SD_MUTEX(un)); 6701 SD_TRACE(SD_LOG_IO_PM, un, 6702 "sdpower: exit, trans check command Failed.\n"); 6703 6704 goto sdpower_failed; 6705 } 6706 } 6707 6708 if (level == SD_SPINDLE_OFF) { 6709 /* 6710 * Save the last state... if the STOP FAILS we need it 6711 * for restoring 6712 */ 6713 mutex_enter(SD_MUTEX(un)); 6714 save_state = un->un_last_state; 6715 /* 6716 * There must not be any cmds getting processed 6717 * in the driver when we get here. Power to the 6718 * device is potentially going off. 6719 */ 6720 ASSERT(un->un_ncmds_in_driver == 0); 6721 mutex_exit(SD_MUTEX(un)); 6722 6723 /* 6724 * For now suspend the device completely before the spindle is 6725 * turned off. 6726 */ 6727 if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) { 6728 if (got_semaphore_here != 0) { 6729 sema_v(&un->un_semoclose); 6730 } 6731 /* 6732 * On exit put the state back to its original value 6733 * and broadcast to anyone waiting for the power 6734 * change completion. 6735 */ 6736 mutex_enter(SD_MUTEX(un)); 6737 un->un_state = state_before_pm; 6738 cv_broadcast(&un->un_suspend_cv); 6739 mutex_exit(SD_MUTEX(un)); 6740 SD_TRACE(SD_LOG_IO_PM, un, 6741 "sdpower: exit, PM suspend Failed.\n"); 6742 6743 goto sdpower_failed; 6744 } 6745 } 6746 6747 /* 6748 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open, 6749 * close, or strategy. Dump no longer uses this routine; it uses its 6750 * own code so it can be done in polled mode. 6751 */ 6752 6753 medium_present = TRUE; 6754 6755 /* 6756 * When powering up, issue a TUR in case the device is at unit 6757 * attention. Don't do retries. Bypass the PM layer, otherwise 6758 * a deadlock on un_pm_busy_cv will occur.
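 *
 * The call shape used below is:
 *
 *	sval = sd_send_scsi_TEST_UNIT_READY(ssc,
 *	    SD_DONT_RETRY_TUR | SD_BYPASS_PM);
 *
 * SD_DONT_RETRY_TUR keeps a UNIT ATTENTION from being retried here,
 * and SD_BYPASS_PM keeps the command from re-entering the PM layer
 * (sd_pm_entry) while we are already inside sdpower.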
6759 */ 6760 if (level == SD_SPINDLE_ON) { 6761 sval = sd_send_scsi_TEST_UNIT_READY(ssc, 6762 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 6763 if (sval != 0) 6764 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6765 } 6766 6767 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 6768 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 6769 6770 sval = sd_send_scsi_START_STOP_UNIT(ssc, 6771 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP), 6772 SD_PATH_DIRECT); 6773 if (sval != 0) { 6774 if (sval == EIO) 6775 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 6776 else 6777 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6778 } 6779 6780 /* Command failed, check for media present. */ 6781 if ((sval == ENXIO) && un->un_f_has_removable_media) { 6782 medium_present = FALSE; 6783 } 6784 6785 /* 6786 * The conditions of interest here are: 6787 * if a spindle off with media present fails, 6788 * then restore the state and return an error. 6789 * else if a spindle on fails, 6790 * then return an error (there's no state to restore). 6791 * In all other cases we setup for the new state 6792 * and return success. 6793 */ 6794 switch (level) { 6795 case SD_SPINDLE_OFF: 6796 if ((medium_present == TRUE) && (sval != 0)) { 6797 /* The stop command from above failed */ 6798 rval = DDI_FAILURE; 6799 /* 6800 * The stop command failed, and we have media 6801 * present. Put the level back by calling the 6802 * sd_pm_resume() and set the state back to 6803 * it's previous value. 6804 */ 6805 (void) sd_ddi_pm_resume(un); 6806 mutex_enter(SD_MUTEX(un)); 6807 un->un_last_state = save_state; 6808 mutex_exit(SD_MUTEX(un)); 6809 break; 6810 } 6811 /* 6812 * The stop command from above succeeded. 6813 */ 6814 if (un->un_f_monitor_media_state) { 6815 /* 6816 * Terminate watch thread in case of removable media 6817 * devices going into low power state. This is as per 6818 * the requirements of pm framework, otherwise commands 6819 * will be generated for the device (through watch 6820 * thread), even when the device is in low power state. 6821 */ 6822 mutex_enter(SD_MUTEX(un)); 6823 un->un_f_watcht_stopped = FALSE; 6824 if (un->un_swr_token != NULL) { 6825 opaque_t temp_token = un->un_swr_token; 6826 un->un_f_watcht_stopped = TRUE; 6827 un->un_swr_token = NULL; 6828 mutex_exit(SD_MUTEX(un)); 6829 (void) scsi_watch_request_terminate(temp_token, 6830 SCSI_WATCH_TERMINATE_ALL_WAIT); 6831 } else { 6832 mutex_exit(SD_MUTEX(un)); 6833 } 6834 } 6835 break; 6836 6837 default: /* The level requested is spindle on... */ 6838 /* 6839 * Legacy behavior: return success on a failed spinup 6840 * if there is no media in the drive. 6841 * Do this by looking at medium_present here. 6842 */ 6843 if ((sval != 0) && medium_present) { 6844 /* The start command from above failed */ 6845 rval = DDI_FAILURE; 6846 break; 6847 } 6848 /* 6849 * The start command from above succeeded 6850 * Resume the devices now that we have 6851 * started the disks 6852 */ 6853 (void) sd_ddi_pm_resume(un); 6854 6855 /* 6856 * Resume the watch thread since it was suspended 6857 * when the device went into low power mode. 
6858 */ 6859 if (un->un_f_monitor_media_state) { 6860 mutex_enter(SD_MUTEX(un)); 6861 if (un->un_f_watcht_stopped == TRUE) { 6862 opaque_t temp_token; 6863 6864 un->un_f_watcht_stopped = FALSE; 6865 mutex_exit(SD_MUTEX(un)); 6866 temp_token = scsi_watch_request_submit( 6867 SD_SCSI_DEVP(un), 6868 sd_check_media_time, 6869 SENSE_LENGTH, sd_media_watch_cb, 6870 (caddr_t)dev); 6871 mutex_enter(SD_MUTEX(un)); 6872 un->un_swr_token = temp_token; 6873 } 6874 mutex_exit(SD_MUTEX(un)); 6875 } 6876 } 6877 if (got_semaphore_here != 0) { 6878 sema_v(&un->un_semoclose); 6879 } 6880 /* 6881 * On exit put the state back to it's original value 6882 * and broadcast to anyone waiting for the power 6883 * change completion. 6884 */ 6885 mutex_enter(SD_MUTEX(un)); 6886 un->un_state = state_before_pm; 6887 cv_broadcast(&un->un_suspend_cv); 6888 mutex_exit(SD_MUTEX(un)); 6889 6890 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6891 6892 sd_ssc_fini(ssc); 6893 return (rval); 6894 6895 sdpower_failed: 6896 6897 sd_ssc_fini(ssc); 6898 return (DDI_FAILURE); 6899 } 6900 6901 6902 6903 /* 6904 * Function: sdattach 6905 * 6906 * Description: Driver's attach(9e) entry point function. 6907 * 6908 * Arguments: devi - opaque device info handle 6909 * cmd - attach type 6910 * 6911 * Return Code: DDI_SUCCESS 6912 * DDI_FAILURE 6913 * 6914 * Context: Kernel thread context 6915 */ 6916 6917 static int 6918 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6919 { 6920 switch (cmd) { 6921 case DDI_ATTACH: 6922 return (sd_unit_attach(devi)); 6923 case DDI_RESUME: 6924 return (sd_ddi_resume(devi)); 6925 default: 6926 break; 6927 } 6928 return (DDI_FAILURE); 6929 } 6930 6931 6932 /* 6933 * Function: sddetach 6934 * 6935 * Description: Driver's detach(9E) entry point function. 6936 * 6937 * Arguments: devi - opaque device info handle 6938 * cmd - detach type 6939 * 6940 * Return Code: DDI_SUCCESS 6941 * DDI_FAILURE 6942 * 6943 * Context: Kernel thread context 6944 */ 6945 6946 static int 6947 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6948 { 6949 switch (cmd) { 6950 case DDI_DETACH: 6951 return (sd_unit_detach(devi)); 6952 case DDI_SUSPEND: 6953 return (sd_ddi_suspend(devi)); 6954 default: 6955 break; 6956 } 6957 return (DDI_FAILURE); 6958 } 6959 6960 6961 /* 6962 * Function: sd_sync_with_callback 6963 * 6964 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6965 * state while the callback routine is active. 6966 * 6967 * Arguments: un: softstate structure for the instance 6968 * 6969 * Context: Kernel thread context 6970 */ 6971 6972 static void 6973 sd_sync_with_callback(struct sd_lun *un) 6974 { 6975 ASSERT(un != NULL); 6976 6977 mutex_enter(SD_MUTEX(un)); 6978 6979 ASSERT(un->un_in_callback >= 0); 6980 6981 while (un->un_in_callback > 0) { 6982 mutex_exit(SD_MUTEX(un)); 6983 delay(2); 6984 mutex_enter(SD_MUTEX(un)); 6985 } 6986 6987 mutex_exit(SD_MUTEX(un)); 6988 } 6989 6990 /* 6991 * Function: sd_unit_attach 6992 * 6993 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6994 * the soft state structure for the device and performs 6995 * all necessary structure and device initializations. 6996 * 6997 * Arguments: devi: the system's dev_info_t for the device. 6998 * 6999 * Return Code: DDI_SUCCESS if attach is successful. 7000 * DDI_FAILURE if any part of the attach fails. 7001 * 7002 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 7003 * Kernel thread context only. Can sleep. 
7004 */ 7005 7006 static int 7007 sd_unit_attach(dev_info_t *devi) 7008 { 7009 struct scsi_device *devp; 7010 struct sd_lun *un; 7011 char *variantp; 7012 char name_str[48]; 7013 int reservation_flag = SD_TARGET_IS_UNRESERVED; 7014 int instance; 7015 int rval; 7016 int wc_enabled; 7017 int tgt; 7018 uint64_t capacity; 7019 uint_t lbasize = 0; 7020 dev_info_t *pdip = ddi_get_parent(devi); 7021 int offbyone = 0; 7022 int geom_label_valid = 0; 7023 sd_ssc_t *ssc; 7024 int status; 7025 struct sd_fm_internal *sfip = NULL; 7026 int max_xfer_size; 7027 7028 /* 7029 * Retrieve the target driver's private data area. This was set 7030 * up by the HBA. 7031 */ 7032 devp = ddi_get_driver_private(devi); 7033 7034 /* 7035 * Retrieve the target ID of the device. 7036 */ 7037 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7038 SCSI_ADDR_PROP_TARGET, -1); 7039 7040 /* 7041 * Since we have no idea what state things were left in by the last 7042 * user of the device, set up some 'default' settings, ie. turn 'em 7043 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 7044 * Do this before the scsi_probe, which sends an inquiry. 7045 * This is a fix for bug (4430280). 7046 * Of special importance is wide-xfer. The drive could have been left 7047 * in wide transfer mode by the last driver to communicate with it, 7048 * this includes us. If that's the case, and if the following is not 7049 * setup properly or we don't re-negotiate with the drive prior to 7050 * transferring data to/from the drive, it causes bus parity errors, 7051 * data overruns, and unexpected interrupts. This first occurred when 7052 * the fix for bug (4378686) was made. 7053 */ 7054 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 7055 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 7056 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 7057 7058 /* 7059 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 7060 * on a target. Setting it per lun instance actually sets the 7061 * capability of this target, which affects those luns already 7062 * attached on the same target. So during attach, we can only disable 7063 * this capability only when no other lun has been attached on this 7064 * target. By doing this, we assume a target has the same tagged-qing 7065 * capability for every lun. The condition can be removed when HBA 7066 * is changed to support per lun based tagged-qing capability. 7067 */ 7068 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7069 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 7070 } 7071 7072 /* 7073 * Use scsi_probe() to issue an INQUIRY command to the device. 7074 * This call will allocate and fill in the scsi_inquiry structure 7075 * and point the sd_inq member of the scsi_device structure to it. 7076 * If the attach succeeds, then this memory will not be de-allocated 7077 * (via scsi_unprobe()) until the instance is detached. 7078 */ 7079 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 7080 goto probe_failed; 7081 } 7082 7083 /* 7084 * Check the device type as specified in the inquiry data and 7085 * claim it if it is of a type that we support. 7086 */ 7087 switch (devp->sd_inq->inq_dtype) { 7088 case DTYPE_DIRECT: 7089 break; 7090 case DTYPE_RODIRECT: 7091 break; 7092 case DTYPE_OPTICAL: 7093 break; 7094 case DTYPE_NOTPRESENT: 7095 default: 7096 /* Unsupported device type; fail the attach. */ 7097 goto probe_failed; 7098 } 7099 7100 /* 7101 * Allocate the soft state structure for this unit. 
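 *
 * The usual soft-state idiom applies (sketched; sd_state is the
 * per-driver anchor):
 *
 *	if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS)
 *		fail the attach;
 *	un = ddi_get_soft_state(sd_state, instance);   (zero-filled)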
7102 * 7103 * We rely upon this memory being set to all zeroes by 7104 * ddi_soft_state_zalloc(). We assume that any member of the 7105 * soft state structure that is not explicitly initialized by 7106 * this routine will have a value of zero. 7107 */ 7108 instance = ddi_get_instance(devp->sd_dev); 7109 #ifndef XPV_HVM_DRIVER 7110 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 7111 goto probe_failed; 7112 } 7113 #endif /* !XPV_HVM_DRIVER */ 7114 7115 /* 7116 * Retrieve a pointer to the newly-allocated soft state. 7117 * 7118 * This should NEVER fail if the ddi_soft_state_zalloc() call above 7119 * was successful, unless something has gone horribly wrong and the 7120 * ddi's soft state internals are corrupt (in which case it is 7121 * probably better to halt here than just fail the attach....) 7122 */ 7123 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 7124 panic("sd_unit_attach: NULL soft state on instance:0x%x", 7125 instance); 7126 /*NOTREACHED*/ 7127 } 7128 7129 /* 7130 * Link the back ptr of the driver soft state to the scsi_device 7131 * struct for this lun. 7132 * Save a pointer to the softstate in the driver-private area of 7133 * the scsi_device struct. 7134 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 7135 * we first set un->un_sd below. 7136 */ 7137 un->un_sd = devp; 7138 devp->sd_private = (opaque_t)un; 7139 7140 /* 7141 * The following must be after devp is stored in the soft state struct. 7142 */ 7143 #ifdef SDDEBUG 7144 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7145 "%s_unit_attach: un:0x%p instance:%d\n", 7146 ddi_driver_name(devi), un, instance); 7147 #endif 7148 7149 /* 7150 * Set up the device type and node type (for the minor nodes). 7151 * By default we assume that the device can at least support the 7152 * Common Command Set. Call it a CD-ROM if it reports itself 7153 * as a RODIRECT device. 7154 */ 7155 switch (devp->sd_inq->inq_dtype) { 7156 case DTYPE_RODIRECT: 7157 un->un_node_type = DDI_NT_CD_CHAN; 7158 un->un_ctype = CTYPE_CDROM; 7159 break; 7160 case DTYPE_OPTICAL: 7161 un->un_node_type = DDI_NT_BLOCK_CHAN; 7162 un->un_ctype = CTYPE_ROD; 7163 break; 7164 default: 7165 un->un_node_type = DDI_NT_BLOCK_CHAN; 7166 un->un_ctype = CTYPE_CCS; 7167 break; 7168 } 7169 7170 /* 7171 * Try to read the interconnect type from the HBA. 7172 * 7173 * Note: This driver is currently compiled as two binaries, a parallel 7174 * scsi version (sd) and a fibre channel version (ssd). All functional 7175 * differences are determined at compile time. In the future a single 7176 * binary will be provided and the interconnect type will be used to 7177 * differentiate between fibre and parallel scsi behaviors. At that time 7178 * it will be necessary for all fibre channel HBAs to support this 7179 * property. 
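 *
 * As a quick reference (summarizing the switch statement below, not an
 * exhaustive interconnect list): SSA and FIBRE leave un_f_is_fibre set;
 * PARALLEL, SAS and SATA clear it; and SAS and FABRIC additionally
 * override un_node_type.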
7180 * 7181 * Set un_f_is_fibre to TRUE (default to fibre). 7182 */ 7183 un->un_f_is_fibre = TRUE; 7184 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 7185 case INTERCONNECT_SSA: 7186 un->un_interconnect_type = SD_INTERCONNECT_SSA; 7187 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7188 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 7189 break; 7190 case INTERCONNECT_PARALLEL: 7191 un->un_f_is_fibre = FALSE; 7192 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7193 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7194 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 7195 break; 7196 case INTERCONNECT_SAS: 7197 un->un_f_is_fibre = FALSE; 7198 un->un_interconnect_type = SD_INTERCONNECT_SAS; 7199 un->un_node_type = DDI_NT_BLOCK_SAS; 7200 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7201 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un); 7202 break; 7203 case INTERCONNECT_SATA: 7204 un->un_f_is_fibre = FALSE; 7205 un->un_interconnect_type = SD_INTERCONNECT_SATA; 7206 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7207 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 7208 break; 7209 case INTERCONNECT_FIBRE: 7210 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 7211 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7212 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 7213 break; 7214 case INTERCONNECT_FABRIC: 7215 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 7216 un->un_node_type = DDI_NT_BLOCK_FABRIC; 7217 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7218 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 7219 break; 7220 default: 7221 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 7222 /* 7223 * The HBA does not support the "interconnect-type" property 7224 * (or did not provide a recognized type). 7225 * 7226 * Note: This will be obsoleted when a single fibre channel 7227 * and parallel scsi driver is delivered. In the meantime the 7228 * interconnect type will be set to the platform default. If that 7229 * type is not parallel SCSI, it means that we should be 7230 * assuming "ssd" semantics. However, here this also means that 7231 * the FC HBA is not supporting the "interconnect-type" property 7232 * like we expect it to, so log this occurrence. 7233 */ 7234 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 7235 if (!SD_IS_PARALLEL_SCSI(un)) { 7236 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7237 "sd_unit_attach: un:0x%p Assuming " 7238 "INTERCONNECT_FIBRE\n", un); 7239 } else { 7240 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7241 "sd_unit_attach: un:0x%p Assuming " 7242 "INTERCONNECT_PARALLEL\n", un); 7243 un->un_f_is_fibre = FALSE; 7244 } 7245 #else 7246 /* 7247 * Note: This source will be implemented when a single fibre 7248 * channel and parallel scsi driver is delivered. The default 7249 * will be to assume that if a device does not support the 7250 * "interconnect-type" property it is a parallel SCSI HBA and 7251 * we will set the interconnect type for parallel scsi.
7252 */ 7253 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7254 un->un_f_is_fibre = FALSE; 7255 #endif 7256 break; 7257 } 7258 7259 if (un->un_f_is_fibre == TRUE) { 7260 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 7261 SCSI_VERSION_3) { 7262 switch (un->un_interconnect_type) { 7263 case SD_INTERCONNECT_FIBRE: 7264 case SD_INTERCONNECT_SSA: 7265 un->un_node_type = DDI_NT_BLOCK_WWN; 7266 break; 7267 default: 7268 break; 7269 } 7270 } 7271 } 7272 7273 /* 7274 * Initialize the Request Sense command for the target 7275 */ 7276 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 7277 goto alloc_rqs_failed; 7278 } 7279 7280 /* 7281 * Set un_retry_count to SD_RETRY_COUNT; this is fine for SPARC 7282 * with separate binaries for sd and ssd. 7283 * 7284 * x86 has 1 binary, and un_retry_count is set based on connection type. 7285 * The hardcoded values will go away when SPARC uses 1 binary 7286 * for sd and ssd. These hardcoded values need to match 7287 * SD_RETRY_COUNT in sddef.h. 7288 * The value used is based on interconnect type: 7289 * fibre = 3, parallel = 5. 7290 */ 7291 #if defined(__i386) || defined(__amd64) 7292 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 7293 #else 7294 un->un_retry_count = SD_RETRY_COUNT; 7295 #endif 7296 7297 /* 7298 * Set the per disk retry count to the default number of retries 7299 * for disks and CDROMs. This value can be overridden by the 7300 * disk property list or an entry in sd.conf. 7301 */ 7302 un->un_notready_retry_count = 7303 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 7304 : DISK_NOT_READY_RETRY_COUNT(un); 7305 7306 /* 7307 * Set the busy retry count to the default value of un_retry_count. 7308 * This can be overridden by entries in sd.conf or the device 7309 * config table. 7310 */ 7311 un->un_busy_retry_count = un->un_retry_count; 7312 7313 /* 7314 * Init the reset threshold for retries. This number determines 7315 * how many retries must be performed before a reset can be issued 7316 * (for certain error conditions). This can be overridden by entries 7317 * in sd.conf or the device config table. 7318 */ 7319 un->un_reset_retry_count = (un->un_retry_count / 2); 7320 7321 /* 7322 * Set the victim_retry_count to twice the default un_retry_count. 7323 */ 7324 un->un_victim_retry_count = (2 * un->un_retry_count); 7325 7326 /* 7327 * Set the reservation release timeout to the default value of 7328 * 5 seconds. This can be overridden by entries in ssd.conf or the 7329 * device config table. 7330 */ 7331 un->un_reserve_release_time = 5; 7332 7333 /* 7334 * Set up the default maximum transfer size. Note that this may 7335 * get updated later in the attach, when setting up default wide 7336 * operations for disks. 7337 */ 7338 #if defined(__i386) || defined(__amd64) 7339 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 7340 un->un_partial_dma_supported = 1; 7341 #else 7342 un->un_max_xfer_size = (uint_t)maxphys; 7343 #endif 7344 7345 /* 7346 * Get "allow bus device reset" property (defaults to "enabled" if 7347 * the property was not defined). This is to disable bus resets for 7348 * certain kinds of error recovery. Note: In the future when a run-time 7349 * fibre check is available the soft state flag should default to 7350 * enabled.
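 *
 * For example, a hypothetical driver.conf fragment (property name
 * taken from the lookup just below) to turn bus device resets off
 * for non-fibre configurations might read:
 *
 *	allow-bus-device-reset=0;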
7351 */ 7352 if (un->un_f_is_fibre == TRUE) { 7353 un->un_f_allow_bus_device_reset = TRUE; 7354 } else { 7355 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7356 "allow-bus-device-reset", 1) != 0) { 7357 un->un_f_allow_bus_device_reset = TRUE; 7358 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7359 "sd_unit_attach: un:0x%p Bus device reset " 7360 "enabled\n", un); 7361 } else { 7362 un->un_f_allow_bus_device_reset = FALSE; 7363 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7364 "sd_unit_attach: un:0x%p Bus device reset " 7365 "disabled\n", un); 7366 } 7367 } 7368 7369 /* 7370 * Check if this is an ATAPI device. ATAPI devices use Group 1 7371 * Read/Write commands and Group 2 Mode Sense/Select commands. 7372 * 7373 * Note: The "obsolete" way of doing this is to check for the "atapi" 7374 * property. The new "variant" property with a value of "atapi" has been 7375 * introduced so that future 'variants' of standard SCSI behavior (like 7376 * atapi) could be specified by the underlying HBA drivers by supplying 7377 * a new value for the "variant" property, instead of having to define a 7378 * new property. 7379 */ 7380 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7381 un->un_f_cfg_is_atapi = TRUE; 7382 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7383 "sd_unit_attach: un:0x%p Atapi device\n", un); 7384 } 7385 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7386 &variantp) == DDI_PROP_SUCCESS) { 7387 if (strcmp(variantp, "atapi") == 0) { 7388 un->un_f_cfg_is_atapi = TRUE; 7389 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7390 "sd_unit_attach: un:0x%p Atapi device\n", un); 7391 } 7392 ddi_prop_free(variantp); 7393 } 7394 7395 un->un_cmd_timeout = SD_IO_TIME; 7396 7397 un->un_busy_timeout = SD_BSY_TIMEOUT; 7398 7399 /* Info on current states, statuses, etc. (Updated frequently) */ 7400 un->un_state = SD_STATE_NORMAL; 7401 un->un_last_state = SD_STATE_NORMAL; 7402 7403 /* Control & status info for command throttling */ 7404 un->un_throttle = sd_max_throttle; 7405 un->un_saved_throttle = sd_max_throttle; 7406 un->un_min_throttle = sd_min_throttle; 7407 7408 if (un->un_f_is_fibre == TRUE) { 7409 un->un_f_use_adaptive_throttle = TRUE; 7410 } else { 7411 un->un_f_use_adaptive_throttle = FALSE; 7412 } 7413 7414 /* Removable media support. */ 7415 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7416 un->un_mediastate = DKIO_NONE; 7417 un->un_specified_mediastate = DKIO_NONE; 7418 7419 /* CVs for suspend/resume (PM or DR) */ 7420 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7421 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7422 7423 /* Power management support. */ 7424 un->un_power_level = SD_SPINDLE_UNINIT; 7425 7426 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 7427 un->un_f_wcc_inprog = 0; 7428 7429 /* 7430 * The open/close semaphore is used to serialize threads executing 7431 * in the driver's open & close entry point routines for a given 7432 * instance. 7433 */ 7434 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7435 7436 /* 7437 * The conf file entry and softstate variable are forceful overrides, 7438 * meaning a non-zero value must be entered to change the default. 7439 */ 7440 un->un_f_disksort_disabled = FALSE; 7441 un->un_f_rmw_type = SD_RMW_TYPE_DEFAULT; 7442 7443 /* 7444 * Retrieve the properties from the static driver table or the driver 7445 * configuration file (.conf) for this unit and update the soft state 7446 * for the device as needed for the indicated properties.
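 *
 * For illustration only, a hypothetical sd.conf override might look
 * like the following (consult the sd.conf documentation for the
 * exact entry format supported by this release; the vid/pid string
 * and tunable shown here are made up):
 *
 *	sd-config-list = "VENDOR  PRODUCT", "retries-busy:5";
 *
 * where the first string is matched against the INQUIRY vid/pid and
 * the second carries the per-device tunables.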
7447 * Note: the property configuration needs to occur here as some of the 7448 * following routines may have dependencies on soft state flags set 7449 * as part of the driver property configuration. 7450 */ 7451 sd_read_unit_properties(un); 7452 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7453 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 7454 7455 /* 7456 * A device is treated as hotpluggable only if it has the 7457 * "hotpluggable" property. Otherwise, it is regarded as 7458 * non-hotpluggable. 7459 */ 7460 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 7461 -1) != -1) { 7462 un->un_f_is_hotpluggable = TRUE; 7463 } 7464 7465 /* 7466 * Set the unit's attributes (flags) according to "hotpluggable" and 7467 * the RMB bit in the INQUIRY data. 7468 */ 7469 sd_set_unit_attributes(un, devi); 7470 7471 /* 7472 * By default, we mark the capacity, lbasize, and geometry 7473 * as invalid. Only if we successfully read a valid capacity 7474 * will we update the un_blockcount and un_tgt_blocksize with the 7475 * valid values (the geometry will be validated later). 7476 */ 7477 un->un_f_blockcount_is_valid = FALSE; 7478 un->un_f_tgt_blocksize_is_valid = FALSE; 7479 7480 /* 7481 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7482 * otherwise. 7483 */ 7484 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7485 un->un_blockcount = 0; 7486 7487 /* 7488 * Set up the per-instance info needed to determine the correct 7489 * CDBs and other info for issuing commands to the target. 7490 */ 7491 sd_init_cdb_limits(un); 7492 7493 /* 7494 * Set up the IO chains to use, based upon the target type. 7495 */ 7496 if (un->un_f_non_devbsize_supported) { 7497 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7498 } else { 7499 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7500 } 7501 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7502 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7503 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7504 7505 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7506 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7507 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7508 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7509 7510 7511 if (ISCD(un)) { 7512 un->un_additional_codes = sd_additional_codes; 7513 } else { 7514 un->un_additional_codes = NULL; 7515 } 7516 7517 /* 7518 * Create the kstats here so they can be available for attach-time 7519 * routines that send commands to the unit (either polled or via 7520 * sd_send_scsi_cmd). 7521 * 7522 * Note: This is a critical sequence that needs to be maintained: 7523 * 1) Instantiate the kstats here, before any routines using the 7524 * iopath (i.e. sd_send_scsi_cmd). 7525 * 2) Instantiate and initialize the partition stats 7526 * (sd_set_pstats). 7527 * 3) Initialize the error stats (sd_set_errstats), following 7528 * sd_validate_geometry(), sd_register_devid(), 7529 * and sd_cache_control().
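 *
 * (For reference: the per-unit I/O kstat created here is what feeds
 * the iostat(1M) device columns, and the error stats created just
 * below are what "iostat -E" reports, as noted where the capacity
 * error stat is set later in this file.)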
7530 */ 7531 7532 un->un_stats = kstat_create(sd_label, instance, 7533 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7534 if (un->un_stats != NULL) { 7535 un->un_stats->ks_lock = SD_MUTEX(un); 7536 kstat_install(un->un_stats); 7537 } 7538 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7539 "sd_unit_attach: un:0x%p un_stats created\n", un); 7540 7541 sd_create_errstats(un, instance); 7542 if (un->un_errstats == NULL) { 7543 goto create_errstats_failed; 7544 } 7545 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7546 "sd_unit_attach: un:0x%p errstats created\n", un); 7547 7548 /* 7549 * The following if/else code was relocated here from below as part 7550 * of the fix for bug (4430280). However, with the default setup added 7551 * on entry to this routine, it's no longer absolutely necessary for 7552 * this to be before the call to sd_spin_up_unit. 7553 */ 7554 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7555 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7556 (devp->sd_inq->inq_ansi == 5)) && 7557 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7558 7559 /* 7560 * If tagged queueing is supported by the target 7561 * and by the host adapter, then we will enable it. 7562 */ 7563 un->un_tagflags = 0; 7564 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7565 (un->un_f_arq_enabled == TRUE)) { 7566 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7567 1, 1) == 1) { 7568 un->un_tagflags = FLAG_STAG; 7569 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7570 "sd_unit_attach: un:0x%p tag queueing " 7571 "enabled\n", un); 7572 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7573 "untagged-qing", 0) == 1) { 7574 un->un_f_opt_queueing = TRUE; 7575 un->un_saved_throttle = un->un_throttle = 7576 min(un->un_throttle, 3); 7577 } else { 7578 un->un_f_opt_queueing = FALSE; 7579 un->un_saved_throttle = un->un_throttle = 1; 7580 } 7581 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7582 == 1) && (un->un_f_arq_enabled == TRUE)) { 7583 /* The Host Adapter supports internal queueing. */ 7584 un->un_f_opt_queueing = TRUE; 7585 un->un_saved_throttle = un->un_throttle = 7586 min(un->un_throttle, 3); 7587 } else { 7588 un->un_f_opt_queueing = FALSE; 7589 un->un_saved_throttle = un->un_throttle = 1; 7590 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7591 "sd_unit_attach: un:0x%p no tag queueing\n", un); 7592 } 7593 7594 /* 7595 * Enable large transfers for SATA/SAS drives 7596 */ 7597 if (SD_IS_SERIAL(un)) { 7598 un->un_max_xfer_size = 7599 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7600 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7601 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7602 "sd_unit_attach: un:0x%p max transfer " 7603 "size=0x%x\n", un, un->un_max_xfer_size); 7604 7605 } 7606 7607 /* Set up or tear down default wide operations for disks */ 7608 7609 /* 7610 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 7611 * and "ssd_max_xfer_size" to exist simultaneously on the same 7612 * system and be set to different values. In the future this 7613 * code may need to be updated when the ssd module is 7614 * obsoleted and removed from the system. (4299588)
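 *
 * As a hypothetical sd.conf fragment (shown for the parallel SCSI
 * build; the ssd build would read ssd_max_xfer_size instead, per
 * the property names defined for the two modules):
 *
 *	sd_max_xfer_size=0x100000;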
7615 */ 7616 if (SD_IS_PARALLEL_SCSI(un) && 7617 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 7618 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 7619 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7620 1, 1) == 1) { 7621 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7622 "sd_unit_attach: un:0x%p Wide Transfer " 7623 "enabled\n", un); 7624 } 7625 7626 /* 7627 * If tagged queuing has also been enabled, then 7628 * enable large xfers 7629 */ 7630 if (un->un_saved_throttle == sd_max_throttle) { 7631 un->un_max_xfer_size = 7632 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7633 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7634 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7635 "sd_unit_attach: un:0x%p max transfer " 7636 "size=0x%x\n", un, un->un_max_xfer_size); 7637 } 7638 } else { 7639 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7640 0, 1) == 1) { 7641 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7642 "sd_unit_attach: un:0x%p " 7643 "Wide Transfer disabled\n", un); 7644 } 7645 } 7646 } else { 7647 un->un_tagflags = FLAG_STAG; 7648 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7649 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7650 } 7651 7652 /* 7653 * If this target supports LUN reset, try to enable it. 7654 */ 7655 if (un->un_f_lun_reset_enabled) { 7656 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7657 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7658 "un:0x%p lun_reset capability set\n", un); 7659 } else { 7660 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7661 "un:0x%p lun-reset capability not set\n", un); 7662 } 7663 } 7664 7665 /* 7666 * Adjust the maximum transfer size. This is to fix 7667 * the problem of partial DMA support on SPARC. Some 7668 * HBA drivers, such as aac, have a very small dma_attr_maxxfer 7669 * size, which requires partial DMA support on SPARC. 7670 * In the future the SPARC pci nexus driver may solve 7671 * the problem instead of this fix. 7672 */ 7673 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7674 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7675 /* We need DMA partial even on sparc to ensure sddump() works */ 7676 un->un_max_xfer_size = max_xfer_size; 7677 if (un->un_partial_dma_supported == 0) 7678 un->un_partial_dma_supported = 1; 7679 } 7680 if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 7681 DDI_PROP_DONTPASS, "buf_break", 0) == 1) { 7682 if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr, 7683 un->un_max_xfer_size) == 1) { 7684 un->un_buf_breakup_supported = 1; 7685 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7686 "un:0x%p Buf breakup enabled\n", un); 7687 } 7688 } 7689 7690 /* 7691 * Set PKT_DMA_PARTIAL flag. 7692 */ 7693 if (un->un_partial_dma_supported == 1) { 7694 un->un_pkt_flags = PKT_DMA_PARTIAL; 7695 } else { 7696 un->un_pkt_flags = 0; 7697 } 7698 7699 /* Initialize sd_ssc_t for internal uscsi commands */ 7700 ssc = sd_ssc_init(un); 7701 scsi_fm_init(devp); 7702 7703 /* 7704 * Allocate memory for the SCSI FMA structures. 7705 */ 7706 un->un_fm_private = 7707 kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP); 7708 sfip = (struct sd_fm_internal *)un->un_fm_private; 7709 sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd; 7710 sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo; 7711 sfip->fm_ssc.ssc_un = un; 7712 7713 if (ISCD(un) || 7714 un->un_f_has_removable_media || 7715 devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) { 7716 /* 7717 * We don't touch CDROMs or DDI_FM_NOT_CAPABLE devices. 7718 * Their logging is unchanged.
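 *
 * (See the "fm-scsi-log" handling just below for FM-capable
 * devices; a hypothetical sd.conf line such as fm-scsi-log=1;
 * would route the FM telemetry into /var/adm/messages as well,
 * per the property lookup that follows.)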
7719 */ 7720 sfip->fm_log_level = SD_FM_LOG_NSUP; 7721 } else { 7722 /* 7723 * If we get here, the device is a non-CDROM, FM-capable 7724 * device, and it will not keep the old scsi_log output as 7725 * before in /var/adm/messages. However, the property 7726 * "fm-scsi-log" will control whether the FM telemetry will 7727 * be logged in /var/adm/messages. 7728 */ 7729 int fm_scsi_log; 7730 fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 7731 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0); 7732 7733 if (fm_scsi_log) 7734 sfip->fm_log_level = SD_FM_LOG_EREPORT; 7735 else 7736 sfip->fm_log_level = SD_FM_LOG_SILENT; 7737 } 7738 7739 /* 7740 * At this point in the attach, we have enough info in the 7741 * soft state to be able to issue commands to the target. 7742 * 7743 * All command paths used below MUST issue their commands as 7744 * SD_PATH_DIRECT. This is important as intermediate layers 7745 * are not all initialized yet (such as PM). 7746 */ 7747 7748 /* 7749 * Send a TEST UNIT READY command to the device. This should clear 7750 * any outstanding UNIT ATTENTION that may be present. 7751 * 7752 * Note: Don't check for success, just track if there is a reservation; 7753 * this is a throwaway command to clear any unit attentions. 7754 * 7755 * Note: This MUST be the first command issued to the target during 7756 * attach to ensure power on UNIT ATTENTIONS are cleared. 7757 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7758 * with attempts at spinning up a device with no media. 7759 */ 7760 status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 7761 if (status != 0) { 7762 if (status == EACCES) 7763 reservation_flag = SD_TARGET_IS_RESERVED; 7764 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7765 } 7766 7767 /* 7768 * If the device is NOT a removable media device, attempt to spin 7769 * it up (using the START_STOP_UNIT command) and read its capacity 7770 * (using the READ CAPACITY command). Note, however, that either 7771 * of these could fail and in some cases we would continue with 7772 * the attach despite the failure (see below). 7773 */ 7774 if (un->un_f_descr_format_supported) { 7775 7776 switch (sd_spin_up_unit(ssc)) { 7777 case 0: 7778 /* 7779 * Spin-up was successful; now try to read the 7780 * capacity. If successful then save the results 7781 * and mark the capacity & lbasize as valid. 7782 */ 7783 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7784 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7785 7786 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 7787 &lbasize, SD_PATH_DIRECT); 7788 7789 switch (status) { 7790 case 0: { 7791 if (capacity > DK_MAX_BLOCKS) { 7792 #ifdef _LP64 7793 if ((capacity + 1) > 7794 SD_GROUP1_MAX_ADDRESS) { 7795 /* 7796 * Enable descriptor format 7797 * sense data so that we can 7798 * get 64 bit sense data 7799 * fields. 7800 */ 7801 sd_enable_descr_sense(ssc); 7802 } 7803 #else 7804 /* 32-bit kernels can't handle this */ 7805 scsi_log(SD_DEVINFO(un), 7806 sd_label, CE_WARN, 7807 "disk has %llu blocks, which " 7808 "is too large for a 32-bit " 7809 "kernel", capacity); 7810 7811 #if defined(__i386) || defined(__amd64) 7812 /* 7813 * A 1TB disk was treated as (1T - 512)B 7814 * in the past, so it might have a 7815 * valid VTOC and Solaris partitions; 7816 * we have to allow it to continue to 7817 * work.
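 *
 * (Assuming DK_MAX_BLOCKS is 2^31 - 1, with 512-byte blocks this
 * boundary falls at roughly 1 TB, which is why exactly-1TB disks
 * are special-cased here.)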
7818 */ 7819 if (capacity - 1 > DK_MAX_BLOCKS) 7820 #endif 7821 goto spinup_failed; 7822 #endif 7823 } 7824 7825 /* 7826 * It is not necessary to check here whether 7827 * the capacity of the device is bigger than 7828 * what the max hba cdb can support, because 7829 * sd_send_scsi_READ_CAPACITY will retrieve 7830 * the capacity by sending a USCSI command, which 7831 * is constrained by the max hba cdb. In fact, 7832 * sd_send_scsi_READ_CAPACITY will return 7833 * EINVAL when a bigger cdb than the required 7834 * cdb length is used. That case is handled in 7835 * "case EINVAL" below. 7836 */ 7837 7838 /* 7839 * The following relies on 7840 * sd_send_scsi_READ_CAPACITY never 7841 * returning 0 for capacity and/or lbasize. 7842 */ 7843 sd_update_block_info(un, lbasize, capacity); 7844 7845 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7846 "sd_unit_attach: un:0x%p capacity = %ld " 7847 "blocks; lbasize= %ld.\n", un, 7848 un->un_blockcount, un->un_tgt_blocksize); 7849 7850 break; 7851 } 7852 case EINVAL: 7853 /* 7854 * In the case where the max-cdb-length property 7855 * is smaller than the required CDB length for 7856 * a SCSI device, a target driver can fail to 7857 * attach to that device. 7858 */ 7859 scsi_log(SD_DEVINFO(un), 7860 sd_label, CE_WARN, 7861 "disk capacity is too large " 7862 "for current cdb length"); 7863 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7864 7865 goto spinup_failed; 7866 case EACCES: 7867 /* 7868 * Should never get here if the spin-up 7869 * succeeded, but code it in anyway. 7870 * From here, just continue with the attach... 7871 */ 7872 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7873 "sd_unit_attach: un:0x%p " 7874 "sd_send_scsi_READ_CAPACITY " 7875 "returned reservation conflict\n", un); 7876 reservation_flag = SD_TARGET_IS_RESERVED; 7877 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7878 break; 7879 default: 7880 /* 7881 * Likewise, should never get here if the 7882 * spin-up succeeded. Just continue with 7883 * the attach... 7884 */ 7885 if (status == EIO) 7886 sd_ssc_assessment(ssc, 7887 SD_FMT_STATUS_CHECK); 7888 else 7889 sd_ssc_assessment(ssc, 7890 SD_FMT_IGNORE); 7891 break; 7892 } 7893 break; 7894 case EACCES: 7895 /* 7896 * Device is reserved by another host. In this case 7897 * we could not spin it up or read the capacity, but 7898 * we continue with the attach anyway. 7899 */ 7900 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7901 "sd_unit_attach: un:0x%p spin-up reservation " 7902 "conflict.\n", un); 7903 reservation_flag = SD_TARGET_IS_RESERVED; 7904 break; 7905 default: 7906 /* Fail the attach if the spin-up failed.
*/ 7907 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7908 "sd_unit_attach: un:0x%p spin-up failed.", un); 7909 goto spinup_failed; 7910 } 7911 7912 } 7913 7914 /* 7915 * Check to see if this is an MMC drive 7916 */ 7917 if (ISCD(un)) { 7918 sd_set_mmc_caps(ssc); 7919 } 7920 7921 7922 /* 7923 * Add a zero-length attribute to tell the world we support 7924 * kernel ioctls (for layered drivers) 7925 */ 7926 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7927 DDI_KERNEL_IOCTL, NULL, 0); 7928 7929 /* 7930 * Add a boolean property to tell the world we support 7931 * the B_FAILFAST flag (for layered drivers) 7932 */ 7933 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7934 "ddi-failfast-supported", NULL, 0); 7935 7936 /* 7937 * Initialize power management 7938 */ 7939 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7940 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7941 sd_setup_pm(ssc, devi); 7942 if (un->un_f_pm_is_enabled == FALSE) { 7943 /* 7944 * For performance, point to a jump table that does 7945 * not include pm. 7946 * The direct and priority chains don't change with PM. 7947 * 7948 * Note: this is currently done based on individual device 7949 * capabilities. When an interface for determining system 7950 * power enabled state becomes available, or when additional 7951 * layers are added to the command chain, these values will 7952 * have to be re-evaluated for correctness. 7953 */ 7954 if (un->un_f_non_devbsize_supported) { 7955 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7956 } else { 7957 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7958 } 7959 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7960 } 7961 7962 /* 7963 * This property is set to 0 by HA software to avoid retries 7964 * on a reserved disk. (The preferred property name is 7965 * "retry-on-reservation-conflict".) (1189689) 7966 * 7967 * Note: The use of a global here can have unintended consequences. A 7968 * per-instance variable is preferable to match the capabilities of 7969 * different underlying HBAs. (4402600) 7970 */ 7971 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7972 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7973 sd_retry_on_reservation_conflict); 7974 if (sd_retry_on_reservation_conflict != 0) { 7975 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7976 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7977 sd_retry_on_reservation_conflict); 7978 } 7979 7980 /* Set up options for QFULL handling. */ 7981 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7982 "qfull-retries", -1)) != -1) { 7983 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7984 rval, 1); 7985 } 7986 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7987 "qfull-retry-interval", -1)) != -1) { 7988 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7989 rval, 1); 7990 } 7991 7992 /* 7993 * This just prints a message that announces the existence of the 7994 * device. The message is always printed in the system logfile, but 7995 * only appears on the console if the system is booted with the 7996 * -v (verbose) argument.
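 *
 * (The announcement typically looks something like, e.g.,
 * "sd0 at mpt0: target 0 lun 0"; the exact form depends on the
 * parent nexus and is shown here for illustration only.)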
7997 */ 7998 ddi_report_dev(devi); 7999 8000 un->un_mediastate = DKIO_NONE; 8001 8002 cmlb_alloc_handle(&un->un_cmlbhandle); 8003 8004 #if defined(__i386) || defined(__amd64) 8005 /* 8006 * On x86, compensate for off-by-1 legacy error 8007 */ 8008 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 8009 (lbasize == un->un_sys_blocksize)) 8010 offbyone = CMLB_OFF_BY_ONE; 8011 #endif 8012 8013 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 8014 VOID2BOOLEAN(un->un_f_has_removable_media != 0), 8015 VOID2BOOLEAN(un->un_f_is_hotpluggable != 0), 8016 un->un_node_type, offbyone, un->un_cmlbhandle, 8017 (void *)SD_PATH_DIRECT) != 0) { 8018 goto cmlb_attach_failed; 8019 } 8020 8021 8022 /* 8023 * Read and validate the device's geometry (ie, disk label) 8024 * A new unformatted drive will not have a valid geometry, but 8025 * the driver needs to successfully attach to this device so 8026 * the drive can be formatted via ioctls. 8027 */ 8028 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 8029 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 8030 8031 mutex_enter(SD_MUTEX(un)); 8032 8033 /* 8034 * Read and initialize the devid for the unit. 8035 */ 8036 if (un->un_f_devid_supported) { 8037 sd_register_devid(ssc, devi, reservation_flag); 8038 } 8039 mutex_exit(SD_MUTEX(un)); 8040 8041 #if (defined(__fibre)) 8042 /* 8043 * Register callbacks for fibre only. You can't do this solely 8044 * on the basis of the devid_type because this is hba specific. 8045 * We need to query our hba capabilities to find out whether to 8046 * register or not. 8047 */ 8048 if (un->un_f_is_fibre) { 8049 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 8050 sd_init_event_callbacks(un); 8051 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8052 "sd_unit_attach: un:0x%p event callbacks inserted", 8053 un); 8054 } 8055 } 8056 #endif 8057 8058 if (un->un_f_opt_disable_cache == TRUE) { 8059 /* 8060 * Disable both read cache and write cache. This is 8061 * the historic behavior of the keywords in the config file. 8062 */ 8063 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 8064 0) { 8065 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8066 "sd_unit_attach: un:0x%p Could not disable " 8067 "caching", un); 8068 goto devid_failed; 8069 } 8070 } 8071 8072 /* 8073 * Check the value of the WCE bit now and 8074 * set un_f_write_cache_enabled accordingly. 8075 */ 8076 (void) sd_get_write_cache_enabled(ssc, &wc_enabled); 8077 mutex_enter(SD_MUTEX(un)); 8078 un->un_f_write_cache_enabled = (wc_enabled != 0); 8079 mutex_exit(SD_MUTEX(un)); 8080 8081 if (un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR && 8082 un->un_tgt_blocksize != DEV_BSIZE) { 8083 if (!(un->un_wm_cache)) { 8084 (void) snprintf(name_str, sizeof (name_str), 8085 "%s%d_cache", 8086 ddi_driver_name(SD_DEVINFO(un)), 8087 ddi_get_instance(SD_DEVINFO(un))); 8088 un->un_wm_cache = kmem_cache_create( 8089 name_str, sizeof (struct sd_w_map), 8090 8, sd_wm_cache_constructor, 8091 sd_wm_cache_destructor, NULL, 8092 (void *)un, NULL, 0); 8093 if (!(un->un_wm_cache)) { 8094 goto wm_cache_failed; 8095 } 8096 } 8097 } 8098 8099 /* 8100 * Check the value of the NV_SUP bit and set 8101 * un_f_suppress_cache_flush accordingly. 8102 */ 8103 sd_get_nv_sup(ssc); 8104 8105 /* 8106 * Find out what type of reservation this disk supports. 8107 */ 8108 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL); 8109 8110 switch (status) { 8111 case 0: 8112 /* 8113 * SCSI-3 reservations are supported. 
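 * (SD_READ_KEYS is a PERSISTENT RESERVE IN service action; a
 * device that accepts it is assumed to support SCSI-3 persistent
 * reservations.)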
8114 */ 8115 un->un_reservation_type = SD_SCSI3_RESERVATION; 8116 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8117 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 8118 break; 8119 case ENOTSUP: 8120 /* 8121 * The PERSISTENT RESERVE IN command would not be recognized by 8122 * a SCSI-2 device, so assume the reservation type is SCSI-2. 8123 */ 8124 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8125 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 8126 un->un_reservation_type = SD_SCSI2_RESERVATION; 8127 8128 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8129 break; 8130 default: 8131 /* 8132 * default to SCSI-3 reservations 8133 */ 8134 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8135 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 8136 un->un_reservation_type = SD_SCSI3_RESERVATION; 8137 8138 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8139 break; 8140 } 8141 8142 /* 8143 * Set the pstat and error stat values here, so data obtained during the 8144 * previous attach-time routines is available. 8145 * 8146 * Note: This is a critical sequence that needs to be maintained: 8147 * 1) Instantiate the kstats before any routines using the iopath 8148 * (i.e. sd_send_scsi_cmd). 8149 * 2) Initialize the error stats (sd_set_errstats) and partition 8150 * stats (sd_set_pstats) here, following 8151 * cmlb_validate_geometry(), sd_register_devid(), and 8152 * sd_cache_control(). 8153 */ 8154 8155 if (un->un_f_pkstats_enabled && geom_label_valid) { 8156 sd_set_pstats(un); 8157 SD_TRACE(SD_LOG_IO_PARTITION, un, 8158 "sd_unit_attach: un:0x%p pstats created and set\n", un); 8159 } 8160 8161 sd_set_errstats(un); 8162 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8163 "sd_unit_attach: un:0x%p errstats set\n", un); 8164 8165 8166 /* 8167 * After successfully attaching an instance, we record the information 8168 * of how many luns have been attached on the relative target and 8169 * controller for parallel SCSI. This information is used when sd tries 8170 * to set the tagged queuing capability in HBA. 8171 */ 8172 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8173 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 8174 } 8175 8176 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8177 "sd_unit_attach: un:0x%p exit success\n", un); 8178 8179 /* Uninitialize sd_ssc_t pointer */ 8180 sd_ssc_fini(ssc); 8181 8182 return (DDI_SUCCESS); 8183 8184 /* 8185 * An error occurred during the attach; clean up & return failure. 8186 */ 8187 wm_cache_failed: 8188 devid_failed: 8189 8190 setup_pm_failed: 8191 ddi_remove_minor_node(devi, NULL); 8192 8193 cmlb_attach_failed: 8194 /* 8195 * Cleanup from the scsi_ifsetcap() calls (437868) 8196 */ 8197 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8198 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8199 8200 /* 8201 * Refer to the comments on setting tagged-qing at the beginning of 8202 * sd_unit_attach. We can only disable tagged queuing when there is 8203 * no lun attached on the target. 8204 */ 8205 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 8206 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8207 } 8208 8209 if (un->un_f_is_fibre == FALSE) { 8210 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8211 } 8212 8213 spinup_failed: 8214 8215 /* Uninitialize sd_ssc_t pointer */ 8216 sd_ssc_fini(ssc); 8217 8218 mutex_enter(SD_MUTEX(un)); 8219 8220 /* Deallocate SCSI FMA memory spaces */ 8221 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8222 8223 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd.
restart */ 8224 if (un->un_direct_priority_timeid != NULL) { 8225 timeout_id_t temp_id = un->un_direct_priority_timeid; 8226 un->un_direct_priority_timeid = NULL; 8227 mutex_exit(SD_MUTEX(un)); 8228 (void) untimeout(temp_id); 8229 mutex_enter(SD_MUTEX(un)); 8230 } 8231 8232 /* Cancel any pending start/stop timeouts */ 8233 if (un->un_startstop_timeid != NULL) { 8234 timeout_id_t temp_id = un->un_startstop_timeid; 8235 un->un_startstop_timeid = NULL; 8236 mutex_exit(SD_MUTEX(un)); 8237 (void) untimeout(temp_id); 8238 mutex_enter(SD_MUTEX(un)); 8239 } 8240 8241 /* Cancel any pending reset-throttle timeouts */ 8242 if (un->un_reset_throttle_timeid != NULL) { 8243 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8244 un->un_reset_throttle_timeid = NULL; 8245 mutex_exit(SD_MUTEX(un)); 8246 (void) untimeout(temp_id); 8247 mutex_enter(SD_MUTEX(un)); 8248 } 8249 8250 /* Cancel rmw warning message timeouts */ 8251 if (un->un_rmw_msg_timeid != NULL) { 8252 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8253 un->un_rmw_msg_timeid = NULL; 8254 mutex_exit(SD_MUTEX(un)); 8255 (void) untimeout(temp_id); 8256 mutex_enter(SD_MUTEX(un)); 8257 } 8258 8259 /* Cancel any pending retry timeouts */ 8260 if (un->un_retry_timeid != NULL) { 8261 timeout_id_t temp_id = un->un_retry_timeid; 8262 un->un_retry_timeid = NULL; 8263 mutex_exit(SD_MUTEX(un)); 8264 (void) untimeout(temp_id); 8265 mutex_enter(SD_MUTEX(un)); 8266 } 8267 8268 /* Cancel any pending delayed cv broadcast timeouts */ 8269 if (un->un_dcvb_timeid != NULL) { 8270 timeout_id_t temp_id = un->un_dcvb_timeid; 8271 un->un_dcvb_timeid = NULL; 8272 mutex_exit(SD_MUTEX(un)); 8273 (void) untimeout(temp_id); 8274 mutex_enter(SD_MUTEX(un)); 8275 } 8276 8277 mutex_exit(SD_MUTEX(un)); 8278 8279 /* There should not be any in-progress I/O so ASSERT this check */ 8280 ASSERT(un->un_ncmds_in_transport == 0); 8281 ASSERT(un->un_ncmds_in_driver == 0); 8282 8283 /* Do not free the softstate if the callback routine is active */ 8284 sd_sync_with_callback(un); 8285 8286 /* 8287 * Partition stats apparently are not used with removables. These would 8288 * not have been created during attach, so no need to clean them up... 8289 */ 8290 if (un->un_errstats != NULL) { 8291 kstat_delete(un->un_errstats); 8292 un->un_errstats = NULL; 8293 } 8294 8295 create_errstats_failed: 8296 8297 if (un->un_stats != NULL) { 8298 kstat_delete(un->un_stats); 8299 un->un_stats = NULL; 8300 } 8301 8302 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8303 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8304 8305 ddi_prop_remove_all(devi); 8306 sema_destroy(&un->un_semoclose); 8307 cv_destroy(&un->un_state_cv); 8308 8309 getrbuf_failed: 8310 8311 sd_free_rqs(un); 8312 8313 alloc_rqs_failed: 8314 8315 devp->sd_private = NULL; 8316 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8317 8318 get_softstate_failed: 8319 /* 8320 * Note: the man pages are unclear as to whether or not doing a 8321 * ddi_soft_state_free(sd_state, instance) is the right way to 8322 * clean up after the ddi_soft_state_zalloc() if the subsequent 8323 * ddi_get_soft_state() fails. The implication seems to be 8324 * that the get_soft_state cannot fail if the zalloc succeeds. 8325 */ 8326 #ifndef XPV_HVM_DRIVER 8327 ddi_soft_state_free(sd_state, instance); 8328 #endif /* !XPV_HVM_DRIVER */ 8329 8330 probe_failed: 8331 scsi_unprobe(devp); 8332 8333 return (DDI_FAILURE); 8334 } 8335 8336 8337 /* 8338 * Function: sd_unit_detach 8339 * 8340 * Description: Performs DDI_DETACH processing for sddetach(). 
8341 * 8342 * Return Code: DDI_SUCCESS 8343 * DDI_FAILURE 8344 * 8345 * Context: Kernel thread context 8346 */ 8347 8348 static int 8349 sd_unit_detach(dev_info_t *devi) 8350 { 8351 struct scsi_device *devp; 8352 struct sd_lun *un; 8353 int i; 8354 int tgt; 8355 dev_t dev; 8356 dev_info_t *pdip = ddi_get_parent(devi); 8357 #ifndef XPV_HVM_DRIVER 8358 int instance = ddi_get_instance(devi); 8359 #endif /* !XPV_HVM_DRIVER */ 8360 8361 mutex_enter(&sd_detach_mutex); 8362 8363 /* 8364 * Fail the detach for any of the following: 8365 * - Unable to get the sd_lun struct for the instance 8366 * - A layered driver has an outstanding open on the instance 8367 * - Another thread is already detaching this instance 8368 * - Another thread is currently performing an open 8369 */ 8370 devp = ddi_get_driver_private(devi); 8371 if ((devp == NULL) || 8372 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8373 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8374 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8375 mutex_exit(&sd_detach_mutex); 8376 return (DDI_FAILURE); 8377 } 8378 8379 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8380 8381 /* 8382 * Mark this instance as currently in a detach, to inhibit any 8383 * opens from a layered driver. 8384 */ 8385 un->un_detach_count++; 8386 mutex_exit(&sd_detach_mutex); 8387 8388 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 8389 SCSI_ADDR_PROP_TARGET, -1); 8390 8391 dev = sd_make_device(SD_DEVINFO(un)); 8392 8393 #ifndef lint 8394 _NOTE(COMPETING_THREADS_NOW); 8395 #endif 8396 8397 mutex_enter(SD_MUTEX(un)); 8398 8399 /* 8400 * Fail the detach if there are any outstanding layered 8401 * opens on this device. 8402 */ 8403 for (i = 0; i < NDKMAP; i++) { 8404 if (un->un_ocmap.lyropen[i] != 0) { 8405 goto err_notclosed; 8406 } 8407 } 8408 8409 /* 8410 * Verify there are NO outstanding commands issued to this device. 8411 * i.e., un_ncmds_in_transport == 0. 8412 * It's possible to have outstanding commands through the physio 8413 * code path, even though everything's closed. 8414 */ 8415 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8416 (un->un_direct_priority_timeid != NULL) || 8417 (un->un_state == SD_STATE_RWAIT)) { 8418 mutex_exit(SD_MUTEX(un)); 8419 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8420 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8421 goto err_stillbusy; 8422 } 8423 8424 /* 8425 * If we have the device reserved, release the reservation. 8426 */ 8427 if ((un->un_resvd_status & SD_RESERVE) && 8428 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8429 mutex_exit(SD_MUTEX(un)); 8430 /* 8431 * Note: sd_reserve_release sends a command to the device 8432 * via the sd_ioctlcmd() path, and can sleep. 8433 */ 8434 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8435 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8436 "sd_dr_detach: Cannot release reservation \n"); 8437 } 8438 } else { 8439 mutex_exit(SD_MUTEX(un)); 8440 } 8441 8442 /* 8443 * Untimeout any reserve recovery, throttle reset, restart unit, 8444 * and delayed broadcast timeout threads. Protect the timeout pointers 8445 * from getting nulled by their callback functions.
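 *
 * The cancellation pattern used repeatedly below is, as a sketch
 * (un_foo_timeid is a placeholder for the various un_*_timeid
 * fields):
 *
 *	timeout_id_t temp_id = un->un_foo_timeid;
 *	un->un_foo_timeid = NULL;
 *	mutex_exit(SD_MUTEX(un));
 *	(void) untimeout(temp_id);
 *	mutex_enter(SD_MUTEX(un));
 *
 * i.e. the id is snapshotted and the field NULLed under SD_MUTEX,
 * and the mutex is dropped across untimeout(9F) because the
 * callback itself may need to acquire it.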
8446 */ 8447 mutex_enter(SD_MUTEX(un)); 8448 if (un->un_resvd_timeid != NULL) { 8449 timeout_id_t temp_id = un->un_resvd_timeid; 8450 un->un_resvd_timeid = NULL; 8451 mutex_exit(SD_MUTEX(un)); 8452 (void) untimeout(temp_id); 8453 mutex_enter(SD_MUTEX(un)); 8454 } 8455 8456 if (un->un_reset_throttle_timeid != NULL) { 8457 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8458 un->un_reset_throttle_timeid = NULL; 8459 mutex_exit(SD_MUTEX(un)); 8460 (void) untimeout(temp_id); 8461 mutex_enter(SD_MUTEX(un)); 8462 } 8463 8464 if (un->un_startstop_timeid != NULL) { 8465 timeout_id_t temp_id = un->un_startstop_timeid; 8466 un->un_startstop_timeid = NULL; 8467 mutex_exit(SD_MUTEX(un)); 8468 (void) untimeout(temp_id); 8469 mutex_enter(SD_MUTEX(un)); 8470 } 8471 8472 if (un->un_rmw_msg_timeid != NULL) { 8473 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8474 un->un_rmw_msg_timeid = NULL; 8475 mutex_exit(SD_MUTEX(un)); 8476 (void) untimeout(temp_id); 8477 mutex_enter(SD_MUTEX(un)); 8478 } 8479 8480 if (un->un_dcvb_timeid != NULL) { 8481 timeout_id_t temp_id = un->un_dcvb_timeid; 8482 un->un_dcvb_timeid = NULL; 8483 mutex_exit(SD_MUTEX(un)); 8484 (void) untimeout(temp_id); 8485 } else { 8486 mutex_exit(SD_MUTEX(un)); 8487 } 8488 8489 /* Remove any pending reservation reclaim requests for this device */ 8490 sd_rmv_resv_reclaim_req(dev); 8491 8492 mutex_enter(SD_MUTEX(un)); 8493 8494 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8495 if (un->un_direct_priority_timeid != NULL) { 8496 timeout_id_t temp_id = un->un_direct_priority_timeid; 8497 un->un_direct_priority_timeid = NULL; 8498 mutex_exit(SD_MUTEX(un)); 8499 (void) untimeout(temp_id); 8500 mutex_enter(SD_MUTEX(un)); 8501 } 8502 8503 /* Cancel any active multi-host disk watch thread requests */ 8504 if (un->un_mhd_token != NULL) { 8505 mutex_exit(SD_MUTEX(un)); 8506 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8507 if (scsi_watch_request_terminate(un->un_mhd_token, 8508 SCSI_WATCH_TERMINATE_NOWAIT)) { 8509 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8510 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8511 /* 8512 * Note: We are returning here after having removed 8513 * some driver timeouts above. This is consistent with 8514 * the legacy implementation but perhaps the watch 8515 * terminate call should be made with the wait flag set. 8516 */ 8517 goto err_stillbusy; 8518 } 8519 mutex_enter(SD_MUTEX(un)); 8520 un->un_mhd_token = NULL; 8521 } 8522 8523 if (un->un_swr_token != NULL) { 8524 mutex_exit(SD_MUTEX(un)); 8525 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8526 if (scsi_watch_request_terminate(un->un_swr_token, 8527 SCSI_WATCH_TERMINATE_NOWAIT)) { 8528 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8529 "sd_dr_detach: Cannot cancel swr watch request\n"); 8530 /* 8531 * Note: We are returning here after having removed 8532 * some driver timeouts above. This is consistent with 8533 * the legacy implementation but perhaps the watch 8534 * terminate call should be made with the wait flag set. 8535 */ 8536 goto err_stillbusy; 8537 } 8538 mutex_enter(SD_MUTEX(un)); 8539 un->un_swr_token = NULL; 8540 } 8541 8542 mutex_exit(SD_MUTEX(un)); 8543 8544 /* 8545 * Clear any scsi_reset_notifies. We clear the reset notifies 8546 * even if we have not registered one. 8547 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX!
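 *
 * (SCSI_RESET_CANCEL removes a previously registered reset
 * notification callback; the call is made unconditionally here on
 * the assumption that cancelling an unregistered callback is
 * harmless.)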
8548 */ 8549 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 8550 sd_mhd_reset_notify_cb, (caddr_t)un); 8551 8552 /* 8553 * Protect the timeout pointers from getting nulled by 8554 * their callback functions during the cancellation process. 8555 * In such a scenario, untimeout can be invoked with a NULL value. 8556 */ 8557 _NOTE(NO_COMPETING_THREADS_NOW); 8558 8559 mutex_enter(&un->un_pm_mutex); 8560 if (un->un_pm_idle_timeid != NULL) { 8561 timeout_id_t temp_id = un->un_pm_idle_timeid; 8562 un->un_pm_idle_timeid = NULL; 8563 mutex_exit(&un->un_pm_mutex); 8564 8565 /* 8566 * Timeout is active; cancel it. 8567 * Note that it'll never be active on a device 8568 * that does not support PM; therefore we don't 8569 * have to check before calling pm_idle_component. 8570 */ 8571 (void) untimeout(temp_id); 8572 (void) pm_idle_component(SD_DEVINFO(un), 0); 8573 mutex_enter(&un->un_pm_mutex); 8574 } 8575 8576 /* 8577 * Check whether there is already a timeout scheduled for power 8578 * management. If so, then don't lower the power here; that's 8579 * the timeout handler's job. 8580 */ 8581 if (un->un_pm_timeid != NULL) { 8582 timeout_id_t temp_id = un->un_pm_timeid; 8583 un->un_pm_timeid = NULL; 8584 mutex_exit(&un->un_pm_mutex); 8585 /* 8586 * Timeout is active; cancel it. 8587 * Note that it'll never be active on a device 8588 * that does not support PM; therefore we don't 8589 * have to check before calling pm_idle_component. 8590 */ 8591 (void) untimeout(temp_id); 8592 (void) pm_idle_component(SD_DEVINFO(un), 0); 8593 8594 } else { 8595 mutex_exit(&un->un_pm_mutex); 8596 if ((un->un_f_pm_is_enabled == TRUE) && 8597 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 8598 DDI_SUCCESS)) { 8599 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8600 "sd_dr_detach: Lower power request failed, ignoring.\n"); 8601 /* 8602 * Fix for bug: 4297749, item # 13 8603 * The above test now includes a check to see if PM is 8604 * supported by this device before calling 8605 * pm_lower_power(). 8606 * Note: the following is not dead code. The call to 8607 * pm_lower_power above will generate a call back into 8608 * our sdpower routine which might result in a timeout 8609 * handler getting activated. Therefore the following 8610 * code is valid and necessary. 8611 */ 8612 mutex_enter(&un->un_pm_mutex); 8613 if (un->un_pm_timeid != NULL) { 8614 timeout_id_t temp_id = un->un_pm_timeid; 8615 un->un_pm_timeid = NULL; 8616 mutex_exit(&un->un_pm_mutex); 8617 (void) untimeout(temp_id); 8618 (void) pm_idle_component(SD_DEVINFO(un), 0); 8619 } else { 8620 mutex_exit(&un->un_pm_mutex); 8621 } 8622 } 8623 } 8624 8625 /* 8626 * Cleanup from the scsi_ifsetcap() calls (437868) 8627 * Relocated here from above to be after the call to 8628 * pm_lower_power, which was getting errors. 8629 */ 8630 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8631 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8632 8633 /* 8634 * Currently, tagged queuing is supported per-target by the HBA. 8635 * Setting this per lun instance actually sets the capability of this 8636 * target in the HBA, which affects those luns already attached on the 8637 * same target. So during detach, we can only disable this capability 8638 * when this is the only lun left on this target. By doing 8639 * this, we assume a target has the same tagged queuing capability 8640 * for every lun. The condition can be removed when the HBA is changed 8641 * to support per-lun tagged queuing capability.
8642 */ 8643 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 8644 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8645 } 8646 8647 if (un->un_f_is_fibre == FALSE) { 8648 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8649 } 8650 8651 /* 8652 * Remove any event callbacks, fibre only 8653 */ 8654 if (un->un_f_is_fibre == TRUE) { 8655 if ((un->un_insert_event != NULL) && 8656 (ddi_remove_event_handler(un->un_insert_cb_id) != 8657 DDI_SUCCESS)) { 8658 /* 8659 * Note: We are returning here after having done 8660 * substantial cleanup above. This is consistent 8661 * with the legacy implementation but this may not 8662 * be the right thing to do. 8663 */ 8664 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8665 "sd_dr_detach: Cannot cancel insert event\n"); 8666 goto err_remove_event; 8667 } 8668 un->un_insert_event = NULL; 8669 8670 if ((un->un_remove_event != NULL) && 8671 (ddi_remove_event_handler(un->un_remove_cb_id) != 8672 DDI_SUCCESS)) { 8673 /* 8674 * Note: We are returning here after having done 8675 * substantial cleanup above. This is consistent 8676 * with the legacy implementation but this may not 8677 * be the right thing to do. 8678 */ 8679 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8680 "sd_dr_detach: Cannot cancel remove event\n"); 8681 goto err_remove_event; 8682 } 8683 un->un_remove_event = NULL; 8684 } 8685 8686 /* Do not free the softstate if the callback routine is active */ 8687 sd_sync_with_callback(un); 8688 8689 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 8690 cmlb_free_handle(&un->un_cmlbhandle); 8691 8692 /* 8693 * Hold the detach mutex here, to make sure that no other threads ever 8694 * can access a (partially) freed soft state structure. 8695 */ 8696 mutex_enter(&sd_detach_mutex); 8697 8698 /* 8699 * Clean up the soft state struct. 8700 * Cleanup is done in reverse order of allocs/inits. 8701 * At this point there should be no competing threads anymore. 8702 */ 8703 8704 scsi_fm_fini(devp); 8705 8706 /* 8707 * Deallocate memory for SCSI FMA. 8708 */ 8709 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8710 8711 /* 8712 * Unregister and free device id if it was not registered 8713 * by the transport. 8714 */ 8715 if (un->un_f_devid_transport_defined == FALSE) 8716 ddi_devid_unregister(devi); 8717 8718 /* 8719 * Free the devid structure if it was allocated before (by 8720 * ddi_devid_init() or ddi_devid_get()). 8721 */ 8722 if (un->un_devid) { 8723 ddi_devid_free(un->un_devid); 8724 un->un_devid = NULL; 8725 } 8726 8727 /* 8728 * Destroy wmap cache if it exists. 8729 */ 8730 if (un->un_wm_cache != NULL) { 8731 kmem_cache_destroy(un->un_wm_cache); 8732 un->un_wm_cache = NULL; 8733 } 8734 8735 /* 8736 * kstat cleanup is done in detach for all device types (4363169). 8737 * We do not want to fail detach if the device kstats are not deleted 8738 * since there is confusion about the devo_refcnt for the device. 8739 * We just delete the kstats and let detach complete successfully.
8740 */ 8741 if (un->un_stats != NULL) { 8742 kstat_delete(un->un_stats); 8743 un->un_stats = NULL; 8744 } 8745 if (un->un_errstats != NULL) { 8746 kstat_delete(un->un_errstats); 8747 un->un_errstats = NULL; 8748 } 8749 8750 /* Remove partition stats */ 8751 if (un->un_f_pkstats_enabled) { 8752 for (i = 0; i < NSDMAP; i++) { 8753 if (un->un_pstats[i] != NULL) { 8754 kstat_delete(un->un_pstats[i]); 8755 un->un_pstats[i] = NULL; 8756 } 8757 } 8758 } 8759 8760 /* Remove xbuf registration */ 8761 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8762 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8763 8764 /* Remove driver properties */ 8765 ddi_prop_remove_all(devi); 8766 8767 mutex_destroy(&un->un_pm_mutex); 8768 cv_destroy(&un->un_pm_busy_cv); 8769 8770 cv_destroy(&un->un_wcc_cv); 8771 8772 /* Open/close semaphore */ 8773 sema_destroy(&un->un_semoclose); 8774 8775 /* Removable media condvar. */ 8776 cv_destroy(&un->un_state_cv); 8777 8778 /* Suspend/resume condvar. */ 8779 cv_destroy(&un->un_suspend_cv); 8780 cv_destroy(&un->un_disk_busy_cv); 8781 8782 sd_free_rqs(un); 8783 8784 /* Free up soft state */ 8785 devp->sd_private = NULL; 8786 8787 bzero(un, sizeof (struct sd_lun)); 8788 #ifndef XPV_HVM_DRIVER 8789 ddi_soft_state_free(sd_state, instance); 8790 #endif /* !XPV_HVM_DRIVER */ 8791 8792 mutex_exit(&sd_detach_mutex); 8793 8794 /* This frees up the INQUIRY data associated with the device. */ 8795 scsi_unprobe(devp); 8796 8797 /* 8798 * After successfully detaching an instance, we update the information 8799 * of how many luns have been attached in the relative target and 8800 * controller for parallel SCSI. This information is used when sd tries 8801 * to set the tagged queuing capability in HBA. 8802 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8803 * check if the device is parallel SCSI. However, we don't need to 8804 * check here because we've already checked during attach. No device 8805 * that is not parallel SCSI is in the chain. 8806 */ 8807 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8808 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8809 } 8810 8811 return (DDI_SUCCESS); 8812 8813 err_notclosed: 8814 mutex_exit(SD_MUTEX(un)); 8815 8816 err_stillbusy: 8817 _NOTE(NO_COMPETING_THREADS_NOW); 8818 8819 err_remove_event: 8820 mutex_enter(&sd_detach_mutex); 8821 un->un_detach_count--; 8822 mutex_exit(&sd_detach_mutex); 8823 8824 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 8825 return (DDI_FAILURE); 8826 } 8827 8828 8829 /* 8830 * Function: sd_create_errstats 8831 * 8832 * Description: This routine instantiates the device error stats. 8833 * 8834 * Note: During attach the stats are instantiated first so they are 8835 * available for attach-time routines that utilize the driver 8836 * iopath to send commands to the device. The stats are initialized 8837 * separately so data obtained during some attach-time routines is 8838 * available. 
(4362483) 8839 * 8840 * Arguments: un - driver soft state (unit) structure 8841 * instance - driver instance 8842 * 8843 * Context: Kernel thread context 8844 */ 8845 8846 static void 8847 sd_create_errstats(struct sd_lun *un, int instance) 8848 { 8849 struct sd_errstats *stp; 8850 char kstatmodule_err[KSTAT_STRLEN]; 8851 char kstatname[KSTAT_STRLEN]; 8852 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8853 8854 ASSERT(un != NULL); 8855 8856 if (un->un_errstats != NULL) { 8857 return; 8858 } 8859 8860 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8861 "%serr", sd_label); 8862 (void) snprintf(kstatname, sizeof (kstatname), 8863 "%s%d,err", sd_label, instance); 8864 8865 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8866 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8867 8868 if (un->un_errstats == NULL) { 8869 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8870 "sd_create_errstats: Failed kstat_create\n"); 8871 return; 8872 } 8873 8874 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8875 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8876 KSTAT_DATA_UINT32); 8877 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8878 KSTAT_DATA_UINT32); 8879 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8880 KSTAT_DATA_UINT32); 8881 kstat_named_init(&stp->sd_vid, "Vendor", 8882 KSTAT_DATA_CHAR); 8883 kstat_named_init(&stp->sd_pid, "Product", 8884 KSTAT_DATA_CHAR); 8885 kstat_named_init(&stp->sd_revision, "Revision", 8886 KSTAT_DATA_CHAR); 8887 kstat_named_init(&stp->sd_serial, "Serial No", 8888 KSTAT_DATA_CHAR); 8889 kstat_named_init(&stp->sd_capacity, "Size", 8890 KSTAT_DATA_ULONGLONG); 8891 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8892 KSTAT_DATA_UINT32); 8893 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8894 KSTAT_DATA_UINT32); 8895 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8896 KSTAT_DATA_UINT32); 8897 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8898 KSTAT_DATA_UINT32); 8899 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8900 KSTAT_DATA_UINT32); 8901 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8902 KSTAT_DATA_UINT32); 8903 8904 un->un_errstats->ks_private = un; 8905 un->un_errstats->ks_update = nulldev; 8906 8907 kstat_install(un->un_errstats); 8908 } 8909 8910 8911 /* 8912 * Function: sd_set_errstats 8913 * 8914 * Description: This routine sets the value of the vendor id, product id, 8915 * revision, serial number, and capacity device error stats. 8916 * 8917 * Note: During attach the stats are instantiated first so they are 8918 * available for attach-time routines that utilize the driver 8919 * iopath to send commands to the device. The stats are initialized 8920 * separately so data obtained during some attach-time routines is 8921 * available. 
(4362483) 8922 * 8923 * Arguments: un - driver soft state (unit) structure 8924 * 8925 * Context: Kernel thread context 8926 */ 8927 8928 static void 8929 sd_set_errstats(struct sd_lun *un) 8930 { 8931 struct sd_errstats *stp; 8932 8933 ASSERT(un != NULL); 8934 ASSERT(un->un_errstats != NULL); 8935 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8936 ASSERT(stp != NULL); 8937 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 8938 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 8939 (void) strncpy(stp->sd_revision.value.c, 8940 un->un_sd->sd_inq->inq_revision, 4); 8941 8942 /* 8943 * All the errstats are persistent across detach/attach, 8944 * so reset all of them here in case a disk drive has been 8945 * hot-swapped, except for Sun qualified drives whose serial 8946 * number has not changed. 8947 */ 8948 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 8949 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8950 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 8951 stp->sd_softerrs.value.ui32 = 0; 8952 stp->sd_harderrs.value.ui32 = 0; 8953 stp->sd_transerrs.value.ui32 = 0; 8954 stp->sd_rq_media_err.value.ui32 = 0; 8955 stp->sd_rq_ntrdy_err.value.ui32 = 0; 8956 stp->sd_rq_nodev_err.value.ui32 = 0; 8957 stp->sd_rq_recov_err.value.ui32 = 0; 8958 stp->sd_rq_illrq_err.value.ui32 = 0; 8959 stp->sd_rq_pfa_err.value.ui32 = 0; 8960 } 8961 8962 /* 8963 * Set the "Serial No" kstat for Sun qualified drives (indicated by 8964 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 8965 * (4376302)) 8966 */ 8967 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 8968 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8969 sizeof (SD_INQUIRY(un)->inq_serial)); 8970 } 8971 8972 if (un->un_f_blockcount_is_valid != TRUE) { 8973 /* 8974 * Set capacity error stat to 0 for no media. This ensures 8975 * a valid capacity is displayed in response to 'iostat -E' 8976 * when no media is present in the device. 8977 */ 8978 stp->sd_capacity.value.ui64 = 0; 8979 } else { 8980 /* 8981 * Multiply un_blockcount by un->un_sys_blocksize to get 8982 * capacity. 8983 * 8984 * Note: for non-512 blocksize devices "un_blockcount" has been 8985 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 8986 * (un_tgt_blocksize / un->un_sys_blocksize). 8987 */ 8988 stp->sd_capacity.value.ui64 = (uint64_t) 8989 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 8990 } 8991 } 8992 8993 8994 /* 8995 * Function: sd_set_pstats 8996 * 8997 * Description: This routine instantiates and initializes the partition 8998 * stats for each partition with more than zero blocks. 8999 * (4363169) 9000 * 9001 * Arguments: un - driver soft state (unit) structure 9002 * 9003 * Context: Kernel thread context 9004 */ 9005 9006 static void 9007 sd_set_pstats(struct sd_lun *un) 9008 { 9009 char kstatname[KSTAT_STRLEN]; 9010 int instance; 9011 int i; 9012 diskaddr_t nblks = 0; 9013 char *partname = NULL; 9014 9015 ASSERT(un != NULL); 9016 9017 instance = ddi_get_instance(SD_DEVINFO(un)); 9018 9019 /* Note:x86: is this a VTOC8/VTOC16 difference? 
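 *
 * As a worked example of the naming produced by the loop below
 * (illustrative values only): on instance 0, with the traditional
 * one-letter partition names returned by cmlb_partinfo(), the call
 *
 *	(void) snprintf(kstatname, sizeof (kstatname),
 *	    "%s%d,%s", sd_label, instance, partname);
 *
 * yields kstat names such as "sd0,a", "sd0,b", and so on, one per
 * partition with a non-zero block count.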
*/ 9020 for (i = 0; i < NSDMAP; i++) { 9021 9022 if (cmlb_partinfo(un->un_cmlbhandle, i, 9023 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0) 9024 continue; 9025 mutex_enter(SD_MUTEX(un)); 9026 9027 if ((un->un_pstats[i] == NULL) && 9028 (nblks != 0)) { 9029 9030 (void) snprintf(kstatname, sizeof (kstatname), 9031 "%s%d,%s", sd_label, instance, 9032 partname); 9033 9034 un->un_pstats[i] = kstat_create(sd_label, 9035 instance, kstatname, "partition", KSTAT_TYPE_IO, 9036 1, KSTAT_FLAG_PERSISTENT); 9037 if (un->un_pstats[i] != NULL) { 9038 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 9039 kstat_install(un->un_pstats[i]); 9040 } 9041 } 9042 mutex_exit(SD_MUTEX(un)); 9043 } 9044 } 9045 9046 9047 #if (defined(__fibre)) 9048 /* 9049 * Function: sd_init_event_callbacks 9050 * 9051 * Description: This routine initializes the insertion and removal event 9052 * callbacks. (fibre only) 9053 * 9054 * Arguments: un - driver soft state (unit) structure 9055 * 9056 * Context: Kernel thread context 9057 */ 9058 9059 static void 9060 sd_init_event_callbacks(struct sd_lun *un) 9061 { 9062 ASSERT(un != NULL); 9063 9064 if ((un->un_insert_event == NULL) && 9065 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 9066 &un->un_insert_event) == DDI_SUCCESS)) { 9067 /* 9068 * Add the callback for an insertion event 9069 */ 9070 (void) ddi_add_event_handler(SD_DEVINFO(un), 9071 un->un_insert_event, sd_event_callback, (void *)un, 9072 &(un->un_insert_cb_id)); 9073 } 9074 9075 if ((un->un_remove_event == NULL) && 9076 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 9077 &un->un_remove_event) == DDI_SUCCESS)) { 9078 /* 9079 * Add the callback for a removal event 9080 */ 9081 (void) ddi_add_event_handler(SD_DEVINFO(un), 9082 un->un_remove_event, sd_event_callback, (void *)un, 9083 &(un->un_remove_cb_id)); 9084 } 9085 } 9086 9087 9088 /* 9089 * Function: sd_event_callback 9090 * 9091 * Description: This routine handles insert/remove events (photon). The 9092 * state is changed to OFFLINE which can be used to suppress 9093 * error msgs. (fibre only) 9094 * 9095 * Arguments: un - driver soft state (unit) structure 9096 * 9097 * Context: Callout thread context 9098 */ 9099 /* ARGSUSED */ 9100 static void 9101 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 9102 void *bus_impldata) 9103 { 9104 struct sd_lun *un = (struct sd_lun *)arg; 9105 9106 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 9107 if (event == un->un_insert_event) { 9108 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 9109 mutex_enter(SD_MUTEX(un)); 9110 if (un->un_state == SD_STATE_OFFLINE) { 9111 if (un->un_last_state != SD_STATE_SUSPENDED) { 9112 un->un_state = un->un_last_state; 9113 } else { 9114 /* 9115 * We have gone through SUSPEND/RESUME while 9116 * we were offline. Restore the last state. 9117 */ 9118 un->un_state = un->un_save_state; 9119 } 9120 } 9121 mutex_exit(SD_MUTEX(un)); 9122 9123 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 9124 } else if (event == un->un_remove_event) { 9125 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 9126 mutex_enter(SD_MUTEX(un)); 9127 /* 9128 * We need to handle an event callback that occurs during 9129 * the suspend operation, since we don't prevent it.
9130 */ 9131 if (un->un_state != SD_STATE_OFFLINE) { 9132 if (un->un_state != SD_STATE_SUSPENDED) { 9133 New_state(un, SD_STATE_OFFLINE); 9134 } else { 9135 un->un_last_state = SD_STATE_OFFLINE; 9136 } 9137 } 9138 mutex_exit(SD_MUTEX(un)); 9139 } else { 9140 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 9141 "!Unknown event\n"); 9142 } 9143 9144 } 9145 #endif 9146 9147 /* 9148 * Function: sd_cache_control() 9149 * 9150 * Description: This routine is the driver entry point for setting 9151 * read and write caching by modifying the WCE (write cache 9152 * enable) and RCD (read cache disable) bits of mode 9153 * page 8 (MODEPAGE_CACHING). 9154 * 9155 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 9156 * structure for this target. 9157 * rcd_flag - flag for controlling the read cache 9158 * wce_flag - flag for controlling the write cache 9159 * 9160 * Return Code: EIO 9161 * code returned by sd_send_scsi_MODE_SENSE and 9162 * sd_send_scsi_MODE_SELECT 9163 * 9164 * Context: Kernel Thread 9165 */ 9166 9167 static int 9168 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag) 9169 { 9170 struct mode_caching *mode_caching_page; 9171 uchar_t *header; 9172 size_t buflen; 9173 int hdrlen; 9174 int bd_len; 9175 int rval = 0; 9176 struct mode_header_grp2 *mhp; 9177 struct sd_lun *un; 9178 int status; 9179 9180 ASSERT(ssc != NULL); 9181 un = ssc->ssc_un; 9182 ASSERT(un != NULL); 9183 9184 /* 9185 * Do a test unit ready, otherwise a mode sense may not work if this 9186 * is the first command sent to the device after boot. 9187 */ 9188 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 9189 if (status != 0) 9190 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9191 9192 if (un->un_f_cfg_is_atapi == TRUE) { 9193 hdrlen = MODE_HEADER_LENGTH_GRP2; 9194 } else { 9195 hdrlen = MODE_HEADER_LENGTH; 9196 } 9197 9198 /* 9199 * Allocate memory for the retrieved mode page and its headers. Set 9200 * a pointer to the page itself. Use mode_cache_scsi3 to ensure 9201 * we get all of the mode sense data; otherwise, the mode select 9202 * will fail. mode_cache_scsi3 is a superset of mode_caching. 9203 */ 9204 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 9205 sizeof (struct mode_cache_scsi3); 9206 9207 header = kmem_zalloc(buflen, KM_SLEEP); 9208 9209 /* Get the information from the device. */ 9210 if (un->un_f_cfg_is_atapi == TRUE) { 9211 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 9212 MODEPAGE_CACHING, SD_PATH_DIRECT); 9213 } else { 9214 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 9215 MODEPAGE_CACHING, SD_PATH_DIRECT); 9216 } 9217 9218 if (rval != 0) { 9219 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9220 "sd_cache_control: Mode Sense Failed\n"); 9221 goto mode_sense_failed; 9222 } 9223 9224 /* 9225 * Determine size of Block Descriptors in order to locate 9226 * the mode page data. ATAPI devices return 0, SCSI devices 9227 * should return MODE_BLK_DESC_LENGTH.
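 *
 * As an illustrative sketch of the layout being parsed (the usual
 * sizes are shown; the code itself only trusts the bd_len actually
 * returned by the device):
 *
 *	+------------------------+  offset 0
 *	| mode parameter header  |  hdrlen bytes
 *	+------------------------+  offset hdrlen
 *	| block descriptor(s)    |  bd_len bytes (0 for ATAPI)
 *	+------------------------+  offset hdrlen + bd_len
 *	| caching mode page      |
 *	+------------------------+
 *
 * which is why the page pointer is computed below as
 *
 *	mode_caching_page =
 *	    (struct mode_caching *)(header + hdrlen + bd_len);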
9228 */ 9229 if (un->un_f_cfg_is_atapi == TRUE) { 9230 mhp = (struct mode_header_grp2 *)header; 9231 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9232 } else { 9233 bd_len = ((struct mode_header *)header)->bdesc_length; 9234 } 9235 9236 if (bd_len > MODE_BLK_DESC_LENGTH) { 9237 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0, 9238 "sd_cache_control: Mode Sense returned invalid block " 9239 "descriptor length\n"); 9240 rval = EIO; 9241 goto mode_sense_failed; 9242 } 9243 9244 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9245 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9246 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 9247 "sd_cache_control: Mode Sense caching page code mismatch " 9248 "%d\n", mode_caching_page->mode_page.code); 9249 rval = EIO; 9250 goto mode_sense_failed; 9251 } 9252 9253 /* Check the relevant bits on successful mode sense. */ 9254 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 9255 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 9256 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 9257 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 9258 9259 size_t sbuflen; 9260 uchar_t save_pg; 9261 9262 /* 9263 * Construct select buffer length based on the 9264 * length of the sense data returned. 9265 */ 9266 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 9267 sizeof (struct mode_page) + 9268 (int)mode_caching_page->mode_page.length; 9269 9270 /* 9271 * Set the caching bits as requested. 9272 */ 9273 if (rcd_flag == SD_CACHE_ENABLE) 9274 mode_caching_page->rcd = 0; 9275 else if (rcd_flag == SD_CACHE_DISABLE) 9276 mode_caching_page->rcd = 1; 9277 9278 if (wce_flag == SD_CACHE_ENABLE) 9279 mode_caching_page->wce = 1; 9280 else if (wce_flag == SD_CACHE_DISABLE) 9281 mode_caching_page->wce = 0; 9282 9283 /* 9284 * Save the page if the mode sense says the 9285 * drive supports it. 9286 */ 9287 save_pg = mode_caching_page->mode_page.ps ? 9288 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 9289 9290 /* Clear reserved bits before mode select. */ 9291 mode_caching_page->mode_page.ps = 0; 9292 9293 /* 9294 * Clear out mode header for mode select. 9295 * The rest of the retrieved page will be reused. 9296 */ 9297 bzero(header, hdrlen); 9298 9299 if (un->un_f_cfg_is_atapi == TRUE) { 9300 mhp = (struct mode_header_grp2 *)header; 9301 mhp->bdesc_length_hi = bd_len >> 8; 9302 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 9303 } else { 9304 ((struct mode_header *)header)->bdesc_length = bd_len; 9305 } 9306 9307 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9308 9309 /* Issue mode select to change the cache settings */ 9310 if (un->un_f_cfg_is_atapi == TRUE) { 9311 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, header, 9312 sbuflen, save_pg, SD_PATH_DIRECT); 9313 } else { 9314 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 9315 sbuflen, save_pg, SD_PATH_DIRECT); 9316 } 9317 9318 } 9319 9320 9321 mode_sense_failed: 9322 9323 kmem_free(header, buflen); 9324 9325 if (rval != 0) { 9326 if (rval == EIO) 9327 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9328 else 9329 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9330 } 9331 return (rval); 9332 } 9333 9334 9335 /* 9336 * Function: sd_get_write_cache_enabled() 9337 * 9338 * Description: This routine is the driver entry point for determining if 9339 * write caching is enabled. It examines the WCE (write cache 9340 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
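 *
 * A minimal usage sketch (illustrative only; the variable name
 * wce_enabled is hypothetical):
 *
 *	int wce_enabled;
 *
 *	if (sd_get_write_cache_enabled(ssc, &wce_enabled) == 0 &&
 *	    wce_enabled != 0) {
 *		... the drive reports WCE set, so a SYNCHRONIZE
 *		    CACHE may be needed before power-down ...
 *	}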
9341 * 9342 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 9343 * structure for this target. 9344 * is_enabled - pointer to int where write cache enabled state 9345 * is returned (non-zero -> write cache enabled) 9346 * 9347 * 9348 * Return Code: EIO 9349 * code returned by sd_send_scsi_MODE_SENSE 9350 * 9351 * Context: Kernel Thread 9352 * 9353 * NOTE: If ioctl is added to disable write cache, this sequence should 9354 * be followed so that no locking is required for accesses to 9355 * un->un_f_write_cache_enabled: 9356 * do mode select to clear wce 9357 * do synchronize cache to flush cache 9358 * set un->un_f_write_cache_enabled = FALSE 9359 * 9360 * Conversely, an ioctl to enable the write cache should be done 9361 * in this order: 9362 * set un->un_f_write_cache_enabled = TRUE 9363 * do mode select to set wce 9364 */ 9365 9366 static int 9367 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled) 9368 { 9369 struct mode_caching *mode_caching_page; 9370 uchar_t *header; 9371 size_t buflen; 9372 int hdrlen; 9373 int bd_len; 9374 int rval = 0; 9375 struct sd_lun *un; 9376 int status; 9377 9378 ASSERT(ssc != NULL); 9379 un = ssc->ssc_un; 9380 ASSERT(un != NULL); 9381 ASSERT(is_enabled != NULL); 9382 9383 /* in case of error, flag as enabled */ 9384 *is_enabled = TRUE; 9385 9386 /* 9387 * Do a test unit ready, otherwise a mode sense may not work if this 9388 * is the first command sent to the device after boot. 9389 */ 9390 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 9391 9392 if (status != 0) 9393 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9394 9395 if (un->un_f_cfg_is_atapi == TRUE) { 9396 hdrlen = MODE_HEADER_LENGTH_GRP2; 9397 } else { 9398 hdrlen = MODE_HEADER_LENGTH; 9399 } 9400 9401 /* 9402 * Allocate memory for the retrieved mode page and its headers. Set 9403 * a pointer to the page itself. 9404 */ 9405 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 9406 header = kmem_zalloc(buflen, KM_SLEEP); 9407 9408 /* Get the information from the device. */ 9409 if (un->un_f_cfg_is_atapi == TRUE) { 9410 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 9411 MODEPAGE_CACHING, SD_PATH_DIRECT); 9412 } else { 9413 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 9414 MODEPAGE_CACHING, SD_PATH_DIRECT); 9415 } 9416 9417 if (rval != 0) { 9418 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9419 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 9420 goto mode_sense_failed; 9421 } 9422 9423 /* 9424 * Determine size of Block Descriptors in order to locate 9425 * the mode page data. ATAPI devices return 0, SCSI devices 9426 * should return MODE_BLK_DESC_LENGTH. 
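 *
 * Worked arithmetic for the two header formats handled below (typical
 * values; a device may legitimately report others):
 *
 *	ATAPI (grp2 header): bd_len = (bdesc_length_hi << 8) |
 *	    bdesc_length_lo, typically (0 << 8) | 0 = 0
 *	SCSI (grp0 header):  bd_len = bdesc_length, typically 8
 *	    (MODE_BLK_DESC_LENGTH)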
9427 */ 9428 if (un->un_f_cfg_is_atapi == TRUE) { 9429 struct mode_header_grp2 *mhp; 9430 mhp = (struct mode_header_grp2 *)header; 9431 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9432 } else { 9433 bd_len = ((struct mode_header *)header)->bdesc_length; 9434 } 9435 9436 if (bd_len > MODE_BLK_DESC_LENGTH) { 9437 /* FMA should make upset complain here */ 9438 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0, 9439 "sd_get_write_cache_enabled: Mode Sense returned invalid " 9440 "block descriptor length\n"); 9441 rval = EIO; 9442 goto mode_sense_failed; 9443 } 9444 9445 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9446 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9447 /* FMA could make upset complain here */ 9448 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 9449 "sd_get_write_cache_enabled: Mode Sense caching page " 9450 "code mismatch %d\n", mode_caching_page->mode_page.code); 9451 rval = EIO; 9452 goto mode_sense_failed; 9453 } 9454 *is_enabled = mode_caching_page->wce; 9455 9456 mode_sense_failed: 9457 if (rval == 0) { 9458 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 9459 } else if (rval == EIO) { 9460 /* 9461 * Some disks do not support mode sense(6), we 9462 * should ignore this kind of error(sense key is 9463 * 0x5 - illegal request). 9464 */ 9465 uint8_t *sensep; 9466 int senlen; 9467 9468 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 9469 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 9470 ssc->ssc_uscsi_cmd->uscsi_rqresid); 9471 9472 if (senlen > 0 && 9473 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 9474 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 9475 } else { 9476 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9477 } 9478 } else { 9479 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9480 } 9481 kmem_free(header, buflen); 9482 return (rval); 9483 } 9484 9485 /* 9486 * Function: sd_get_nv_sup() 9487 * 9488 * Description: This routine is the driver entry point for 9489 * determining whether non-volatile cache is supported. This 9490 * determination process works as follows: 9491 * 9492 * 1. sd first queries sd.conf on whether 9493 * suppress_cache_flush bit is set for this device. 9494 * 9495 * 2. if not there, then queries the internal disk table. 9496 * 9497 * 3. if either sd.conf or internal disk table specifies 9498 * cache flush be suppressed, we don't bother checking 9499 * NV_SUP bit. 9500 * 9501 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 9502 * the optional INQUIRY VPD page 0x86. If the device 9503 * supports VPD page 0x86, sd examines the NV_SUP 9504 * (non-volatile cache support) bit in the INQUIRY VPD page 9505 * 0x86: 9506 * o If NV_SUP bit is set, sd assumes the device has a 9507 * non-volatile cache and set the 9508 * un_f_sync_nv_supported to TRUE. 9509 * o Otherwise cache is not non-volatile, 9510 * un_f_sync_nv_supported is set to FALSE. 9511 * 9512 * Arguments: un - driver soft state (unit) structure 9513 * 9514 * Return Code: 9515 * 9516 * Context: Kernel Thread 9517 */ 9518 9519 static void 9520 sd_get_nv_sup(sd_ssc_t *ssc) 9521 { 9522 int rval = 0; 9523 uchar_t *inq86 = NULL; 9524 size_t inq86_len = MAX_INQUIRY_SIZE; 9525 size_t inq86_resid = 0; 9526 struct dk_callback *dkc; 9527 struct sd_lun *un; 9528 9529 ASSERT(ssc != NULL); 9530 un = ssc->ssc_un; 9531 ASSERT(un != NULL); 9532 9533 mutex_enter(SD_MUTEX(un)); 9534 9535 /* 9536 * Be conservative on the device's support of 9537 * SYNC_NV bit: un_f_sync_nv_supported is 9538 * initialized to be false. 
9539 */ 9540 un->un_f_sync_nv_supported = FALSE; 9541 9542 /* 9543 * If either sd.conf or internal disk table 9544 * specifies cache flush be suppressed, then 9545 * we don't bother checking NV_SUP bit. 9546 */ 9547 if (un->un_f_suppress_cache_flush == TRUE) { 9548 mutex_exit(SD_MUTEX(un)); 9549 return; 9550 } 9551 9552 if (sd_check_vpd_page_support(ssc) == 0 && 9553 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) { 9554 mutex_exit(SD_MUTEX(un)); 9555 /* collect page 86 data if available */ 9556 inq86 = kmem_zalloc(inq86_len, KM_SLEEP); 9557 9558 rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len, 9559 0x01, 0x86, &inq86_resid); 9560 9561 if (rval == 0 && (inq86_len - inq86_resid > 6)) { 9562 SD_TRACE(SD_LOG_COMMON, un, 9563 "sd_get_nv_sup: \ 9564 successfully get VPD page: %x \ 9565 PAGE LENGTH: %x BYTE 6: %x\n", 9566 inq86[1], inq86[3], inq86[6]); 9567 9568 mutex_enter(SD_MUTEX(un)); 9569 /* 9570 * Check the value of the NV_SUP bit: only if the 9571 * device reports NV_SUP as 1 will the 9572 * un_f_sync_nv_supported bit be set to true. 9573 */ 9574 if (inq86[6] & SD_VPD_NV_SUP) { 9575 un->un_f_sync_nv_supported = TRUE; 9576 } 9577 mutex_exit(SD_MUTEX(un)); 9578 } else if (rval != 0) { 9579 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9580 } 9581 9582 kmem_free(inq86, inq86_len); 9583 } else { 9584 mutex_exit(SD_MUTEX(un)); 9585 } 9586 9587 /* 9588 * Send a SYNC CACHE command to check whether 9589 * SYNC_NV bit is supported. This command should have 9590 * un_f_sync_nv_supported set to the correct value. 9591 */ 9592 mutex_enter(SD_MUTEX(un)); 9593 if (un->un_f_sync_nv_supported) { 9594 mutex_exit(SD_MUTEX(un)); 9595 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP); 9596 dkc->dkc_flag = FLUSH_VOLATILE; 9597 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 9598 9599 /* 9600 * Send a TEST UNIT READY command to the device. This should 9601 * clear any outstanding UNIT ATTENTION that may be present. 9602 */ 9603 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 9604 if (rval != 0) 9605 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9606 9607 kmem_free(dkc, sizeof (struct dk_callback)); 9608 } else { 9609 mutex_exit(SD_MUTEX(un)); 9610 } 9611 9612 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \ 9613 un_f_suppress_cache_flush is set to %d\n", 9614 un->un_f_suppress_cache_flush); 9615 } 9616 9617 /* 9618 * Function: sd_make_device 9619 * 9620 * Description: Utility routine to return the Solaris device number from 9621 * the data in the device's dev_info structure. 9622 * 9623 * Return Code: The Solaris device number 9624 * 9625 * Context: Any 9626 */ 9627 9628 static dev_t 9629 sd_make_device(dev_info_t *devi) 9630 { 9631 return (makedevice(ddi_driver_major(devi), 9632 ddi_get_instance(devi) << SDUNIT_SHIFT)); 9633 } 9634 9635 9636 /* 9637 * Function: sd_pm_entry 9638 * 9639 * Description: Called at the start of a new command to manage power 9640 * and busy status of a device. This includes determining whether 9641 * the current power state of the device is sufficient for 9642 * performing the command or whether it must be changed. 9643 * The PM framework is notified appropriately. 9644 * Only with a return status of DDI_SUCCESS will the 9645 * component be marked busy to the framework. 9646 * 9647 * All callers of sd_pm_entry must check the return status 9648 * and only call sd_pm_exit if it was DDI_SUCCESS. A status 9649 * of DDI_FAILURE indicates the device failed to power up. 9650 * In this case un_pm_count has been adjusted so the result 9651 * on exit is still powered down, i.e.
count is less than 0. 9652 * Calling sd_pm_exit with this count value hits an ASSERT. 9653 * 9654 * Return Code: DDI_SUCCESS or DDI_FAILURE 9655 * 9656 * Context: Kernel thread context. 9657 */ 9658 9659 static int 9660 sd_pm_entry(struct sd_lun *un) 9661 { 9662 int return_status = DDI_SUCCESS; 9663 9664 ASSERT(!mutex_owned(SD_MUTEX(un))); 9665 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9666 9667 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 9668 9669 if (un->un_f_pm_is_enabled == FALSE) { 9670 SD_TRACE(SD_LOG_IO_PM, un, 9671 "sd_pm_entry: exiting, PM not enabled\n"); 9672 return (return_status); 9673 } 9674 9675 /* 9676 * Just increment a counter if PM is enabled. On the transition from 9677 * 0 ==> 1, mark the device as busy. The iodone side will decrement 9678 * the count with each IO and mark the device as idle when the count 9679 * hits 0. 9680 * 9681 * If the count is less than 0 the device is powered down. If a powered 9682 * down device is successfully powered up then the count must be 9683 * incremented to reflect the power up. Note that it'll get incremented 9684 * a second time to become busy. 9685 * 9686 * Because the following has the potential to change the device state 9687 * and must release the un_pm_mutex to do so, only one thread can be 9688 * allowed through at a time. 9689 */ 9690 9691 mutex_enter(&un->un_pm_mutex); 9692 while (un->un_pm_busy == TRUE) { 9693 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 9694 } 9695 un->un_pm_busy = TRUE; 9696 9697 if (un->un_pm_count < 1) { 9698 9699 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 9700 9701 /* 9702 * Indicate we are now busy so the framework won't attempt to 9703 * power down the device. This call will only fail if either 9704 * we passed a bad component number or the device has no 9705 * components. Neither of these should ever happen. 9706 */ 9707 mutex_exit(&un->un_pm_mutex); 9708 return_status = pm_busy_component(SD_DEVINFO(un), 0); 9709 ASSERT(return_status == DDI_SUCCESS); 9710 9711 mutex_enter(&un->un_pm_mutex); 9712 9713 if (un->un_pm_count < 0) { 9714 mutex_exit(&un->un_pm_mutex); 9715 9716 SD_TRACE(SD_LOG_IO_PM, un, 9717 "sd_pm_entry: power up component\n"); 9718 9719 /* 9720 * pm_raise_power will cause sdpower to be called 9721 * which brings the device power level to the 9722 * desired state, ON in this case. If successful, 9723 * un_pm_count and un_power_level will be updated 9724 * appropriately. 9725 */ 9726 return_status = pm_raise_power(SD_DEVINFO(un), 0, 9727 SD_SPINDLE_ON); 9728 9729 mutex_enter(&un->un_pm_mutex); 9730 9731 if (return_status != DDI_SUCCESS) { 9732 /* 9733 * Power up failed. 9734 * Idle the device and adjust the count 9735 * so the result on exit is that we're 9736 * still powered down, ie. count is less than 0. 9737 */ 9738 SD_TRACE(SD_LOG_IO_PM, un, 9739 "sd_pm_entry: power up failed," 9740 " idle the component\n"); 9741 9742 (void) pm_idle_component(SD_DEVINFO(un), 0); 9743 un->un_pm_count--; 9744 } else { 9745 /* 9746 * Device is powered up, verify the 9747 * count is non-negative. 9748 * This is debug only. 9749 */ 9750 ASSERT(un->un_pm_count == 0); 9751 } 9752 } 9753 9754 if (return_status == DDI_SUCCESS) { 9755 /* 9756 * For performance, now that the device has been tagged 9757 * as busy, and it's known to be powered up, update the 9758 * chain types to use jump tables that do not include 9759 * pm. This significantly lowers the overhead and 9760 * therefore improves performance. 
9761 */ 9762 9763 mutex_exit(&un->un_pm_mutex); 9764 mutex_enter(SD_MUTEX(un)); 9765 SD_TRACE(SD_LOG_IO_PM, un, 9766 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9767 un->un_uscsi_chain_type); 9768 9769 if (un->un_f_non_devbsize_supported) { 9770 un->un_buf_chain_type = 9771 SD_CHAIN_INFO_RMMEDIA_NO_PM; 9772 } else { 9773 un->un_buf_chain_type = 9774 SD_CHAIN_INFO_DISK_NO_PM; 9775 } 9776 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 9777 9778 SD_TRACE(SD_LOG_IO_PM, un, 9779 " changed uscsi_chain_type to %d\n", 9780 un->un_uscsi_chain_type); 9781 mutex_exit(SD_MUTEX(un)); 9782 mutex_enter(&un->un_pm_mutex); 9783 9784 if (un->un_pm_idle_timeid == NULL) { 9785 /* 300 ms. */ 9786 un->un_pm_idle_timeid = 9787 timeout(sd_pm_idletimeout_handler, un, 9788 (drv_usectohz((clock_t)300000))); 9789 /* 9790 * Include an extra call to busy which keeps the 9791 * device busy with respect to the PM layer 9792 * until the timer fires, at which time it'll 9793 * get the extra idle call. 9794 */ 9795 (void) pm_busy_component(SD_DEVINFO(un), 0); 9796 } 9797 } 9798 } 9799 un->un_pm_busy = FALSE; 9800 /* Next... */ 9801 cv_signal(&un->un_pm_busy_cv); 9802 9803 un->un_pm_count++; 9804 9805 SD_TRACE(SD_LOG_IO_PM, un, 9806 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 9807 9808 mutex_exit(&un->un_pm_mutex); 9809 9810 return (return_status); 9811 } 9812 9813 9814 /* 9815 * Function: sd_pm_exit 9816 * 9817 * Description: Called at the completion of a command to manage busy 9818 * status for the device. If the device becomes idle the 9819 * PM framework is notified. 9820 * 9821 * Context: Kernel thread context 9822 */ 9823 9824 static void 9825 sd_pm_exit(struct sd_lun *un) 9826 { 9827 ASSERT(!mutex_owned(SD_MUTEX(un))); 9828 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9829 9830 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 9831 9832 /* 9833 * After attach the following flag is only read, so don't 9834 * take the penalty of acquiring a mutex for it. 9835 */ 9836 if (un->un_f_pm_is_enabled == TRUE) { 9837 9838 mutex_enter(&un->un_pm_mutex); 9839 un->un_pm_count--; 9840 9841 SD_TRACE(SD_LOG_IO_PM, un, 9842 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 9843 9844 ASSERT(un->un_pm_count >= 0); 9845 if (un->un_pm_count == 0) { 9846 mutex_exit(&un->un_pm_mutex); 9847 9848 SD_TRACE(SD_LOG_IO_PM, un, 9849 "sd_pm_exit: idle component\n"); 9850 9851 (void) pm_idle_component(SD_DEVINFO(un), 0); 9852 9853 } else { 9854 mutex_exit(&un->un_pm_mutex); 9855 } 9856 } 9857 9858 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9859 } 9860 9861 9862 /* 9863 * Function: sdopen 9864 * 9865 * Description: Driver's open(9e) entry point function.
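 *
 * A sketch of the minor number decomposition used throughout these
 * entry points (conceptual; SDUNIT_SHIFT and the partition mask are
 * defined in sddef.h):
 *
 *	instance = SDUNIT(dev);		minor >> SDUNIT_SHIFT
 *	part	 = SDPART(dev);		low-order partition bits
 *
 * sd_make_device() above builds the matching base dev_t as
 * ddi_get_instance(devi) << SDUNIT_SHIFT with the partition bits zero.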
9866 * 9867 * Arguments: dev_p - pointer to device number 9868 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 9869 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9870 * cred_p - user credential pointer 9871 * 9872 * Return Code: EINVAL 9873 * ENXIO 9874 * EIO 9875 * EROFS 9876 * EBUSY 9877 * 9878 * Context: Kernel thread context 9879 */ 9880 /* ARGSUSED */ 9881 static int 9882 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 9883 { 9884 struct sd_lun *un; 9885 int nodelay; 9886 int part; 9887 uint64_t partmask; 9888 int instance; 9889 dev_t dev; 9890 int rval = EIO; 9891 diskaddr_t nblks = 0; 9892 diskaddr_t label_cap; 9893 9894 /* Validate the open type */ 9895 if (otyp >= OTYPCNT) { 9896 return (EINVAL); 9897 } 9898 9899 dev = *dev_p; 9900 instance = SDUNIT(dev); 9901 mutex_enter(&sd_detach_mutex); 9902 9903 /* 9904 * Fail the open if there is no softstate for the instance, or 9905 * if another thread somewhere is trying to detach the instance. 9906 */ 9907 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 9908 (un->un_detach_count != 0)) { 9909 mutex_exit(&sd_detach_mutex); 9910 /* 9911 * The probe cache only needs to be cleared when open (9e) fails 9912 * with ENXIO (4238046). 9913 */ 9914 /* 9915 * Unconditionally clearing the probe cache is ok with 9916 * separate sd/ssd binaries; 9917 * on the x86 platform it can be an issue with both parallel 9918 * and fibre in one binary 9919 */ 9920 sd_scsi_clear_probe_cache(); 9921 return (ENXIO); 9922 } 9923 9924 /* 9925 * The un_layer_count is to prevent another thread in specfs from 9926 * trying to detach the instance, which can happen when we are 9927 * called from a higher-layer driver instead of thru specfs. 9928 * This will not be needed when DDI provides a layered driver 9929 * interface that allows specfs to know that an instance is in 9930 * use by a layered driver & should not be detached. 9931 * 9932 * Note: the semantics for layered driver opens are exactly one 9933 * close for every open. 9934 */ 9935 if (otyp == OTYP_LYR) { 9936 un->un_layer_count++; 9937 } 9938 9939 /* 9940 * Keep a count of the current # of opens in progress. This is because 9941 * some layered drivers try to call us as a regular open. This can 9942 * cause problems that we cannot prevent; however, by keeping this count 9943 * we can at least keep our open and detach routines from racing against 9944 * each other under such conditions. 9945 */ 9946 un->un_opens_in_progress++; 9947 mutex_exit(&sd_detach_mutex); 9948 9949 nodelay = (flag & (FNDELAY | FNONBLOCK)); 9950 part = SDPART(dev); 9951 partmask = 1 << part; 9952 9953 /* 9954 * We use a semaphore here in order to serialize 9955 * open and close requests on the device. 9956 */ 9957 sema_p(&un->un_semoclose); 9958 9959 mutex_enter(SD_MUTEX(un)); 9960 9961 /* 9962 * All device accesses go thru sdstrategy() where we check 9963 * on suspend status but there could be a scsi_poll command, 9964 * which bypasses sdstrategy(), so we need to check pm 9965 * status.
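 *
 * The canonical power-management pairing used by this and the other
 * entry points (sketch; matches the contract documented at
 * sd_pm_entry() above):
 *
 *	if (sd_pm_entry(un) != DDI_SUCCESS) {
 *		... fail the request; do NOT call sd_pm_exit() ...
 *	}
 *	... issue the command(s) ...
 *	sd_pm_exit(un);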
9966 */ 9967 9968 if (!nodelay) { 9969 while ((un->un_state == SD_STATE_SUSPENDED) || 9970 (un->un_state == SD_STATE_PM_CHANGING)) { 9971 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9972 } 9973 9974 mutex_exit(SD_MUTEX(un)); 9975 if (sd_pm_entry(un) != DDI_SUCCESS) { 9976 rval = EIO; 9977 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 9978 "sdopen: sd_pm_entry failed\n"); 9979 goto open_failed_with_pm; 9980 } 9981 mutex_enter(SD_MUTEX(un)); 9982 } 9983 9984 /* check for previous exclusive open */ 9985 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 9986 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9987 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 9988 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 9989 9990 if (un->un_exclopen & (partmask)) { 9991 goto excl_open_fail; 9992 } 9993 9994 if (flag & FEXCL) { 9995 int i; 9996 if (un->un_ocmap.lyropen[part]) { 9997 goto excl_open_fail; 9998 } 9999 for (i = 0; i < (OTYPCNT - 1); i++) { 10000 if (un->un_ocmap.regopen[i] & (partmask)) { 10001 goto excl_open_fail; 10002 } 10003 } 10004 } 10005 10006 /* 10007 * Check the write permission if this is a removable media device, 10008 * NDELAY has not been set, and writable permission is requested. 10009 * 10010 * Note: If NDELAY was set and this is write-protected media the WRITE 10011 * attempt will fail with EIO as part of the I/O processing. This is a 10012 * more permissive implementation that allows the open to succeed and 10013 * WRITE attempts to fail when appropriate. 10014 */ 10015 if (un->un_f_chk_wp_open) { 10016 if ((flag & FWRITE) && (!nodelay)) { 10017 mutex_exit(SD_MUTEX(un)); 10018 /* 10019 * Defer the check for write permission on a writable 10020 * DVD drive until sdstrategy; do not fail the open even 10021 * if FWRITE is set, since the device can be writable 10022 * depending upon the media, and the media can change 10023 * after the call to open(). 10024 */ 10025 if (un->un_f_dvdram_writable_device == FALSE) { 10026 if (ISCD(un) || sr_check_wp(dev)) { 10027 rval = EROFS; 10028 mutex_enter(SD_MUTEX(un)); 10029 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10030 "write to cd or write protected media\n"); 10031 goto open_fail; 10032 } 10033 } 10034 mutex_enter(SD_MUTEX(un)); 10035 } 10036 } 10037 10038 /* 10039 * If opening in NDELAY/NONBLOCK mode, just return. 10040 * Check if disk is ready and has a valid geometry later. 10041 */ 10042 if (!nodelay) { 10043 sd_ssc_t *ssc; 10044 10045 mutex_exit(SD_MUTEX(un)); 10046 ssc = sd_ssc_init(un); 10047 rval = sd_ready_and_valid(ssc, part); 10048 sd_ssc_fini(ssc); 10049 mutex_enter(SD_MUTEX(un)); 10050 /* 10051 * Fail if device is not ready or if the number of disk 10052 * blocks is zero or negative for non-CD devices. 10053 */ 10054 10055 nblks = 0; 10056 10057 if (rval == SD_READY_VALID && (!ISCD(un))) { 10058 /* if cmlb_partinfo fails, nblks remains 0 */ 10059 mutex_exit(SD_MUTEX(un)); 10060 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 10061 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 10062 mutex_enter(SD_MUTEX(un)); 10063 } 10064 10065 if ((rval != SD_READY_VALID) || 10066 (!ISCD(un) && nblks <= 0)) { 10067 rval = un->un_f_has_removable_media ? ENXIO : EIO; 10068 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10069 "device not ready or invalid disk block value\n"); 10070 goto open_fail; 10071 } 10072 #if defined(__i386) || defined(__amd64) 10073 } else { 10074 uchar_t *cp; 10075 /* 10076 * x86 requires special nodelay handling, so that p0 is 10077 * always defined and accessible. 10078 * Invalidate geometry only if device is not already open.
10079 */ 10080 cp = &un->un_ocmap.chkd[0]; 10081 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10082 if (*cp != (uchar_t)0) { 10083 break; 10084 } 10085 cp++; 10086 } 10087 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10088 mutex_exit(SD_MUTEX(un)); 10089 cmlb_invalidate(un->un_cmlbhandle, 10090 (void *)SD_PATH_DIRECT); 10091 mutex_enter(SD_MUTEX(un)); 10092 } 10093 10094 #endif 10095 } 10096 10097 if (otyp == OTYP_LYR) { 10098 un->un_ocmap.lyropen[part]++; 10099 } else { 10100 un->un_ocmap.regopen[otyp] |= partmask; 10101 } 10102 10103 /* Set up open and exclusive open flags */ 10104 if (flag & FEXCL) { 10105 un->un_exclopen |= (partmask); 10106 } 10107 10108 /* 10109 * If the lun is EFI labeled and lun capacity is greater than the 10110 * capacity contained in the label, log a sys-event to notify the 10111 * interested module. 10112 * To avoid an infinite loop of logging sys-event, we only log the 10113 * event when the lun is not opened in NDELAY mode. The event handler 10114 * should open the lun in NDELAY mode. 10115 */ 10116 if (!(flag & FNDELAY)) { 10117 mutex_exit(SD_MUTEX(un)); 10118 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 10119 (void*)SD_PATH_DIRECT) == 0) { 10120 mutex_enter(SD_MUTEX(un)); 10121 if (un->un_f_blockcount_is_valid && 10122 un->un_blockcount > label_cap) { 10123 mutex_exit(SD_MUTEX(un)); 10124 sd_log_lun_expansion_event(un, 10125 (nodelay ? KM_NOSLEEP : KM_SLEEP)); 10126 mutex_enter(SD_MUTEX(un)); 10127 } 10128 } else { 10129 mutex_enter(SD_MUTEX(un)); 10130 } 10131 } 10132 10133 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10134 "open of part %d type %d\n", part, otyp); 10135 10136 mutex_exit(SD_MUTEX(un)); 10137 if (!nodelay) { 10138 sd_pm_exit(un); 10139 } 10140 10141 sema_v(&un->un_semoclose); 10142 10143 mutex_enter(&sd_detach_mutex); 10144 un->un_opens_in_progress--; 10145 mutex_exit(&sd_detach_mutex); 10146 10147 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 10148 return (DDI_SUCCESS); 10149 10150 excl_open_fail: 10151 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 10152 rval = EBUSY; 10153 10154 open_fail: 10155 mutex_exit(SD_MUTEX(un)); 10156 10157 /* 10158 * On a failed open we must exit the pm management. 10159 */ 10160 if (!nodelay) { 10161 sd_pm_exit(un); 10162 } 10163 open_failed_with_pm: 10164 sema_v(&un->un_semoclose); 10165 10166 mutex_enter(&sd_detach_mutex); 10167 un->un_opens_in_progress--; 10168 if (otyp == OTYP_LYR) { 10169 un->un_layer_count--; 10170 } 10171 mutex_exit(&sd_detach_mutex); 10172 10173 return (rval); 10174 } 10175 10176 10177 /* 10178 * Function: sdclose 10179 * 10180 * Description: Driver's close(9e) entry point function. 
10181 * 10182 * Arguments: dev - device number 10183 * flag - file status flag, informational only 10184 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 10185 * cred_p - user credential pointer 10186 * 10187 * Return Code: ENXIO 10188 * 10189 * Context: Kernel thread context 10190 */ 10191 /* ARGSUSED */ 10192 static int 10193 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 10194 { 10195 struct sd_lun *un; 10196 uchar_t *cp; 10197 int part; 10198 int nodelay; 10199 int rval = 0; 10200 10201 /* Validate the open type */ 10202 if (otyp >= OTYPCNT) { 10203 return (ENXIO); 10204 } 10205 10206 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10207 return (ENXIO); 10208 } 10209 10210 part = SDPART(dev); 10211 nodelay = flag & (FNDELAY | FNONBLOCK); 10212 10213 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 10214 "sdclose: close of part %d type %d\n", part, otyp); 10215 10216 /* 10217 * We use a semaphore here in order to serialize 10218 * open and close requests on the device. 10219 */ 10220 sema_p(&un->un_semoclose); 10221 10222 mutex_enter(SD_MUTEX(un)); 10223 10224 /* Don't proceed if power is being changed. */ 10225 while (un->un_state == SD_STATE_PM_CHANGING) { 10226 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10227 } 10228 10229 if (un->un_exclopen & (1 << part)) { 10230 un->un_exclopen &= ~(1 << part); 10231 } 10232 10233 /* Update the open partition map */ 10234 if (otyp == OTYP_LYR) { 10235 un->un_ocmap.lyropen[part] -= 1; 10236 } else { 10237 un->un_ocmap.regopen[otyp] &= ~(1 << part); 10238 } 10239 10240 cp = &un->un_ocmap.chkd[0]; 10241 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10242 if (*cp != NULL) { 10243 break; 10244 } 10245 cp++; 10246 } 10247 10248 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10249 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 10250 10251 /* 10252 * We avoid persistence upon the last close, and set 10253 * the throttle back to the maximum. 10254 */ 10255 un->un_throttle = un->un_saved_throttle; 10256 10257 if (un->un_state == SD_STATE_OFFLINE) { 10258 if (un->un_f_is_fibre == FALSE) { 10259 scsi_log(SD_DEVINFO(un), sd_label, 10260 CE_WARN, "offline\n"); 10261 } 10262 mutex_exit(SD_MUTEX(un)); 10263 cmlb_invalidate(un->un_cmlbhandle, 10264 (void *)SD_PATH_DIRECT); 10265 mutex_enter(SD_MUTEX(un)); 10266 10267 } else { 10268 /* 10269 * Flush any outstanding writes in NVRAM cache. 10270 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 10271 * cmd; it may not work for non-Pluto devices. 10272 * SYNCHRONIZE CACHE is not required for removables, 10273 * except DVD-RAM drives. 10274 * 10275 * Also note: because SYNCHRONIZE CACHE is currently 10276 * the only command issued here that requires the 10277 * drive be powered up, only do the power up before 10278 * sending the Sync Cache command. If additional 10279 * commands are added which require a powered up 10280 * drive, the following sequence may have to change. 10281 * 10282 * And finally, note that parallel SCSI on SPARC 10283 * only issues a Sync Cache to DVD-RAM, a newly 10284 * supported device.
10285 */ 10286 #if defined(__i386) || defined(__amd64) 10287 if ((un->un_f_sync_cache_supported && 10288 un->un_f_sync_cache_required) || 10289 un->un_f_dvdram_writable_device == TRUE) { 10290 #else 10291 if (un->un_f_dvdram_writable_device == TRUE) { 10292 #endif 10293 mutex_exit(SD_MUTEX(un)); 10294 if (sd_pm_entry(un) == DDI_SUCCESS) { 10295 rval = 10296 sd_send_scsi_SYNCHRONIZE_CACHE(un, 10297 NULL); 10298 /* ignore error if not supported */ 10299 if (rval == ENOTSUP) { 10300 rval = 0; 10301 } else if (rval != 0) { 10302 rval = EIO; 10303 } 10304 sd_pm_exit(un); 10305 } else { 10306 rval = EIO; 10307 } 10308 mutex_enter(SD_MUTEX(un)); 10309 } 10310 10311 /* 10312 * For devices which supports DOOR_LOCK, send an ALLOW 10313 * MEDIA REMOVAL command, but don't get upset if it 10314 * fails. We need to raise the power of the drive before 10315 * we can call sd_send_scsi_DOORLOCK() 10316 */ 10317 if (un->un_f_doorlock_supported) { 10318 mutex_exit(SD_MUTEX(un)); 10319 if (sd_pm_entry(un) == DDI_SUCCESS) { 10320 sd_ssc_t *ssc; 10321 10322 ssc = sd_ssc_init(un); 10323 rval = sd_send_scsi_DOORLOCK(ssc, 10324 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 10325 if (rval != 0) 10326 sd_ssc_assessment(ssc, 10327 SD_FMT_IGNORE); 10328 sd_ssc_fini(ssc); 10329 10330 sd_pm_exit(un); 10331 if (ISCD(un) && (rval != 0) && 10332 (nodelay != 0)) { 10333 rval = ENXIO; 10334 } 10335 } else { 10336 rval = EIO; 10337 } 10338 mutex_enter(SD_MUTEX(un)); 10339 } 10340 10341 /* 10342 * If a device has removable media, invalidate all 10343 * parameters related to media, such as geometry, 10344 * blocksize, and blockcount. 10345 */ 10346 if (un->un_f_has_removable_media) { 10347 sr_ejected(un); 10348 } 10349 10350 /* 10351 * Destroy the cache (if it exists) which was 10352 * allocated for the write maps since this is 10353 * the last close for this media. 10354 */ 10355 if (un->un_wm_cache) { 10356 /* 10357 * Check if there are pending commands. 10358 * and if there are give a warning and 10359 * do not destroy the cache. 10360 */ 10361 if (un->un_ncmds_in_driver > 0) { 10362 scsi_log(SD_DEVINFO(un), 10363 sd_label, CE_WARN, 10364 "Unable to clean up memory " 10365 "because of pending I/O\n"); 10366 } else { 10367 kmem_cache_destroy( 10368 un->un_wm_cache); 10369 un->un_wm_cache = NULL; 10370 } 10371 } 10372 } 10373 } 10374 10375 mutex_exit(SD_MUTEX(un)); 10376 sema_v(&un->un_semoclose); 10377 10378 if (otyp == OTYP_LYR) { 10379 mutex_enter(&sd_detach_mutex); 10380 /* 10381 * The detach routine may run when the layer count 10382 * drops to zero. 10383 */ 10384 un->un_layer_count--; 10385 mutex_exit(&sd_detach_mutex); 10386 } 10387 10388 return (rval); 10389 } 10390 10391 10392 /* 10393 * Function: sd_ready_and_valid 10394 * 10395 * Description: Test if device is ready and has a valid geometry. 10396 * 10397 * Arguments: ssc - sd_ssc_t will contain un 10398 * un - driver soft state (unit) structure 10399 * 10400 * Return Code: SD_READY_VALID ready and valid label 10401 * SD_NOT_READY_VALID not ready, no label 10402 * SD_RESERVED_BY_OTHERS reservation conflict 10403 * 10404 * Context: Never called at interrupt context. 
10405 */ 10406 10407 static int 10408 sd_ready_and_valid(sd_ssc_t *ssc, int part) 10409 { 10410 struct sd_errstats *stp; 10411 uint64_t capacity; 10412 uint_t lbasize; 10413 int rval = SD_READY_VALID; 10414 char name_str[48]; 10415 boolean_t is_valid; 10416 struct sd_lun *un; 10417 int status; 10418 10419 ASSERT(ssc != NULL); 10420 un = ssc->ssc_un; 10421 ASSERT(un != NULL); 10422 ASSERT(!mutex_owned(SD_MUTEX(un))); 10423 10424 mutex_enter(SD_MUTEX(un)); 10425 /* 10426 * If a device has removable media, we must check if media is 10427 * ready when checking if this device is ready and valid. 10428 */ 10429 if (un->un_f_has_removable_media) { 10430 mutex_exit(SD_MUTEX(un)); 10431 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10432 10433 if (status != 0) { 10434 rval = SD_NOT_READY_VALID; 10435 mutex_enter(SD_MUTEX(un)); 10436 10437 /* Ignore all failed status for removable media */ 10438 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10439 10440 goto done; 10441 } 10442 10443 is_valid = SD_IS_VALID_LABEL(un); 10444 mutex_enter(SD_MUTEX(un)); 10445 if (!is_valid || 10446 (un->un_f_blockcount_is_valid == FALSE) || 10447 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 10448 10449 /* capacity has to be read on every open. */ 10450 mutex_exit(SD_MUTEX(un)); 10451 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 10452 &lbasize, SD_PATH_DIRECT); 10453 10454 if (status != 0) { 10455 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10456 10457 cmlb_invalidate(un->un_cmlbhandle, 10458 (void *)SD_PATH_DIRECT); 10459 mutex_enter(SD_MUTEX(un)); 10460 rval = SD_NOT_READY_VALID; 10461 10462 goto done; 10463 } else { 10464 mutex_enter(SD_MUTEX(un)); 10465 sd_update_block_info(un, lbasize, capacity); 10466 } 10467 } 10468 10469 /* 10470 * Check if the media in the device is writable or not. 10471 */ 10472 if (!is_valid && ISCD(un)) { 10473 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 10474 } 10475 10476 } else { 10477 /* 10478 * Do a test unit ready to clear any unit attention from non-cd 10479 * devices. 10480 */ 10481 mutex_exit(SD_MUTEX(un)); 10482 10483 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10484 if (status != 0) { 10485 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10486 } 10487 10488 mutex_enter(SD_MUTEX(un)); 10489 } 10490 10491 10492 /* 10493 * If this is a non-512 block device, allocate space for 10494 * the wmap cache. This is being done here since every time 10495 * the media is changed this routine will be called and the 10496 * block size is a function of media rather than device. 10497 */ 10498 if ((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR || 10499 un->un_f_non_devbsize_supported) && 10500 un->un_tgt_blocksize != DEV_BSIZE) { 10501 if (!(un->un_wm_cache)) { 10502 (void) snprintf(name_str, sizeof (name_str), 10503 "%s%d_cache", 10504 ddi_driver_name(SD_DEVINFO(un)), 10505 ddi_get_instance(SD_DEVINFO(un))); 10506 un->un_wm_cache = kmem_cache_create( 10507 name_str, sizeof (struct sd_w_map), 10508 8, sd_wm_cache_constructor, 10509 sd_wm_cache_destructor, NULL, 10510 (void *)un, NULL, 0); 10511 if (!(un->un_wm_cache)) { 10512 rval = ENOMEM; 10513 goto done; 10514 } 10515 } 10516 } 10517 10518 if (un->un_state == SD_STATE_NORMAL) { 10519 /* 10520 * If the target is not yet ready here (defined by a TUR 10521 * failure), invalidate the geometry and print an 'offline' 10522 * message. This is a legacy message, as the state of the 10523 * target is not actually changed to SD_STATE_OFFLINE.
10524 * 10525 * If the TUR fails for EACCES (Reservation Conflict), 10526 * SD_RESERVED_BY_OTHERS will be returned to indicate 10527 * reservation conflict. If the TUR fails for other 10528 * reasons, SD_NOT_READY_VALID will be returned. 10529 */ 10530 int err; 10531 10532 mutex_exit(SD_MUTEX(un)); 10533 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10534 mutex_enter(SD_MUTEX(un)); 10535 10536 if (err != 0) { 10537 mutex_exit(SD_MUTEX(un)); 10538 cmlb_invalidate(un->un_cmlbhandle, 10539 (void *)SD_PATH_DIRECT); 10540 mutex_enter(SD_MUTEX(un)); 10541 if (err == EACCES) { 10542 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10543 "reservation conflict\n"); 10544 rval = SD_RESERVED_BY_OTHERS; 10545 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10546 } else { 10547 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10548 "drive offline\n"); 10549 rval = SD_NOT_READY_VALID; 10550 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 10551 } 10552 goto done; 10553 } 10554 } 10555 10556 if (un->un_f_format_in_progress == FALSE) { 10557 mutex_exit(SD_MUTEX(un)); 10558 10559 (void) cmlb_validate(un->un_cmlbhandle, 0, 10560 (void *)SD_PATH_DIRECT); 10561 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL, 10562 NULL, (void *) SD_PATH_DIRECT) != 0) { 10563 rval = SD_NOT_READY_VALID; 10564 mutex_enter(SD_MUTEX(un)); 10565 10566 goto done; 10567 } 10568 if (un->un_f_pkstats_enabled) { 10569 sd_set_pstats(un); 10570 SD_TRACE(SD_LOG_IO_PARTITION, un, 10571 "sd_ready_and_valid: un:0x%p pstats created and " 10572 "set\n", un); 10573 } 10574 mutex_enter(SD_MUTEX(un)); 10575 } 10576 10577 /* 10578 * If this device supports DOOR_LOCK command, try to send 10579 * this command to PREVENT MEDIA REMOVAL, but don't get upset 10580 * if it fails. For a CD, however, it is an error. 10581 */ 10582 if (un->un_f_doorlock_supported) { 10583 mutex_exit(SD_MUTEX(un)); 10584 status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 10585 SD_PATH_DIRECT); 10586 10587 if ((status != 0) && ISCD(un)) { 10588 rval = SD_NOT_READY_VALID; 10589 mutex_enter(SD_MUTEX(un)); 10590 10591 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10592 10593 goto done; 10594 } else if (status != 0) 10595 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10596 mutex_enter(SD_MUTEX(un)); 10597 } 10598 10599 /* The state has changed, inform the media watch routines */ 10600 un->un_mediastate = DKIO_INSERTED; 10601 cv_broadcast(&un->un_state_cv); 10602 rval = SD_READY_VALID; 10603 10604 done: 10605 10606 /* 10607 * Initialize the capacity kstat value, if no media was previously 10608 * present (capacity kstat is 0) and media has been inserted 10609 * (un_blockcount > 0). 10610 */ 10611 if (un->un_errstats != NULL) { 10612 stp = (struct sd_errstats *)un->un_errstats->ks_data; 10613 if ((stp->sd_capacity.value.ui64 == 0) && 10614 (un->un_f_blockcount_is_valid == TRUE)) { 10615 stp->sd_capacity.value.ui64 = 10616 (uint64_t)((uint64_t)un->un_blockcount * 10617 un->un_sys_blocksize); 10618 } 10619 } 10620 10621 mutex_exit(SD_MUTEX(un)); 10622 return (rval); 10623 } 10624 10625 10626 /* 10627 * Function: sdmin 10628 * 10629 * Description: Routine to limit the size of a data transfer. Used in 10630 * conjunction with physio(9F). 10631 * 10632 * Arguments: bp - pointer to the indicated buf(9S) struct. 10633 * 10634 * Context: Kernel thread context.
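 *
 * Worked example (illustrative numbers): with un_max_xfer_size set to
 * 0x100000 (1 MB) and neither partial DMA nor buf breakup enabled, a
 * 3 MB physio(9F) request is clamped to 1 MB per pass by
 *
 *	if (bp->b_bcount > un->un_max_xfer_size)
 *		bp->b_bcount = un->un_max_xfer_size;
 *
 * so the transfer completes as three sequential 1 MB I/Os.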
10635 */ 10636 10637 static void 10638 sdmin(struct buf *bp) 10639 { 10640 struct sd_lun *un; 10641 int instance; 10642 10643 instance = SDUNIT(bp->b_edev); 10644 10645 un = ddi_get_soft_state(sd_state, instance); 10646 ASSERT(un != NULL); 10647 10648 /* 10649 * We depend on DMA partial or buf breakup to restrict 10650 * IO size if either of them is enabled. 10651 */ 10652 if (un->un_partial_dma_supported || 10653 un->un_buf_breakup_supported) { 10654 return; 10655 } 10656 10657 if (bp->b_bcount > un->un_max_xfer_size) { 10658 bp->b_bcount = un->un_max_xfer_size; 10659 } 10660 } 10661 10662 10663 /* 10664 * Function: sdread 10665 * 10666 * Description: Driver's read(9e) entry point function. 10667 * 10668 * Arguments: dev - device number 10669 * uio - structure pointer describing where data is to be stored 10670 * in user's space 10671 * cred_p - user credential pointer 10672 * 10673 * Return Code: ENXIO 10674 * EIO 10675 * EINVAL 10676 * value returned by physio 10677 * 10678 * Context: Kernel thread context. 10679 */ 10680 /* ARGSUSED */ 10681 static int 10682 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 10683 { 10684 struct sd_lun *un = NULL; 10685 int secmask; 10686 int err = 0; 10687 sd_ssc_t *ssc; 10688 10689 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10690 return (ENXIO); 10691 } 10692 10693 ASSERT(!mutex_owned(SD_MUTEX(un))); 10694 10695 10696 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10697 mutex_enter(SD_MUTEX(un)); 10698 /* 10699 * Because the call to sd_ready_and_valid will issue I/O we 10700 * must wait here if either the device is suspended or 10701 * if its power level is changing. 10702 */ 10703 while ((un->un_state == SD_STATE_SUSPENDED) || 10704 (un->un_state == SD_STATE_PM_CHANGING)) { 10705 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10706 } 10707 un->un_ncmds_in_driver++; 10708 mutex_exit(SD_MUTEX(un)); 10709 10710 /* Initialize sd_ssc_t for internal uscsi commands */ 10711 ssc = sd_ssc_init(un); 10712 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10713 err = EIO; 10714 } else { 10715 err = 0; 10716 } 10717 sd_ssc_fini(ssc); 10718 10719 mutex_enter(SD_MUTEX(un)); 10720 un->un_ncmds_in_driver--; 10721 ASSERT(un->un_ncmds_in_driver >= 0); 10722 mutex_exit(SD_MUTEX(un)); 10723 if (err != 0) 10724 return (err); 10725 } 10726 10727 /* 10728 * Read requests are restricted to multiples of the system block size. 10729 */ 10730 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) 10731 secmask = un->un_tgt_blocksize - 1; 10732 else 10733 secmask = DEV_BSIZE - 1; 10734 10735 if (uio->uio_loffset & ((offset_t)(secmask))) { 10736 SD_ERROR(SD_LOG_READ_WRITE, un, 10737 "sdread: file offset not modulo %d\n", 10738 secmask + 1); 10739 err = EINVAL; 10740 } else if (uio->uio_iov->iov_len & (secmask)) { 10741 SD_ERROR(SD_LOG_READ_WRITE, un, 10742 "sdread: transfer length not modulo %d\n", 10743 secmask + 1); 10744 err = EINVAL; 10745 } else { 10746 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 10747 } 10748 10749 return (err); 10750 } 10751 10752 10753 /* 10754 * Function: sdwrite 10755 * 10756 * Description: Driver's write(9e) entry point function. 10757 * 10758 * Arguments: dev - device number 10759 * uio - structure pointer describing where data is stored in 10760 * user's space 10761 * cred_p - user credential pointer 10762 * 10763 * Return Code: ENXIO 10764 * EIO 10765 * EINVAL 10766 * value returned by physio 10767 * 10768 * Context: Kernel thread context.
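 *
 * Worked example of the alignment check shared by the read/write
 * entry points (assuming the common DEV_BSIZE of 512 bytes, so
 * secmask is 0x1ff):
 *
 *	uio_loffset = 0x10200:	0x10200 & 0x1ff == 0     -> aligned
 *	uio_loffset = 0x10300:	0x10300 & 0x1ff == 0x100 -> EINVAL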
10769 */ 10770 /* ARGSUSED */ 10771 static int 10772 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 10773 { 10774 struct sd_lun *un = NULL; 10775 int secmask; 10776 int err = 0; 10777 sd_ssc_t *ssc; 10778 10779 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10780 return (ENXIO); 10781 } 10782 10783 ASSERT(!mutex_owned(SD_MUTEX(un))); 10784 10785 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10786 mutex_enter(SD_MUTEX(un)); 10787 /* 10788 * Because the call to sd_ready_and_valid will issue I/O we 10789 * must wait here if either the device is suspended or 10790 * if it's power level is changing. 10791 */ 10792 while ((un->un_state == SD_STATE_SUSPENDED) || 10793 (un->un_state == SD_STATE_PM_CHANGING)) { 10794 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10795 } 10796 un->un_ncmds_in_driver++; 10797 mutex_exit(SD_MUTEX(un)); 10798 10799 /* Initialize sd_ssc_t for internal uscsi commands */ 10800 ssc = sd_ssc_init(un); 10801 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10802 err = EIO; 10803 } else { 10804 err = 0; 10805 } 10806 sd_ssc_fini(ssc); 10807 10808 mutex_enter(SD_MUTEX(un)); 10809 un->un_ncmds_in_driver--; 10810 ASSERT(un->un_ncmds_in_driver >= 0); 10811 mutex_exit(SD_MUTEX(un)); 10812 if (err != 0) 10813 return (err); 10814 } 10815 10816 /* 10817 * Write requests are restricted to multiples of the system block size. 10818 */ 10819 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) 10820 secmask = un->un_tgt_blocksize - 1; 10821 else 10822 secmask = DEV_BSIZE - 1; 10823 10824 if (uio->uio_loffset & ((offset_t)(secmask))) { 10825 SD_ERROR(SD_LOG_READ_WRITE, un, 10826 "sdwrite: file offset not modulo %d\n", 10827 secmask + 1); 10828 err = EINVAL; 10829 } else if (uio->uio_iov->iov_len & (secmask)) { 10830 SD_ERROR(SD_LOG_READ_WRITE, un, 10831 "sdwrite: transfer length not modulo %d\n", 10832 secmask + 1); 10833 err = EINVAL; 10834 } else { 10835 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 10836 } 10837 10838 return (err); 10839 } 10840 10841 10842 /* 10843 * Function: sdaread 10844 * 10845 * Description: Driver's aread(9e) entry point function. 10846 * 10847 * Arguments: dev - device number 10848 * aio - structure pointer describing where data is to be stored 10849 * cred_p - user credential pointer 10850 * 10851 * Return Code: ENXIO 10852 * EIO 10853 * EINVAL 10854 * value returned by aphysio 10855 * 10856 * Context: Kernel thread context. 10857 */ 10858 /* ARGSUSED */ 10859 static int 10860 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10861 { 10862 struct sd_lun *un = NULL; 10863 struct uio *uio = aio->aio_uio; 10864 int secmask; 10865 int err = 0; 10866 sd_ssc_t *ssc; 10867 10868 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10869 return (ENXIO); 10870 } 10871 10872 ASSERT(!mutex_owned(SD_MUTEX(un))); 10873 10874 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10875 mutex_enter(SD_MUTEX(un)); 10876 /* 10877 * Because the call to sd_ready_and_valid will issue I/O we 10878 * must wait here if either the device is suspended or 10879 * if it's power level is changing. 
10880 */ 10881 while ((un->un_state == SD_STATE_SUSPENDED) || 10882 (un->un_state == SD_STATE_PM_CHANGING)) { 10883 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10884 } 10885 un->un_ncmds_in_driver++; 10886 mutex_exit(SD_MUTEX(un)); 10887 10888 /* Initialize sd_ssc_t for internal uscsi commands */ 10889 ssc = sd_ssc_init(un); 10890 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10891 err = EIO; 10892 } else { 10893 err = 0; 10894 } 10895 sd_ssc_fini(ssc); 10896 10897 mutex_enter(SD_MUTEX(un)); 10898 un->un_ncmds_in_driver--; 10899 ASSERT(un->un_ncmds_in_driver >= 0); 10900 mutex_exit(SD_MUTEX(un)); 10901 if (err != 0) 10902 return (err); 10903 } 10904 10905 /* 10906 * Read requests are restricted to multiples of the system block size. 10907 */ 10908 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) 10909 secmask = un->un_tgt_blocksize - 1; 10910 else 10911 secmask = DEV_BSIZE - 1; 10912 10913 if (uio->uio_loffset & ((offset_t)(secmask))) { 10914 SD_ERROR(SD_LOG_READ_WRITE, un, 10915 "sdaread: file offset not modulo %d\n", 10916 secmask + 1); 10917 err = EINVAL; 10918 } else if (uio->uio_iov->iov_len & (secmask)) { 10919 SD_ERROR(SD_LOG_READ_WRITE, un, 10920 "sdaread: transfer length not modulo %d\n", 10921 secmask + 1); 10922 err = EINVAL; 10923 } else { 10924 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 10925 } 10926 10927 return (err); 10928 } 10929 10930 10931 /* 10932 * Function: sdawrite 10933 * 10934 * Description: Driver's awrite(9e) entry point function. 10935 * 10936 * Arguments: dev - device number 10937 * aio - structure pointer describing where data is stored 10938 * cred_p - user credential pointer 10939 * 10940 * Return Code: ENXIO 10941 * EIO 10942 * EINVAL 10943 * value returned by aphysio 10944 * 10945 * Context: Kernel thread context. 10946 */ 10947 /* ARGSUSED */ 10948 static int 10949 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10950 { 10951 struct sd_lun *un = NULL; 10952 struct uio *uio = aio->aio_uio; 10953 int secmask; 10954 int err = 0; 10955 sd_ssc_t *ssc; 10956 10957 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10958 return (ENXIO); 10959 } 10960 10961 ASSERT(!mutex_owned(SD_MUTEX(un))); 10962 10963 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10964 mutex_enter(SD_MUTEX(un)); 10965 /* 10966 * Because the call to sd_ready_and_valid will issue I/O we 10967 * must wait here if either the device is suspended or 10968 * if it's power level is changing. 10969 */ 10970 while ((un->un_state == SD_STATE_SUSPENDED) || 10971 (un->un_state == SD_STATE_PM_CHANGING)) { 10972 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10973 } 10974 un->un_ncmds_in_driver++; 10975 mutex_exit(SD_MUTEX(un)); 10976 10977 /* Initialize sd_ssc_t for internal uscsi commands */ 10978 ssc = sd_ssc_init(un); 10979 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10980 err = EIO; 10981 } else { 10982 err = 0; 10983 } 10984 sd_ssc_fini(ssc); 10985 10986 mutex_enter(SD_MUTEX(un)); 10987 un->un_ncmds_in_driver--; 10988 ASSERT(un->un_ncmds_in_driver >= 0); 10989 mutex_exit(SD_MUTEX(un)); 10990 if (err != 0) 10991 return (err); 10992 } 10993 10994 /* 10995 * Write requests are restricted to multiples of the system block size. 
10996 */ 10997 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) 10998 secmask = un->un_tgt_blocksize - 1; 10999 else 11000 secmask = DEV_BSIZE - 1; 11001 11002 if (uio->uio_loffset & ((offset_t)(secmask))) { 11003 SD_ERROR(SD_LOG_READ_WRITE, un, 11004 "sdawrite: file offset not modulo %d\n", 11005 secmask + 1); 11006 err = EINVAL; 11007 } else if (uio->uio_iov->iov_len & (secmask)) { 11008 SD_ERROR(SD_LOG_READ_WRITE, un, 11009 "sdawrite: transfer length not modulo %d\n", 11010 secmask + 1); 11011 err = EINVAL; 11012 } else { 11013 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 11014 } 11015 11016 return (err); 11017 } 11018 11019 11020 11021 11022 11023 /* 11024 * Driver IO processing follows the following sequence: 11025 * 11026 * sdioctl(9E) sdstrategy(9E) biodone(9F) 11027 * | | ^ 11028 * v v | 11029 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 11030 * | | | | 11031 * v | | | 11032 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 11033 * | | ^ ^ 11034 * v v | | 11035 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 11036 * | | | | 11037 * +---+ | +------------+ +-------+ 11038 * | | | | 11039 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11040 * | v | | 11041 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 11042 * | | ^ | 11043 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11044 * | v | | 11045 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 11046 * | | ^ | 11047 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11048 * | v | | 11049 * | sd_checksum_iostart() sd_checksum_iodone() | 11050 * | | ^ | 11051 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 11052 * | v | | 11053 * | sd_pm_iostart() sd_pm_iodone() | 11054 * | | ^ | 11055 * | | | | 11056 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 11057 * | ^ 11058 * v | 11059 * sd_core_iostart() | 11060 * | | 11061 * | +------>(*destroypkt)() 11062 * +-> sd_start_cmds() <-+ | | 11063 * | | | v 11064 * | | | scsi_destroy_pkt(9F) 11065 * | | | 11066 * +->(*initpkt)() +- sdintr() 11067 * | | | | 11068 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 11069 * | +-> scsi_setup_cdb(9F) | 11070 * | | 11071 * +--> scsi_transport(9F) | 11072 * | | 11073 * +----> SCSA ---->+ 11074 * 11075 * 11076 * This code is based upon the following presumptions: 11077 * 11078 * - iostart and iodone functions operate on buf(9S) structures. These 11079 * functions perform the necessary operations on the buf(9S) and pass 11080 * them along to the next function in the chain by using the macros 11081 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 11082 * (for iodone side functions). 11083 * 11084 * - The iostart side functions may sleep. The iodone side functions 11085 * are called under interrupt context and may NOT sleep. Therefore 11086 * iodone side functions also may not call iostart side functions. 11087 * (NOTE: iostart side functions should NOT sleep for memory, as 11088 * this could result in deadlock.) 11089 * 11090 * - An iostart side function may call its corresponding iodone side 11091 * function directly (if necessary). 11092 * 11093 * - In the event of an error, an iostart side function can return a buf(9S) 11094 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 11095 * b_error in the usual way of course). 11096 * 11097 * - The taskq mechanism may be used by the iodone side functions to dispatch 11098 * requests to the iostart side functions. 
The iostart side functions in 11099 * this case would be called under the context of a taskq thread, so it's 11100 * OK for them to block/sleep/spin in this case. 11101 * 11102 * - iostart side functions may allocate "shadow" buf(9S) structs and 11103 * pass them along to the next function in the chain. The corresponding 11104 * iodone side functions must coalesce the "shadow" bufs and return 11105 * the "original" buf to the next higher layer. 11106 * 11107 * - The b_private field of the buf(9S) struct holds a pointer to 11108 * an sd_xbuf struct, which contains information needed to 11109 * construct the scsi_pkt for the command. 11110 * 11111 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 11112 * layer must acquire & release the SD_MUTEX(un) as needed. 11113 */ 11114 11115 11116 /* 11117 * Create taskq for all targets in the system. This is created at 11118 * _init(9E) and destroyed at _fini(9E). 11119 * 11120 * Note: here we set the minalloc to a reasonably high number to ensure that 11121 * we will have an adequate supply of task entries available at interrupt time. 11122 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 11123 * sd_taskq_create(). Since we do not want to sleep for allocations at 11124 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 11125 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 11126 * requests at any one instant in time. 11127 */ 11128 #define SD_TASKQ_NUMTHREADS 8 11129 #define SD_TASKQ_MINALLOC 256 11130 #define SD_TASKQ_MAXALLOC 256 11131 11132 static taskq_t *sd_tq = NULL; 11133 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq)) 11134 11135 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 11136 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 11137 11138 /* 11139 * The following task queue is created for the write part of 11140 * read-modify-write of non-512 block size devices. 11141 * Limit the number of threads to 1 for now. This number has been chosen 11142 * considering the fact that it currently applies only to DVD-RAM and MO 11143 * drives, for which performance is not the main criterion at this stage. 11144 * Note: whether a single taskq could serve both purposes remains to be explored. 11145 */ 11146 #define SD_WMR_TASKQ_NUMTHREADS 1 11147 static taskq_t *sd_wmr_tq = NULL; 11148 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 11149 11150 /* 11151 * Function: sd_taskq_create 11152 * 11153 * Description: Create taskq thread(s) and preallocate task entries 11154 * 11155 * Return Code: None; the allocated taskq_t pointers are stored in 11156 * the sd_tq and sd_wmr_tq globals. 11157 * 11158 * Context: Can sleep. Requires blockable context. 11159 * 11160 * Notes: - The taskq() facility currently is NOT part of the DDI. 11161 * (definitely NOT recommended for 3rd-party drivers!) :-) 11162 * - taskq_create() will block for memory, and it will panic 11163 * if it cannot create the requested number of threads. 11164 * - Currently taskq_create() creates threads that cannot be swapped.
11165 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 11166 * supply of taskq entries at interrupt time (ie, so that we 11167 * do not have to sleep for memory) 11168 */ 11169 11170 static void 11171 sd_taskq_create(void) 11172 { 11173 char taskq_name[TASKQ_NAMELEN]; 11174 11175 ASSERT(sd_tq == NULL); 11176 ASSERT(sd_wmr_tq == NULL); 11177 11178 (void) snprintf(taskq_name, sizeof (taskq_name), 11179 "%s_drv_taskq", sd_label); 11180 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 11181 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11182 TASKQ_PREPOPULATE)); 11183 11184 (void) snprintf(taskq_name, sizeof (taskq_name), 11185 "%s_rmw_taskq", sd_label); 11186 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 11187 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11188 TASKQ_PREPOPULATE)); 11189 } 11190 11191 11192 /* 11193 * Function: sd_taskq_delete 11194 * 11195 * Description: Complementary cleanup routine for sd_taskq_create(). 11196 * 11197 * Context: Kernel thread context. 11198 */ 11199 11200 static void 11201 sd_taskq_delete(void) 11202 { 11203 ASSERT(sd_tq != NULL); 11204 ASSERT(sd_wmr_tq != NULL); 11205 taskq_destroy(sd_tq); 11206 taskq_destroy(sd_wmr_tq); 11207 sd_tq = NULL; 11208 sd_wmr_tq = NULL; 11209 } 11210 11211 11212 /* 11213 * Function: sdstrategy 11214 * 11215 * Description: Driver's strategy (9E) entry point function. 11216 * 11217 * Arguments: bp - pointer to buf(9S) 11218 * 11219 * Return Code: Always returns zero 11220 * 11221 * Context: Kernel thread context. 11222 */ 11223 11224 static int 11225 sdstrategy(struct buf *bp) 11226 { 11227 struct sd_lun *un; 11228 11229 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11230 if (un == NULL) { 11231 bioerror(bp, EIO); 11232 bp->b_resid = bp->b_bcount; 11233 biodone(bp); 11234 return (0); 11235 } 11236 11237 /* As was done in the past, fail new cmds. if state is dumping. */ 11238 if (un->un_state == SD_STATE_DUMPING) { 11239 bioerror(bp, ENXIO); 11240 bp->b_resid = bp->b_bcount; 11241 biodone(bp); 11242 return (0); 11243 } 11244 11245 ASSERT(!mutex_owned(SD_MUTEX(un))); 11246 11247 /* 11248 * Commands may sneak in while we released the mutex in 11249 * DDI_SUSPEND, we should block new commands. However, old 11250 * commands that are still in the driver at this point should 11251 * still be allowed to drain. 11252 */ 11253 mutex_enter(SD_MUTEX(un)); 11254 /* 11255 * Must wait here if either the device is suspended or 11256 * if it's power level is changing. 11257 */ 11258 while ((un->un_state == SD_STATE_SUSPENDED) || 11259 (un->un_state == SD_STATE_PM_CHANGING)) { 11260 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11261 } 11262 11263 un->un_ncmds_in_driver++; 11264 11265 /* 11266 * atapi: Since we are running the CD for now in PIO mode we need to 11267 * call bp_mapin here to avoid bp_mapin called interrupt context under 11268 * the HBA's init_pkt routine. 11269 */ 11270 if (un->un_f_cfg_is_atapi == TRUE) { 11271 mutex_exit(SD_MUTEX(un)); 11272 bp_mapin(bp); 11273 mutex_enter(SD_MUTEX(un)); 11274 } 11275 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 11276 un->un_ncmds_in_driver); 11277 11278 if (bp->b_flags & B_WRITE) 11279 un->un_f_sync_cache_required = TRUE; 11280 11281 mutex_exit(SD_MUTEX(un)); 11282 11283 /* 11284 * This will (eventually) allocate the sd_xbuf area and 11285 * call sd_xbuf_strategy(). 
We just want to return the 11286 * result of ddi_xbuf_qstrategy so that we have an opt- 11287 * imized tail call which saves us a stack frame. 11288 */ 11289 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 11290 } 11291 11292 11293 /* 11294 * Function: sd_xbuf_strategy 11295 * 11296 * Description: Function for initiating IO operations via the 11297 * ddi_xbuf_qstrategy() mechanism. 11298 * 11299 * Context: Kernel thread context. 11300 */ 11301 11302 static void 11303 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 11304 { 11305 struct sd_lun *un = arg; 11306 11307 ASSERT(bp != NULL); 11308 ASSERT(xp != NULL); 11309 ASSERT(un != NULL); 11310 ASSERT(!mutex_owned(SD_MUTEX(un))); 11311 11312 /* 11313 * Initialize the fields in the xbuf and save a pointer to the 11314 * xbuf in bp->b_private. 11315 */ 11316 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 11317 11318 /* Send the buf down the iostart chain */ 11319 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 11320 } 11321 11322 11323 /* 11324 * Function: sd_xbuf_init 11325 * 11326 * Description: Prepare the given sd_xbuf struct for use. 11327 * 11328 * Arguments: un - ptr to softstate 11329 * bp - ptr to associated buf(9S) 11330 * xp - ptr to associated sd_xbuf 11331 * chain_type - IO chain type to use: 11332 * SD_CHAIN_NULL 11333 * SD_CHAIN_BUFIO 11334 * SD_CHAIN_USCSI 11335 * SD_CHAIN_DIRECT 11336 * SD_CHAIN_DIRECT_PRIORITY 11337 * pktinfop - ptr to private data struct for scsi_pkt(9S) 11338 * initialization; may be NULL if none. 11339 * 11340 * Context: Kernel thread context 11341 */ 11342 11343 static void 11344 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 11345 uchar_t chain_type, void *pktinfop) 11346 { 11347 int index; 11348 11349 ASSERT(un != NULL); 11350 ASSERT(bp != NULL); 11351 ASSERT(xp != NULL); 11352 11353 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 11354 bp, chain_type); 11355 11356 xp->xb_un = un; 11357 xp->xb_pktp = NULL; 11358 xp->xb_pktinfo = pktinfop; 11359 xp->xb_private = bp->b_private; 11360 xp->xb_blkno = (daddr_t)bp->b_blkno; 11361 11362 /* 11363 * Set up the iostart and iodone chain indexes in the xbuf, based 11364 * upon the specified chain type to use. 11365 */ 11366 switch (chain_type) { 11367 case SD_CHAIN_NULL: 11368 /* 11369 * Fall thru to just use the values for the buf type, even 11370 * tho for the NULL chain these values will never be used. 11371 */ 11372 /* FALLTHRU */ 11373 case SD_CHAIN_BUFIO: 11374 index = un->un_buf_chain_type; 11375 if ((!un->un_f_has_removable_media) && 11376 (un->un_tgt_blocksize != 0) && 11377 (un->un_tgt_blocksize != DEV_BSIZE)) { 11378 int secmask = 0, blknomask = 0; 11379 blknomask = 11380 (un->un_tgt_blocksize / DEV_BSIZE) - 1; 11381 secmask = un->un_tgt_blocksize - 1; 11382 11383 if ((bp->b_lblkno & (blknomask)) || 11384 (bp->b_bcount & (secmask))) { 11385 if (un->un_f_rmw_type != 11386 SD_RMW_TYPE_RETURN_ERROR) { 11387 if (un->un_f_pm_is_enabled == FALSE) 11388 index = 11389 SD_CHAIN_INFO_MSS_DSK_NO_PM; 11390 else 11391 index = 11392 SD_CHAIN_INFO_MSS_DISK; 11393 } 11394 } 11395 } 11396 break; 11397 case SD_CHAIN_USCSI: 11398 index = un->un_uscsi_chain_type; 11399 break; 11400 case SD_CHAIN_DIRECT: 11401 index = un->un_direct_chain_type; 11402 break; 11403 case SD_CHAIN_DIRECT_PRIORITY: 11404 index = un->un_priority_chain_type; 11405 break; 11406 default: 11407 /* We're really broken if we ever get here... 
*/ 11408 panic("sd_xbuf_init: illegal chain type!"); 11409 /*NOTREACHED*/ 11410 } 11411 11412 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 11413 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 11414 11415 /* 11416 * It might be a bit easier to simply bzero the entire xbuf above, 11417 * but it turns out that since we init a fair number of members anyway, 11418 * we save a fair number cycles by doing explicit assignment of zero. 11419 */ 11420 xp->xb_pkt_flags = 0; 11421 xp->xb_dma_resid = 0; 11422 xp->xb_retry_count = 0; 11423 xp->xb_victim_retry_count = 0; 11424 xp->xb_ua_retry_count = 0; 11425 xp->xb_nr_retry_count = 0; 11426 xp->xb_sense_bp = NULL; 11427 xp->xb_sense_status = 0; 11428 xp->xb_sense_state = 0; 11429 xp->xb_sense_resid = 0; 11430 xp->xb_ena = 0; 11431 11432 bp->b_private = xp; 11433 bp->b_flags &= ~(B_DONE | B_ERROR); 11434 bp->b_resid = 0; 11435 bp->av_forw = NULL; 11436 bp->av_back = NULL; 11437 bioerror(bp, 0); 11438 11439 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 11440 } 11441 11442 11443 /* 11444 * Function: sd_uscsi_strategy 11445 * 11446 * Description: Wrapper for calling into the USCSI chain via physio(9F) 11447 * 11448 * Arguments: bp - buf struct ptr 11449 * 11450 * Return Code: Always returns 0 11451 * 11452 * Context: Kernel thread context 11453 */ 11454 11455 static int 11456 sd_uscsi_strategy(struct buf *bp) 11457 { 11458 struct sd_lun *un; 11459 struct sd_uscsi_info *uip; 11460 struct sd_xbuf *xp; 11461 uchar_t chain_type; 11462 uchar_t cmd; 11463 11464 ASSERT(bp != NULL); 11465 11466 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11467 if (un == NULL) { 11468 bioerror(bp, EIO); 11469 bp->b_resid = bp->b_bcount; 11470 biodone(bp); 11471 return (0); 11472 } 11473 11474 ASSERT(!mutex_owned(SD_MUTEX(un))); 11475 11476 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 11477 11478 /* 11479 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 11480 */ 11481 ASSERT(bp->b_private != NULL); 11482 uip = (struct sd_uscsi_info *)bp->b_private; 11483 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0]; 11484 11485 mutex_enter(SD_MUTEX(un)); 11486 /* 11487 * atapi: Since we are running the CD for now in PIO mode we need to 11488 * call bp_mapin here to avoid bp_mapin called interrupt context under 11489 * the HBA's init_pkt routine. 11490 */ 11491 if (un->un_f_cfg_is_atapi == TRUE) { 11492 mutex_exit(SD_MUTEX(un)); 11493 bp_mapin(bp); 11494 mutex_enter(SD_MUTEX(un)); 11495 } 11496 un->un_ncmds_in_driver++; 11497 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 11498 un->un_ncmds_in_driver); 11499 11500 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) && 11501 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1)) 11502 un->un_f_sync_cache_required = TRUE; 11503 11504 mutex_exit(SD_MUTEX(un)); 11505 11506 switch (uip->ui_flags) { 11507 case SD_PATH_DIRECT: 11508 chain_type = SD_CHAIN_DIRECT; 11509 break; 11510 case SD_PATH_DIRECT_PRIORITY: 11511 chain_type = SD_CHAIN_DIRECT_PRIORITY; 11512 break; 11513 default: 11514 chain_type = SD_CHAIN_USCSI; 11515 break; 11516 } 11517 11518 /* 11519 * We may allocate extra buf for external USCSI commands. If the 11520 * application asks for bigger than 20-byte sense data via USCSI, 11521 * SCSA layer will allocate 252 bytes sense buf for that command. 
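 * As an illustrative sketch using the 20- and 252-byte values noted above (SENSE_LENGTH and MAX_SENSE_LENGTH respectively), the enlarged case below allocates sizeof (struct sd_xbuf) + 232 bytes, leaving room for the full 252-byte sense buffer at the tail of the xbuf.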
11522 */ 11523 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen > 11524 SENSE_LENGTH) { 11525 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH + 11526 MAX_SENSE_LENGTH, KM_SLEEP); 11527 } else { 11528 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP); 11529 } 11530 11531 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 11532 11533 /* Use the index obtained within xbuf_init */ 11534 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 11535 11536 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 11537 11538 return (0); 11539 } 11540 11541 /* 11542 * Function: sd_send_scsi_cmd 11543 * 11544 * Description: Runs a USCSI command for user (when called through sdioctl), 11545 * or for the driver 11546 * 11547 * Arguments: dev - the dev_t for the device 11548 * incmd - ptr to a valid uscsi_cmd struct 11549 * flag - bit flag, indicating open settings, 32/64 bit type 11550 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11551 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11552 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11553 * to use the USCSI "direct" chain and bypass the normal 11554 * command waitq. 11555 * 11556 * Return Code: 0 - successful completion of the given command 11557 * EIO - scsi_uscsi_handle_command() failed 11558 * ENXIO - soft state not found for specified dev 11559 * EINVAL 11560 * EFAULT - copyin/copyout error 11561 * return code of scsi_uscsi_handle_command(): 11562 * EIO 11563 * ENXIO 11564 * EACCES 11565 * 11566 * Context: Waits for command to complete. Can sleep. 11567 */ 11568 11569 static int 11570 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 11571 enum uio_seg dataspace, int path_flag) 11572 { 11573 struct sd_lun *un; 11574 sd_ssc_t *ssc; 11575 int rval; 11576 11577 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 11578 if (un == NULL) { 11579 return (ENXIO); 11580 } 11581 11582 /* 11583 * Use sd_ssc_send to handle the uscsi cmd 11584 */ 11585 ssc = sd_ssc_init(un); 11586 rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag); 11587 sd_ssc_fini(ssc); 11588 11589 return (rval); 11590 } 11591 11592 /* 11593 * Function: sd_ssc_init 11594 * 11595 * Description: Uscsi end-users call this function to initialize the 11596 * necessary fields, such as the uscsi_cmd and sd_uscsi_info structs. 11597 * 11598 * The return value of sd_send_scsi_cmd will be treated as a 11599 * fault in various conditions. Even if it is not zero, some 11600 * callers may ignore the return value. That is to say, we 11601 * cannot make an accurate assessment in sdintr, since if a 11602 * command fails in sdintr it does not mean the caller of 11603 * sd_send_scsi_cmd will treat it as a real failure. 11604 * 11605 * To avoid printing too many error logs for a failed uscsi 11606 * packet that the caller may not treat as a failure, the 11607 * sd driver keeps silent while handling all uscsi commands. 11608 * 11609 * During detach->attach and attach-open, for some types of 11610 * problems, the driver should be providing information about 11611 * the problem encountered. These commands use USCSI_SILENT, which 11612 * suppresses all driver information. The result is that no 11613 * information about the problem is available. Being 11614 * completely silent during this time is inappropriate. The 11615 * driver needs a more selective filter than USCSI_SILENT, so 11616 * that information related to faults is provided.
11617 * 11618 * To make an accurate assessment, the caller of 11619 * sd_send_scsi_USCSI_CMD should take ownership and 11620 * gather the information necessary to print error messages. 11621 * 11622 * If we want to print the necessary info for a uscsi command, 11623 * we need to keep the uscsi_cmd and sd_uscsi_info until we can 11624 * make the assessment. We use sd_ssc_init to allocate the 11625 * structs necessary for sending a uscsi command, and we are also 11626 * responsible for freeing that memory by calling 11627 * sd_ssc_fini. 11628 * 11629 * The calling sequence will look like: 11630 * sd_ssc_init-> 11631 * 11632 * ... 11633 * 11634 * sd_send_scsi_USCSI_CMD-> 11635 * sd_ssc_send-> - - - sdintr 11636 * ... 11637 * 11638 * if we think the return value should be treated as a 11639 * failure, we make the assessment here and print whatever is 11640 * necessary by retrieving the uscsi_cmd and sd_uscsi_info 11641 * 11642 * ... 11643 * 11644 * sd_ssc_fini 11645 * 11646 * 11647 * Arguments: un - pointer to driver soft state (unit) structure for this 11648 * target. 11649 * 11650 * Return code: sd_ssc_t - pointer to the allocated sd_ssc_t struct; it 11651 * contains the uscsi_cmd and sd_uscsi_info. 11652 * NULL - if memory for the sd_ssc_t struct cannot be allocated 11653 * 11654 * Context: Kernel Thread. 11655 */ 11656 static sd_ssc_t * 11657 sd_ssc_init(struct sd_lun *un) 11658 { 11659 sd_ssc_t *ssc; 11660 struct uscsi_cmd *ucmdp; 11661 struct sd_uscsi_info *uip; 11662 11663 ASSERT(un != NULL); 11664 ASSERT(!mutex_owned(SD_MUTEX(un))); 11665 11666 /* 11667 * Allocate the sd_ssc_t structure 11668 */ 11669 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP); 11670 11671 /* 11672 * Allocate the uscsi_cmd via the scsi_uscsi_alloc common routine 11673 */ 11674 ucmdp = scsi_uscsi_alloc(); 11675 11676 /* 11677 * Allocate the sd_uscsi_info structure 11678 */ 11679 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 11680 11681 ssc->ssc_uscsi_cmd = ucmdp; 11682 ssc->ssc_uscsi_info = uip; 11683 ssc->ssc_un = un; 11684 11685 return (ssc); 11686 } 11687 11688 /* 11689 * Function: sd_ssc_fini 11690 * 11691 * Description: Free an sd_ssc_t struct and everything hanging off it. 11692 * 11693 * Arguments: ssc - pointer to the sd_ssc_t struct to be freed. 11694 */ 11695 static void 11696 sd_ssc_fini(sd_ssc_t *ssc) 11697 { 11698 scsi_uscsi_free(ssc->ssc_uscsi_cmd); 11699 11700 if (ssc->ssc_uscsi_info != NULL) { 11701 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info)); 11702 ssc->ssc_uscsi_info = NULL; 11703 } 11704 11705 kmem_free(ssc, sizeof (sd_ssc_t)); 11706 ssc = NULL; 11707 } 11708 11709 /* 11710 * Function: sd_ssc_send 11711 * 11712 * Description: Runs a USCSI command for user when called through sdioctl, 11713 * or for the driver. 11714 * 11715 * Arguments: ssc - the sd_ssc_t struct that carries the uscsi_cmd and 11716 * sd_uscsi_info. 11717 * incmd - ptr to a valid uscsi_cmd struct 11718 * flag - bit flag, indicating open settings, 32/64 bit type 11719 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11720 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11721 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11722 * to use the USCSI "direct" chain and bypass the normal 11723 * command waitq.
11724 * 11725 * Return Code: 0 - successful completion of the given command 11726 * EIO - scsi_uscsi_handle_command() failed 11727 * ENXIO - soft state not found for specified dev 11728 * EINVAL 11729 * EFAULT - copyin/copyout error 11730 * return code of scsi_uscsi_handle_command(): 11731 * EIO 11732 * ENXIO 11733 * EACCES 11734 * 11735 * Context: Kernel Thread; 11736 * Waits for command to complete. Can sleep. 11737 */ 11738 static int 11739 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag, 11740 enum uio_seg dataspace, int path_flag) 11741 { 11742 struct sd_uscsi_info *uip; 11743 struct uscsi_cmd *uscmd; 11744 struct sd_lun *un; 11745 dev_t dev; 11746 11747 int format = 0; 11748 int rval; 11749 11750 ASSERT(ssc != NULL); 11751 un = ssc->ssc_un; 11752 ASSERT(un != NULL); 11753 uscmd = ssc->ssc_uscsi_cmd; 11754 ASSERT(uscmd != NULL); 11755 ASSERT(!mutex_owned(SD_MUTEX(un))); 11756 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) { 11757 /* 11758 * If enter here, it indicates that the previous uscsi 11759 * command has not been processed by sd_ssc_assessment. 11760 * This is violating our rules of FMA telemetry processing. 11761 * We should print out this message and the last undisposed 11762 * uscsi command. 11763 */ 11764 if (uscmd->uscsi_cdb != NULL) { 11765 SD_INFO(SD_LOG_SDTEST, un, 11766 "sd_ssc_send is missing the alternative " 11767 "sd_ssc_assessment when running command 0x%x.\n", 11768 uscmd->uscsi_cdb[0]); 11769 } 11770 /* 11771 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be 11772 * the initial status. 11773 */ 11774 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11775 } 11776 11777 /* 11778 * We need to make sure sd_ssc_send will have sd_ssc_assessment 11779 * followed to avoid missing FMA telemetries. 11780 */ 11781 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT; 11782 11783 #ifdef SDDEBUG 11784 switch (dataspace) { 11785 case UIO_USERSPACE: 11786 SD_TRACE(SD_LOG_IO, un, 11787 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un); 11788 break; 11789 case UIO_SYSSPACE: 11790 SD_TRACE(SD_LOG_IO, un, 11791 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un); 11792 break; 11793 default: 11794 SD_TRACE(SD_LOG_IO, un, 11795 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un); 11796 break; 11797 } 11798 #endif 11799 11800 rval = scsi_uscsi_copyin((intptr_t)incmd, flag, 11801 SD_ADDRESS(un), &uscmd); 11802 if (rval != 0) { 11803 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: " 11804 "scsi_uscsi_alloc_and_copyin failed\n", un); 11805 return (rval); 11806 } 11807 11808 if ((uscmd->uscsi_cdb != NULL) && 11809 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 11810 mutex_enter(SD_MUTEX(un)); 11811 un->un_f_format_in_progress = TRUE; 11812 mutex_exit(SD_MUTEX(un)); 11813 format = 1; 11814 } 11815 11816 /* 11817 * Allocate an sd_uscsi_info struct and fill it with the info 11818 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 11819 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 11820 * since we allocate the buf here in this function, we do not 11821 * need to preserve the prior contents of b_private. 11822 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 11823 */ 11824 uip = ssc->ssc_uscsi_info; 11825 uip->ui_flags = path_flag; 11826 uip->ui_cmdp = uscmd; 11827 11828 /* 11829 * Commands sent with priority are intended for error recovery 11830 * situations, and do not have retries performed. 
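 * (Setting USCSI_DIAGNOSE below is what requests this: commands carrying this flag are treated as non-retryable; see the matching USCSI_DIAGNOSE check in sd_ssc_assessment() below.)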
11831 */ 11832 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11833 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11834 } 11835 uscmd->uscsi_flags &= ~USCSI_NOINTR; 11836 11837 dev = SD_GET_DEV(un); 11838 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 11839 sd_uscsi_strategy, NULL, uip); 11840 11841 /* 11842 * mark ssc_flags right after handle_cmd to make sure 11843 * the uscsi has been sent 11844 */ 11845 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED; 11846 11847 #ifdef SDDEBUG 11848 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11849 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11850 uscmd->uscsi_status, uscmd->uscsi_resid); 11851 if (uscmd->uscsi_bufaddr != NULL) { 11852 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11853 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11854 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11855 if (dataspace == UIO_SYSSPACE) { 11856 SD_DUMP_MEMORY(un, SD_LOG_IO, 11857 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11858 uscmd->uscsi_buflen, SD_LOG_HEX); 11859 } 11860 } 11861 #endif 11862 11863 if (format == 1) { 11864 mutex_enter(SD_MUTEX(un)); 11865 un->un_f_format_in_progress = FALSE; 11866 mutex_exit(SD_MUTEX(un)); 11867 } 11868 11869 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd); 11870 11871 return (rval); 11872 } 11873 11874 /* 11875 * Function: sd_ssc_print 11876 * 11877 * Description: Print information available to the console. 11878 * 11879 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 11880 * sd_uscsi_info in. 11881 * sd_severity - log level. 11882 * Context: Kernel thread or interrupt context. 11883 */ 11884 static void 11885 sd_ssc_print(sd_ssc_t *ssc, int sd_severity) 11886 { 11887 struct uscsi_cmd *ucmdp; 11888 struct scsi_device *devp; 11889 dev_info_t *devinfo; 11890 uchar_t *sensep; 11891 int senlen; 11892 union scsi_cdb *cdbp; 11893 uchar_t com; 11894 extern struct scsi_key_strings scsi_cmds[]; 11895 11896 ASSERT(ssc != NULL); 11897 ASSERT(ssc->ssc_un != NULL); 11898 11899 if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT) 11900 return; 11901 ucmdp = ssc->ssc_uscsi_cmd; 11902 devp = SD_SCSI_DEVP(ssc->ssc_un); 11903 devinfo = SD_DEVINFO(ssc->ssc_un); 11904 ASSERT(ucmdp != NULL); 11905 ASSERT(devp != NULL); 11906 ASSERT(devinfo != NULL); 11907 sensep = (uint8_t *)ucmdp->uscsi_rqbuf; 11908 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid; 11909 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb; 11910 11911 /* In certain case (like DOORLOCK), the cdb could be NULL. */ 11912 if (cdbp == NULL) 11913 return; 11914 /* We don't print log if no sense data available. */ 11915 if (senlen == 0) 11916 sensep = NULL; 11917 com = cdbp->scc_cmd; 11918 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com, 11919 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL); 11920 } 11921 11922 /* 11923 * Function: sd_ssc_assessment 11924 * 11925 * Description: We use this function to make an assessment at the point 11926 * where SD driver may encounter a potential error. 11927 * 11928 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 11929 * sd_uscsi_info in. 11930 * tp_assess - a hint of strategy for ereport posting. 11931 * Possible values of tp_assess include: 11932 * SD_FMT_IGNORE - we don't post any ereport because we're 11933 * sure that it is ok to ignore the underlying problems. 11934 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now 11935 * but it might be not correct to ignore the underlying hardware 11936 * error. 
11937 * SD_FMT_STATUS_CHECK - we will post an ereport with the 11938 * payload driver-assessment of value "fail" or 11939 * "fatal" (depending on what information we have here). This 11940 * assessment value is usually set when the SD driver thinks a 11941 * potential error has occurred (typically, when the return value 11942 * of the SCSI command is EIO). 11943 * SD_FMT_STANDARD - we will post an ereport with the payload 11944 * driver-assessment of value "info". This assessment value is 11945 * set when the SCSI command returned successfully but with 11946 * sense data sent back. 11947 * 11948 * Context: Kernel thread. 11949 */ 11950 static void 11951 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess) 11952 { 11953 int senlen = 0; 11954 struct uscsi_cmd *ucmdp = NULL; 11955 struct sd_lun *un; 11956 11957 ASSERT(ssc != NULL); 11958 un = ssc->ssc_un; 11959 ASSERT(un != NULL); 11960 ucmdp = ssc->ssc_uscsi_cmd; 11961 ASSERT(ucmdp != NULL); 11962 11963 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) { 11964 ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT; 11965 } else { 11966 /* 11967 * If we enter here, it indicates a wrong calling 11968 * sequence of sd_ssc_send and sd_ssc_assessment, both of 11969 * which should be called as a pair; otherwise FMA 11970 * telemetry may be lost. 11971 */ 11972 if (ucmdp->uscsi_cdb != NULL) { 11973 SD_INFO(SD_LOG_SDTEST, un, 11974 "sd_ssc_assessment is missing the matching " 11975 "sd_ssc_send when running 0x%x, " 11976 "or there are superfluous sd_ssc_assessment calls " 11977 "for the same sd_ssc_send.\n", 11978 ucmdp->uscsi_cdb[0]); 11979 } 11980 /* 11981 * Set the ssc_flags to the initial value to avoid passing 11982 * down dirty flags to the following sd_ssc_send function. 11983 */ 11984 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11985 return; 11986 } 11987 11988 /* 11989 * Only handle an issued command which is waiting for assessment. 11990 * A command which has not been issued will not have 11991 * SSC_FLAGS_INVALID_DATA set, so it's OK to just return here. 11992 */ 11993 if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) { 11994 sd_ssc_print(ssc, SCSI_ERR_INFO); 11995 return; 11996 } else { 11997 /* 11998 * For an issued command, we should clear this flag so 11999 * that the sd_ssc_t structure can be reused for 12000 * multiple uscsi commands. 12001 */ 12002 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED; 12003 } 12004 12005 /* 12006 * We will not deal with non-retryable (flag USCSI_DIAGNOSE set) 12007 * commands here, and we should clear the ssc_flags before returning. 12008 */ 12009 if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) { 12010 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 12011 return; 12012 } 12013 12014 switch (tp_assess) { 12015 case SD_FMT_IGNORE: 12016 case SD_FMT_IGNORE_COMPROMISE: 12017 break; 12018 case SD_FMT_STATUS_CHECK: 12019 /* 12020 * For a failed command (including a succeeded command 12021 * that sent back invalid data). 12022 */ 12023 sd_ssc_post(ssc, SD_FM_DRV_FATAL); 12024 break; 12025 case SD_FMT_STANDARD: 12026 /* 12027 * Always for succeeded commands, possibly with sense 12028 * data sent back. 12029 * Limitation: 12030 * We can only handle a succeeded command with sense 12031 * data sent back when auto-request-sense is enabled.
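 * As a worked example with illustrative numbers: if uscsi_rqlen was 252 and the HBA left uscsi_rqresid == 234, then senlen computes to 18 valid sense bytes below, and (with ARQ done and enabled) an ereport with driver-assessment "info" is posted.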
12032 */ 12033 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen - 12034 ssc->ssc_uscsi_cmd->uscsi_rqresid; 12035 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) && 12036 (un->un_f_arq_enabled == TRUE) && 12037 senlen > 0 && 12038 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) { 12039 sd_ssc_post(ssc, SD_FM_DRV_NOTICE); 12040 } 12041 break; 12042 default: 12043 /* 12044 * Should not have other type of assessment. 12045 */ 12046 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 12047 "sd_ssc_assessment got wrong " 12048 "sd_type_assessment %d.\n", tp_assess); 12049 break; 12050 } 12051 /* 12052 * Clear up the ssc_flags before return. 12053 */ 12054 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 12055 } 12056 12057 /* 12058 * Function: sd_ssc_post 12059 * 12060 * Description: 1. read the driver property to get fm-scsi-log flag. 12061 * 2. print log if fm_log_capable is non-zero. 12062 * 3. call sd_ssc_ereport_post to post ereport if possible. 12063 * 12064 * Context: May be called from kernel thread or interrupt context. 12065 */ 12066 static void 12067 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess) 12068 { 12069 struct sd_lun *un; 12070 int sd_severity; 12071 12072 ASSERT(ssc != NULL); 12073 un = ssc->ssc_un; 12074 ASSERT(un != NULL); 12075 12076 /* 12077 * We may enter here from sd_ssc_assessment(for USCSI command) or 12078 * by directly called from sdintr context. 12079 * We don't handle a non-disk drive(CD-ROM, removable media). 12080 * Clear the ssc_flags before return in case we've set 12081 * SSC_FLAGS_INVALID_XXX which should be skipped for a non-disk 12082 * driver. 12083 */ 12084 if (ISCD(un) || un->un_f_has_removable_media) { 12085 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 12086 return; 12087 } 12088 12089 switch (sd_assess) { 12090 case SD_FM_DRV_FATAL: 12091 sd_severity = SCSI_ERR_FATAL; 12092 break; 12093 case SD_FM_DRV_RECOVERY: 12094 sd_severity = SCSI_ERR_RECOVERED; 12095 break; 12096 case SD_FM_DRV_RETRY: 12097 sd_severity = SCSI_ERR_RETRYABLE; 12098 break; 12099 case SD_FM_DRV_NOTICE: 12100 sd_severity = SCSI_ERR_INFO; 12101 break; 12102 default: 12103 sd_severity = SCSI_ERR_UNKNOWN; 12104 } 12105 /* print log */ 12106 sd_ssc_print(ssc, sd_severity); 12107 12108 /* always post ereport */ 12109 sd_ssc_ereport_post(ssc, sd_assess); 12110 } 12111 12112 /* 12113 * Function: sd_ssc_set_info 12114 * 12115 * Description: Mark ssc_flags and set ssc_info which would be the 12116 * payload of uderr ereport. This function will cause 12117 * sd_ssc_ereport_post to post uderr ereport only. 12118 * Besides, when ssc_flags == SSC_FLAGS_INVALID_DATA(USCSI), 12119 * the function will also call SD_ERROR or scsi_log for a 12120 * CDROM/removable-media/DDI_FM_NOT_CAPABLE device. 12121 * 12122 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 12123 * sd_uscsi_info in. 12124 * ssc_flags - indicate the sub-category of a uderr. 12125 * comp - this argument is meaningful only when 12126 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible 12127 * values include: 12128 * > 0, SD_ERROR is used with comp as the driver logging 12129 * component; 12130 * = 0, scsi-log is used to log error telemetries; 12131 * < 0, no log available for this telemetry. 12132 * 12133 * Context: Kernel thread or interrupt context 12134 */ 12135 static void 12136 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...) 
12137 { 12138 va_list ap; 12139 12140 ASSERT(ssc != NULL); 12141 ASSERT(ssc->ssc_un != NULL); 12142 12143 ssc->ssc_flags |= ssc_flags; 12144 va_start(ap, fmt); 12145 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap); 12146 va_end(ap); 12147 12148 /* 12149 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command 12150 * with invalid data sent back. For non-uscsi command, the 12151 * following code will be bypassed. 12152 */ 12153 if (ssc_flags & SSC_FLAGS_INVALID_DATA) { 12154 if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) { 12155 /* 12156 * If the error belong to certain component and we 12157 * do not want it to show up on the console, we 12158 * will use SD_ERROR, otherwise scsi_log is 12159 * preferred. 12160 */ 12161 if (comp > 0) { 12162 SD_ERROR(comp, ssc->ssc_un, ssc->ssc_info); 12163 } else if (comp == 0) { 12164 scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label, 12165 CE_WARN, ssc->ssc_info); 12166 } 12167 } 12168 } 12169 } 12170 12171 /* 12172 * Function: sd_buf_iodone 12173 * 12174 * Description: Frees the sd_xbuf & returns the buf to its originator. 12175 * 12176 * Context: May be called from interrupt context. 12177 */ 12178 /* ARGSUSED */ 12179 static void 12180 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 12181 { 12182 struct sd_xbuf *xp; 12183 12184 ASSERT(un != NULL); 12185 ASSERT(bp != NULL); 12186 ASSERT(!mutex_owned(SD_MUTEX(un))); 12187 12188 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 12189 12190 xp = SD_GET_XBUF(bp); 12191 ASSERT(xp != NULL); 12192 12193 /* xbuf is gone after this */ 12194 if (ddi_xbuf_done(bp, un->un_xbuf_attr)) { 12195 mutex_enter(SD_MUTEX(un)); 12196 12197 /* 12198 * Grab time when the cmd completed. 12199 * This is used for determining if the system has been 12200 * idle long enough to make it idle to the PM framework. 12201 * This is for lowering the overhead, and therefore improving 12202 * performance per I/O operation. 12203 */ 12204 un->un_pm_idle_time = ddi_get_time(); 12205 12206 un->un_ncmds_in_driver--; 12207 ASSERT(un->un_ncmds_in_driver >= 0); 12208 SD_INFO(SD_LOG_IO, un, 12209 "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 12210 un->un_ncmds_in_driver); 12211 12212 mutex_exit(SD_MUTEX(un)); 12213 } 12214 12215 biodone(bp); /* bp is gone after this */ 12216 12217 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 12218 } 12219 12220 12221 /* 12222 * Function: sd_uscsi_iodone 12223 * 12224 * Description: Frees the sd_xbuf & returns the buf to its originator. 12225 * 12226 * Context: May be called from interrupt context. 12227 */ 12228 /* ARGSUSED */ 12229 static void 12230 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 12231 { 12232 struct sd_xbuf *xp; 12233 12234 ASSERT(un != NULL); 12235 ASSERT(bp != NULL); 12236 12237 xp = SD_GET_XBUF(bp); 12238 ASSERT(xp != NULL); 12239 ASSERT(!mutex_owned(SD_MUTEX(un))); 12240 12241 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 12242 12243 bp->b_private = xp->xb_private; 12244 12245 mutex_enter(SD_MUTEX(un)); 12246 12247 /* 12248 * Grab time when the cmd completed. 12249 * This is used for determining if the system has been 12250 * idle long enough to make it idle to the PM framework. 12251 * This is for lowering the overhead, and therefore improving 12252 * performance per I/O operation. 
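 * (A hedged note: the PM idle-timeout handling elsewhere in this driver compares un_pm_idle_time against the current time to decide whether the device has been idle long enough to be powered down.)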
12253 */ 12254 un->un_pm_idle_time = ddi_get_time(); 12255 12256 un->un_ncmds_in_driver--; 12257 ASSERT(un->un_ncmds_in_driver >= 0); 12258 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 12259 un->un_ncmds_in_driver); 12260 12261 mutex_exit(SD_MUTEX(un)); 12262 12263 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 12264 SENSE_LENGTH) { 12265 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 12266 MAX_SENSE_LENGTH); 12267 } else { 12268 kmem_free(xp, sizeof (struct sd_xbuf)); 12269 } 12270 12271 biodone(bp); 12272 12273 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 12274 } 12275 12276 12277 /* 12278 * Function: sd_mapblockaddr_iostart 12279 * 12280 * Description: Verify that the request lies within the partition limits for 12281 * the indicated minor device. Issue an "overrun" buf if the 12282 * request would exceed the partition range. Converts the 12283 * partition-relative block address to absolute. 12284 * 12285 * Upon exit of this function: 12286 * 1. I/O is aligned 12287 * xp->xb_blkno represents the absolute sector address 12288 * 2. I/O is misaligned 12289 * xp->xb_blkno represents the absolute logical block address 12290 * based on DEV_BSIZE. The logical block address will be 12291 * converted to a physical sector address in sd_mapblocksize_\ 12292 * iostart. 12293 * 3. I/O is misaligned but is aligned in the "overrun" buf 12294 * xp->xb_blkno represents the absolute logical block address 12295 * based on DEV_BSIZE. The logical block address will be 12296 * converted to a physical sector address in sd_mapblocksize_\ 12297 * iostart. But no RMW will be issued in this case. 12298 * 12299 * Context: Can sleep 12300 * 12301 * Issues: This follows what the old code did in terms of accessing 12302 * some of the partition info in the unit struct without holding 12303 * the mutex. This is a general issue: if the partition info 12304 * can be altered while IO is in progress, then as soon as we send 12305 * a buf, its partitioning can be invalid before it gets to the 12306 * device. Probably the right fix is to move partitioning out 12307 * of the driver entirely. 12308 */ 12309 12310 static void 12311 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 12312 { 12313 diskaddr_t nblocks; /* #blocks in the given partition */ 12314 daddr_t blocknum; /* Block number specified by the buf */ 12315 size_t requested_nblocks; 12316 size_t available_nblocks; 12317 int partition; 12318 diskaddr_t partition_offset; 12319 struct sd_xbuf *xp; 12320 int secmask = 0, blknomask = 0; 12321 ushort_t is_aligned = TRUE; 12322 12323 ASSERT(un != NULL); 12324 ASSERT(bp != NULL); 12325 ASSERT(!mutex_owned(SD_MUTEX(un))); 12326 12327 SD_TRACE(SD_LOG_IO_PARTITION, un, 12328 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 12329 12330 xp = SD_GET_XBUF(bp); 12331 ASSERT(xp != NULL); 12332 12333 /* 12334 * If the geometry is not indicated as valid, attempt to access 12335 * the unit & verify the geometry/label. This can be the case for 12336 * removable-media devices, or if the device was opened in 12337 * NDELAY/NONBLOCK mode.
12338 */ 12339 partition = SDPART(bp->b_edev); 12340 12341 if (!SD_IS_VALID_LABEL(un)) { 12342 sd_ssc_t *ssc; 12343 /* 12344 * Initialize sd_ssc_t for internal uscsi commands. 12345 * To avoid a potential performance issue, we allocate 12346 * this memory only when the label is invalid. 12347 */ 12348 ssc = sd_ssc_init(un); 12349 12350 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) { 12351 /* 12352 * For removable devices it is possible to start an 12353 * I/O without media by opening the device in nodelay 12354 * mode. Also, for writable CDs there can be many 12355 * scenarios where there is no geometry yet but the 12356 * volume manager is trying to issue a read() just 12357 * because it can see a TOC on the CD. So do not print 12358 * a message for removables. 12359 */ 12360 if (!un->un_f_has_removable_media) { 12361 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12362 "i/o to invalid geometry\n"); 12363 } 12364 bioerror(bp, EIO); 12365 bp->b_resid = bp->b_bcount; 12366 SD_BEGIN_IODONE(index, un, bp); 12367 12368 sd_ssc_fini(ssc); 12369 return; 12370 } 12371 sd_ssc_fini(ssc); 12372 } 12373 12374 nblocks = 0; 12375 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 12376 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 12377 12378 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1; 12379 secmask = un->un_tgt_blocksize - 1; 12380 12381 if ((bp->b_lblkno & (blknomask)) || (bp->b_bcount & (secmask))) { 12382 is_aligned = FALSE; 12383 } 12384 12385 if (!(NOT_DEVBSIZE(un))) { 12386 /* 12387 * If the I/O is aligned, there is no need for RMW (Read 12388 * Modify Write); convert the logical block number to the 12389 * target's physical sector number. 12390 */ 12391 if (is_aligned) { 12392 xp->xb_blkno = SD_SYS2TGTBLOCK(un, xp->xb_blkno); 12393 } else { 12394 switch (un->un_f_rmw_type) { 12395 case SD_RMW_TYPE_RETURN_ERROR: 12396 bp->b_flags |= B_ERROR; 12397 goto error_exit; 12398 12399 case SD_RMW_TYPE_DEFAULT: 12400 mutex_enter(SD_MUTEX(un)); 12401 if (un->un_rmw_msg_timeid == NULL) { 12402 scsi_log(SD_DEVINFO(un), sd_label, 12403 CE_WARN, "I/O request is not " 12404 "aligned with %d disk sector size. " 12405 "It is handled through Read Modify " 12406 "Write but the performance is " 12407 "very low.\n", 12408 un->un_tgt_blocksize); 12409 un->un_rmw_msg_timeid = 12410 timeout(sd_rmw_msg_print_handler, 12411 un, SD_RMW_MSG_PRINT_TIMEOUT); 12412 } else { 12413 un->un_rmw_incre_count++; 12414 } 12415 mutex_exit(SD_MUTEX(un)); 12416 break; 12417 12418 case SD_RMW_TYPE_NO_WARNING: 12419 default: 12420 break; 12421 } 12422 12423 nblocks = SD_TGT2SYSBLOCK(un, nblocks); 12424 partition_offset = SD_TGT2SYSBLOCK(un, 12425 partition_offset); 12426 } 12427 } 12428 12429 /* 12430 * blocknum is the starting block number of the request. At this 12431 * point it is still relative to the start of the minor device. 12432 */ 12433 blocknum = xp->xb_blkno; 12434 12435 /* 12436 * Legacy: If the starting block number is one past the last block 12437 * in the partition, do not set B_ERROR in the buf. 12438 */ 12439 if (blocknum == nblocks) { 12440 goto error_exit; 12441 } 12442 12443 /* 12444 * Confirm that the first block of the request lies within the 12445 * partition limits. Also the requested number of bytes must be 12446 * a multiple of the system block size.
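 * For example (illustrative numbers): with nblocks == 2048, a request starting at block 100 passes, a request starting at block 4096 fails with B_ERROR set, and (per the legacy rule above) a request starting exactly at block 2048 is rejected without B_ERROR. A passing request is later made absolute by adding partition_offset.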
12447 */ 12448 if ((blocknum < 0) || (blocknum >= nblocks) || 12449 ((bp->b_bcount & (DEV_BSIZE - 1)) != 0)) { 12450 bp->b_flags |= B_ERROR; 12451 goto error_exit; 12452 } 12453 12454 /* 12455 * If the requsted # blocks exceeds the available # blocks, that 12456 * is an overrun of the partition. 12457 */ 12458 if ((!NOT_DEVBSIZE(un)) && is_aligned) { 12459 requested_nblocks = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 12460 } else { 12461 requested_nblocks = SD_BYTES2SYSBLOCKS(bp->b_bcount); 12462 } 12463 12464 available_nblocks = (size_t)(nblocks - blocknum); 12465 ASSERT(nblocks >= blocknum); 12466 12467 if (requested_nblocks > available_nblocks) { 12468 size_t resid; 12469 12470 /* 12471 * Allocate an "overrun" buf to allow the request to proceed 12472 * for the amount of space available in the partition. The 12473 * amount not transferred will be added into the b_resid 12474 * when the operation is complete. The overrun buf 12475 * replaces the original buf here, and the original buf 12476 * is saved inside the overrun buf, for later use. 12477 */ 12478 if ((!NOT_DEVBSIZE(un)) && is_aligned) { 12479 resid = SD_TGTBLOCKS2BYTES(un, 12480 (offset_t)(requested_nblocks - available_nblocks)); 12481 } else { 12482 resid = SD_SYSBLOCKS2BYTES( 12483 (offset_t)(requested_nblocks - available_nblocks)); 12484 } 12485 12486 size_t count = bp->b_bcount - resid; 12487 /* 12488 * Note: count is an unsigned entity thus it'll NEVER 12489 * be less than 0 so ASSERT the original values are 12490 * correct. 12491 */ 12492 ASSERT(bp->b_bcount >= resid); 12493 12494 bp = sd_bioclone_alloc(bp, count, blocknum, 12495 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 12496 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 12497 ASSERT(xp != NULL); 12498 } 12499 12500 /* At this point there should be no residual for this buf. */ 12501 ASSERT(bp->b_resid == 0); 12502 12503 /* Convert the block number to an absolute address. */ 12504 xp->xb_blkno += partition_offset; 12505 12506 SD_NEXT_IOSTART(index, un, bp); 12507 12508 SD_TRACE(SD_LOG_IO_PARTITION, un, 12509 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 12510 12511 return; 12512 12513 error_exit: 12514 bp->b_resid = bp->b_bcount; 12515 SD_BEGIN_IODONE(index, un, bp); 12516 SD_TRACE(SD_LOG_IO_PARTITION, un, 12517 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 12518 } 12519 12520 12521 /* 12522 * Function: sd_mapblockaddr_iodone 12523 * 12524 * Description: Completion-side processing for partition management. 12525 * 12526 * Context: May be called under interrupt context 12527 */ 12528 12529 static void 12530 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 12531 { 12532 /* int partition; */ /* Not used, see below. */ 12533 ASSERT(un != NULL); 12534 ASSERT(bp != NULL); 12535 ASSERT(!mutex_owned(SD_MUTEX(un))); 12536 12537 SD_TRACE(SD_LOG_IO_PARTITION, un, 12538 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 12539 12540 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 12541 /* 12542 * We have an "overrun" buf to deal with... 12543 */ 12544 struct sd_xbuf *xp; 12545 struct buf *obp; /* ptr to the original buf */ 12546 12547 xp = SD_GET_XBUF(bp); 12548 ASSERT(xp != NULL); 12549 12550 /* Retrieve the pointer to the original buf */ 12551 obp = (struct buf *)xp->xb_private; 12552 ASSERT(obp != NULL); 12553 12554 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 12555 bioerror(obp, bp->b_error); 12556 12557 sd_bioclone_free(bp); 12558 12559 /* 12560 * Get back the original buf. 
12561 * Note that since the restoration of xb_blkno below 12562 * was removed, the sd_xbuf is not needed. 12563 */ 12564 bp = obp; 12565 /* 12566 * xp = SD_GET_XBUF(bp); 12567 * ASSERT(xp != NULL); 12568 */ 12569 } 12570 12571 /* 12572 * Convert sd->xb_blkno back to a minor-device relative value. 12573 * Note: this has been commented out, as it is not needed in the 12574 * current implementation of the driver (ie, since this function 12575 * is at the top of the layering chains, so the info will be 12576 * discarded) and it is in the "hot" IO path. 12577 * 12578 * partition = getminor(bp->b_edev) & SDPART_MASK; 12579 * xp->xb_blkno -= un->un_offset[partition]; 12580 */ 12581 12582 SD_NEXT_IODONE(index, un, bp); 12583 12584 SD_TRACE(SD_LOG_IO_PARTITION, un, 12585 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 12586 } 12587 12588 12589 /* 12590 * Function: sd_mapblocksize_iostart 12591 * 12592 * Description: Convert between system block size (un->un_sys_blocksize) 12593 * and target block size (un->un_tgt_blocksize). 12594 * 12595 * Context: Can sleep to allocate resources. 12596 * 12597 * Assumptions: A higher layer has already performed any partition validation, 12598 * and converted the xp->xb_blkno to an absolute value relative 12599 * to the start of the device. 12600 * 12601 * It is also assumed that the higher layer has implemented 12602 * an "overrun" mechanism for the case where the request would 12603 * read/write beyond the end of a partition. In this case we 12604 * assume (and ASSERT) that bp->b_resid == 0. 12605 * 12606 * Note: The implementation for this routine assumes the target 12607 * block size remains constant between allocation and transport. 12608 */ 12609 12610 static void 12611 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 12612 { 12613 struct sd_mapblocksize_info *bsp; 12614 struct sd_xbuf *xp; 12615 offset_t first_byte; 12616 daddr_t start_block, end_block; 12617 daddr_t request_bytes; 12618 ushort_t is_aligned = FALSE; 12619 12620 ASSERT(un != NULL); 12621 ASSERT(bp != NULL); 12622 ASSERT(!mutex_owned(SD_MUTEX(un))); 12623 ASSERT(bp->b_resid == 0); 12624 12625 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12626 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 12627 12628 /* 12629 * For a non-writable CD, a write request is an error 12630 */ 12631 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 12632 (un->un_f_mmc_writable_media == FALSE)) { 12633 bioerror(bp, EIO); 12634 bp->b_resid = bp->b_bcount; 12635 SD_BEGIN_IODONE(index, un, bp); 12636 return; 12637 } 12638 12639 /* 12640 * We do not need a shadow buf if the device is using 12641 * un->un_sys_blocksize as its block size or if bcount == 0. 12642 * In this case there is no layer-private data block allocated. 
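 * Otherwise the request must be remapped. As a worked example with illustrative numbers: with un_tgt_blocksize == 2048 and a request starting at system block 10 (DEV_BSIZE == 512), first_byte below is 10 * 512 == 5120, so the request begins in target block 2 with mbs_copy_offset == 5120 - (2 * 2048) == 1024 bytes into that block.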
12643 */
12644 if ((un->un_tgt_blocksize == DEV_BSIZE) ||
12645 (bp->b_bcount == 0)) {
12646 goto done;
12647 }
12648 
12649 #if defined(__i386) || defined(__amd64)
12650 /* We do not support non-block-aligned transfers for ROD devices */
12651 ASSERT(!ISROD(un));
12652 #endif
12653 
12654 xp = SD_GET_XBUF(bp);
12655 ASSERT(xp != NULL);
12656 
12657 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12658 "tgt_blocksize:0x%x sys_blocksize:0x%x\n",
12659 un->un_tgt_blocksize, DEV_BSIZE);
12660 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12661 "request start block:0x%x\n", xp->xb_blkno);
12662 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12663 "request len:0x%x\n", bp->b_bcount);
12664 
12665 /*
12666 * Allocate the layer-private data area for the mapblocksize layer.
12667 * Layers are allowed to use the xb_private member of the sd_xbuf
12668 * struct to store the pointer to their layer-private data block, but
12669 * each layer also has the responsibility of restoring the prior
12670 * contents of xb_private before returning the buf/xbuf to the
12671 * higher layer that sent it.
12672 *
12673 * Here we save the prior contents of xp->xb_private into the
12674 * bsp->mbs_oprivate field of our layer-private data area. This value
12675 * is restored by sd_mapblocksize_iodone() just prior to freeing up
12676 * the layer-private area and returning the buf/xbuf to the layer
12677 * that sent it.
12678 *
12679 * Note that here we use kmem_zalloc for the allocation as there are
12680 * parts of the mapblocksize code that expect certain fields to be
12681 * zero unless explicitly set to a required value.
12682 */
12683 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
12684 bsp->mbs_oprivate = xp->xb_private;
12685 xp->xb_private = bsp;
12686 
12687 /*
12688 * This treats the data on the disk (target) as an array of bytes.
12689 * first_byte is the byte offset, from the beginning of the device,
12690 * to the location of the request. This is converted from a
12691 * un->un_sys_blocksize block address to a byte offset, and then back
12692 * to a block address based upon a un->un_tgt_blocksize block size.
12693 *
12694 * xp->xb_blkno should be absolute upon entry into this function,
12695 * but it is based upon partitions that use the "system"
12696 * block size. It must be adjusted to reflect the block size of
12697 * the target.
12698 *
12699 * Note that end_block is actually the block that follows the last
12700 * block of the request, but that's what is needed for the computation.
12701 */
12702 first_byte = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
12703 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
12704 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) /
12705 un->un_tgt_blocksize;
12706 
12707 /* request_bytes is rounded up to a multiple of the target block size */
12708 request_bytes = (end_block - start_block) * un->un_tgt_blocksize;
12709 
12710 /*
12711 * See if the starting address of the request and the request
12712 * length are aligned on a un->un_tgt_blocksize boundary. If aligned
12713 * then we do not need to allocate a shadow buf to handle the request.
12714 */
12715 if (((first_byte % un->un_tgt_blocksize) == 0) &&
12716 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
12717 is_aligned = TRUE;
12718 }
12719 
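/*
 * Worked example of the conversion above (hypothetical request, for
 * illustration only): assume un->un_tgt_blocksize == 2048 (a typical
 * CD-ROM) with 512-byte system blocks, and a request for system
 * block 10 with b_bcount == 4096:
 *
 *	first_byte    = 10 * 512                    = 5120
 *	start_block   = 5120 / 2048                 = 2
 *	end_block     = (5120 + 4096 + 2047) / 2048 = 5
 *	request_bytes = (5 - 2) * 2048              = 6144
 *
 * first_byte % 2048 == 1024, so this request is NOT aligned and a
 * shadow buf covering target blocks 2 through 4 is allocated below.
 */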
12720 if ((bp->b_flags & B_READ) == 0) {
12721 /*
12722 * Lock the range for a write operation. An aligned request is
12723 * considered a simple write; otherwise the request must be a
12724 * read-modify-write.
12725 */
12726 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
12727 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
12728 }
12729 
12730 /*
12731 * Alloc a shadow buf if the request is not aligned. Also, this is
12732 * where the READ command is generated for a read-modify-write. (The
12733 * write phase is deferred until after the read completes.)
12734 */
12735 if (is_aligned == FALSE) {
12736 
12737 struct sd_mapblocksize_info *shadow_bsp;
12738 struct sd_xbuf *shadow_xp;
12739 struct buf *shadow_bp;
12740 
12741 /*
12742 * Allocate the shadow buf and its associated xbuf. Note that
12743 * after this call the xb_blkno value in both the original
12744 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
12745 * same: absolute (relative to the start of the device), and
12746 * adjusted for the target block size. The b_blkno in the
12747 * shadow buf will also be set to this value. We should never
12748 * change b_blkno in the original bp however.
12749 *
12750 * Note also that the shadow buf will always need to be a
12751 * READ command, regardless of whether the incoming command
12752 * is a READ or a WRITE.
12753 */
12754 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
12755 xp->xb_blkno,
12756 (int (*)(struct buf *)) sd_mapblocksize_iodone);
12757 
12758 shadow_xp = SD_GET_XBUF(shadow_bp);
12759 
12760 /*
12761 * Allocate the layer-private data for the shadow buf.
12762 * (No need to preserve xb_private in the shadow xbuf.)
12763 */
12764 shadow_xp->xb_private = shadow_bsp =
12765 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
12766 
12767 /*
12768 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone
12769 * to figure out where the start of the user data is (based upon
12770 * the system block size) in the data returned by the READ
12771 * command (which will be based upon the target blocksize). Note
12772 * that this is only really used if the request is unaligned.
12773 */
12774 bsp->mbs_copy_offset = (ssize_t)(first_byte -
12775 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize));
12776 ASSERT((bsp->mbs_copy_offset >= 0) &&
12777 (bsp->mbs_copy_offset < un->un_tgt_blocksize));
12778 
12779 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset;
12780 
12781 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index;
12782 
12783 /* Transfer the wmap (if any) to the shadow buf */
12784 shadow_bsp->mbs_wmp = bsp->mbs_wmp;
12785 bsp->mbs_wmp = NULL;
12786 
12787 /*
12788 * The shadow buf goes on from here in place of the
12789 * original buf.
12790 */
12791 shadow_bsp->mbs_orig_bp = bp;
12792 bp = shadow_bp;
12793 }
12794 
12795 SD_INFO(SD_LOG_IO_RMMEDIA, un,
12796 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno);
12797 SD_INFO(SD_LOG_IO_RMMEDIA, un,
12798 "sd_mapblocksize_iostart: tgt request len:0x%x\n",
12799 request_bytes);
12800 SD_INFO(SD_LOG_IO_RMMEDIA, un,
12801 "sd_mapblocksize_iostart: shadow buf:0x%p\n", bp);
12802 
12803 done:
12804 SD_NEXT_IOSTART(index, un, bp);
12805 
12806 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
12807 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp);
12808 }
12809 
12810 
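/*
 * Note on the unaligned case above (continuing the hypothetical
 * 2048-byte-target example): the user's data does not start at the
 * beginning of the shadow buf; its offset is
 *
 *	mbs_copy_offset = first_byte - (xb_blkno * un_tgt_blocksize)
 *	                = 5120 - (2 * 2048)
 *	                = 1024
 *
 * which is always in the range [0, un_tgt_blocksize), as the ASSERT
 * in sd_mapblocksize_iostart() enforces.
 */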
12811 /*
12812 * Function: sd_mapblocksize_iodone
12813 *
12814 * Description: Completion-side processing for block-size mapping.
12815 *
12816 * Context: May be called under interrupt context
12817 */
12818 
12819 static void
12820 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp)
12821 {
12822 struct sd_mapblocksize_info *bsp;
12823 struct sd_xbuf *xp;
12824 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */
12825 struct buf *orig_bp; /* ptr to the original buf */
12826 offset_t shadow_end;
12827 offset_t request_end;
12828 offset_t shadow_start;
12829 ssize_t copy_offset;
12830 size_t copy_length;
12831 size_t shortfall;
12832 uint_t is_write; /* TRUE if this bp is a WRITE */
12833 uint_t has_wmap; /* TRUE if this bp has a wmap */
12834 
12835 ASSERT(un != NULL);
12836 ASSERT(bp != NULL);
12837 
12838 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
12839 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp);
12840 
12841 /*
12842 * There is no shadow buf or layer-private data if the target is
12843 * using un->un_sys_blocksize as its block size or if bcount == 0.
12844 */
12845 if ((un->un_tgt_blocksize == DEV_BSIZE) ||
12846 (bp->b_bcount == 0)) {
12847 goto exit;
12848 }
12849 
12850 xp = SD_GET_XBUF(bp);
12851 ASSERT(xp != NULL);
12852 
12853 /* Retrieve the pointer to the layer-private data area from the xbuf. */
12854 bsp = xp->xb_private;
12855 
12856 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE;
12857 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE;
12858 
12859 if (is_write) {
12860 /*
12861 * For a WRITE request we must free up the block range that
12862 * we have locked up. This holds regardless of whether this is
12863 * an aligned write request or a read-modify-write request.
12864 */
12865 sd_range_unlock(un, bsp->mbs_wmp);
12866 bsp->mbs_wmp = NULL;
12867 }
12868 
12869 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) {
12870 /*
12871 * An aligned read or write command will have no shadow buf;
12872 * there is not much else to do with it.
12873 */
12874 goto done;
12875 }
12876 
12877 orig_bp = bsp->mbs_orig_bp;
12878 ASSERT(orig_bp != NULL);
12879 orig_xp = SD_GET_XBUF(orig_bp);
12880 ASSERT(orig_xp != NULL);
12881 ASSERT(!mutex_owned(SD_MUTEX(un)));
12882 
12883 if (!is_write && has_wmap) {
12884 /*
12885 * A READ with a wmap means this is the READ phase of a
12886 * read-modify-write. If an error occurred on the READ then
12887 * we do not proceed with the WRITE phase or copy any data.
12888 * Just release the write maps and return with an error.
12889 */
12890 if ((bp->b_resid != 0) || (bp->b_error != 0)) {
12891 orig_bp->b_resid = orig_bp->b_bcount;
12892 bioerror(orig_bp, bp->b_error);
12893 sd_range_unlock(un, bsp->mbs_wmp);
12894 goto freebuf_done;
12895 }
12896 }
12897 
12898 /*
12899 * Here is where we set up to copy the data from the shadow buf
12900 * into the space associated with the original buf.
12901 *
12902 * To deal with the conversion between block sizes, these
12903 * computations treat the data as an array of bytes, with the
12904 * first byte (byte 0) corresponding to the first byte in the
12905 * first block on the disk.
12906 */
12907 
12908 /*
12909 * shadow_start and shadow_end delimit the data returned with the
12910 * shadow IO request.
12911 */
12912 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
12913 shadow_end = shadow_start + bp->b_bcount - bp->b_resid;
12914 
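/*
 * Illustrative numbers (hypothetical, continuing the example in
 * sd_mapblocksize_iostart()): xb_blkno == 2 gives shadow_start ==
 * 4096. If the shadow buf carried b_bcount == 6144 but came back
 * with b_resid == 2048, then shadow_end == 4096 + 6144 - 2048 ==
 * 8192. With copy_offset == 1024 and an original b_bcount of 4096,
 * request_end (computed below) == 4096 + 1024 + 4096 == 9216, so
 * the copy comes up 1024 bytes short -- the 'shortfall' handled
 * below.
 */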
12915 /*
12916 * copy_offset gives the offset (in bytes) from the start of the first
12917 * block of the READ request to the beginning of the data. We retrieve
12918 * this value from the mbs_copy_offset field of the layer-private data
12919 * area, where it was saved by sd_mapblocksize_iostart(). copy_length
12920 * gives the amount of data to be copied (in bytes).
12921 */
12922 copy_offset = bsp->mbs_copy_offset;
12923 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize));
12924 copy_length = orig_bp->b_bcount;
12925 request_end = shadow_start + copy_offset + orig_bp->b_bcount;
12926 
12927 /*
12928 * Set up the resid and error fields of orig_bp as appropriate.
12929 */
12930 if (shadow_end >= request_end) {
12931 /* We got all the requested data; set resid to zero */
12932 orig_bp->b_resid = 0;
12933 } else {
12934 /*
12935 * We failed to get enough data to fully satisfy the original
12936 * request. Just copy back whatever data we got and set
12937 * up the residual and error code as required.
12938 *
12939 * 'shortfall' is the amount by which the data received with the
12940 * shadow buf has "fallen short" of the requested amount.
12941 */
12942 shortfall = (size_t)(request_end - shadow_end);
12943 
12944 if (shortfall > orig_bp->b_bcount) {
12945 /*
12946 * We did not get enough data to even partially
12947 * fulfill the original request. The residual is
12948 * equal to the amount requested.
12949 */
12950 orig_bp->b_resid = orig_bp->b_bcount;
12951 } else {
12952 /*
12953 * We did not get all the data that we requested
12954 * from the device, but we will try to return what
12955 * portion we did get.
12956 */
12957 orig_bp->b_resid = shortfall;
12958 }
12959 ASSERT(copy_length >= orig_bp->b_resid);
12960 copy_length -= orig_bp->b_resid;
12961 }
12962 
12963 /* Propagate the error code from the shadow buf to the original buf */
12964 bioerror(orig_bp, bp->b_error);
12965 
12966 if (is_write) {
12967 goto freebuf_done; /* No data copying for a WRITE */
12968 }
12969 
12970 if (has_wmap) {
12971 /*
12972 * This is a READ command from the READ phase of a
12973 * read-modify-write request. We have to copy the data given
12974 * by the user OVER the data returned by the READ command,
12975 * then convert the command from a READ to a WRITE and send
12976 * it back to the target.
12977 */
12978 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset,
12979 copy_length);
12980 
12981 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */
12982 
12983 /*
12984 * Dispatch the WRITE command to the taskq thread, which
12985 * will in turn send the command to the target. When the
12986 * WRITE command completes, we (sd_mapblocksize_iodone())
12987 * will get called again as part of the iodone chain
12988 * processing for it. Note that we will still be dealing
12989 * with the shadow buf at that point.
12990 */
12991 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp,
12992 KM_NOSLEEP) != 0) {
12993 /*
12994 * Dispatch was successful so we are done. Return
12995 * without going any higher up the iodone chain. Do
12996 * not free up any layer-private data until after the
12997 * WRITE completes.
12998 */
12999 return;
13000 }
13001 
13002 /*
13003 * Dispatch of the WRITE command failed; set up the error
13004 * condition and send this IO back up the iodone chain.
13005 */
13006 bioerror(orig_bp, EIO);
13007 orig_bp->b_resid = orig_bp->b_bcount;
13008 
13009 } else {
13010 /*
13011 * This is a regular READ request (ie, not a RMW). Copy the
13012 * data from the shadow buf into the original buf.
The 13013 * copy_offset compensates for any "misalignment" between the 13014 * shadow buf (with its un->un_tgt_blocksize blocks) and the 13015 * original buf (with its un->un_sys_blocksize blocks). 13016 */ 13017 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 13018 copy_length); 13019 } 13020 13021 freebuf_done: 13022 13023 /* 13024 * At this point we still have both the shadow buf AND the original 13025 * buf to deal with, as well as the layer-private data area in each. 13026 * Local variables are as follows: 13027 * 13028 * bp -- points to shadow buf 13029 * xp -- points to xbuf of shadow buf 13030 * bsp -- points to layer-private data area of shadow buf 13031 * orig_bp -- points to original buf 13032 * 13033 * First free the shadow buf and its associated xbuf, then free the 13034 * layer-private data area from the shadow buf. There is no need to 13035 * restore xb_private in the shadow xbuf. 13036 */ 13037 sd_shadow_buf_free(bp); 13038 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 13039 13040 /* 13041 * Now update the local variables to point to the original buf, xbuf, 13042 * and layer-private area. 13043 */ 13044 bp = orig_bp; 13045 xp = SD_GET_XBUF(bp); 13046 ASSERT(xp != NULL); 13047 ASSERT(xp == orig_xp); 13048 bsp = xp->xb_private; 13049 ASSERT(bsp != NULL); 13050 13051 done: 13052 /* 13053 * Restore xb_private to whatever it was set to by the next higher 13054 * layer in the chain, then free the layer-private data area. 13055 */ 13056 xp->xb_private = bsp->mbs_oprivate; 13057 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 13058 13059 exit: 13060 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 13061 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 13062 13063 SD_NEXT_IODONE(index, un, bp); 13064 } 13065 13066 13067 /* 13068 * Function: sd_checksum_iostart 13069 * 13070 * Description: A stub function for a layer that's currently not used. 13071 * For now just a placeholder. 13072 * 13073 * Context: Kernel thread context 13074 */ 13075 13076 static void 13077 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 13078 { 13079 ASSERT(un != NULL); 13080 ASSERT(bp != NULL); 13081 ASSERT(!mutex_owned(SD_MUTEX(un))); 13082 SD_NEXT_IOSTART(index, un, bp); 13083 } 13084 13085 13086 /* 13087 * Function: sd_checksum_iodone 13088 * 13089 * Description: A stub function for a layer that's currently not used. 13090 * For now just a placeholder. 13091 * 13092 * Context: May be called under interrupt context 13093 */ 13094 13095 static void 13096 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 13097 { 13098 ASSERT(un != NULL); 13099 ASSERT(bp != NULL); 13100 ASSERT(!mutex_owned(SD_MUTEX(un))); 13101 SD_NEXT_IODONE(index, un, bp); 13102 } 13103 13104 13105 /* 13106 * Function: sd_checksum_uscsi_iostart 13107 * 13108 * Description: A stub function for a layer that's currently not used. 13109 * For now just a placeholder. 13110 * 13111 * Context: Kernel thread context 13112 */ 13113 13114 static void 13115 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 13116 { 13117 ASSERT(un != NULL); 13118 ASSERT(bp != NULL); 13119 ASSERT(!mutex_owned(SD_MUTEX(un))); 13120 SD_NEXT_IOSTART(index, un, bp); 13121 } 13122 13123 13124 /* 13125 * Function: sd_checksum_uscsi_iodone 13126 * 13127 * Description: A stub function for a layer that's currently not used. 13128 * For now just a placeholder. 
13129 *
13130 * Context: May be called under interrupt context
13131 */
13132 
13133 static void
13134 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
13135 {
13136 ASSERT(un != NULL);
13137 ASSERT(bp != NULL);
13138 ASSERT(!mutex_owned(SD_MUTEX(un)));
13139 SD_NEXT_IODONE(index, un, bp);
13140 }
13141 
13142 
13143 /*
13144 * Function: sd_pm_iostart
13145 *
13146 * Description: iostart-side routine for power management.
13147 *
13148 * Context: Kernel thread context
13149 */
13150 
13151 static void
13152 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
13153 {
13154 ASSERT(un != NULL);
13155 ASSERT(bp != NULL);
13156 ASSERT(!mutex_owned(SD_MUTEX(un)));
13157 ASSERT(!mutex_owned(&un->un_pm_mutex));
13158 
13159 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");
13160 
13161 if (sd_pm_entry(un) != DDI_SUCCESS) {
13162 /*
13163 * Set up to return the failed buf back up the 'iodone'
13164 * side of the calling chain.
13165 */
13166 bioerror(bp, EIO);
13167 bp->b_resid = bp->b_bcount;
13168 
13169 SD_BEGIN_IODONE(index, un, bp);
13170 
13171 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13172 return;
13173 }
13174 
13175 SD_NEXT_IOSTART(index, un, bp);
13176 
13177 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13178 }
13179 
13180 
13181 /*
13182 * Function: sd_pm_iodone
13183 *
13184 * Description: iodone-side routine for power management.
13185 *
13186 * Context: may be called from interrupt context
13187 */
13188 
13189 static void
13190 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
13191 {
13192 ASSERT(un != NULL);
13193 ASSERT(bp != NULL);
13194 ASSERT(!mutex_owned(&un->un_pm_mutex));
13195 
13196 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");
13197 
13198 /*
13199 * After attach the following flag is only read, so don't
13200 * take the penalty of acquiring a mutex for it.
13201 */
13202 if (un->un_f_pm_is_enabled == TRUE) {
13203 sd_pm_exit(un);
13204 }
13205 
13206 SD_NEXT_IODONE(index, un, bp);
13207 
13208 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n");
13209 }
13210 
13211 
13212 /*
13213 * Function: sd_core_iostart
13214 *
13215 * Description: Primary driver function for enqueuing buf(9S) structs from
13216 * the system and initiating IO to the target device
13217 *
13218 * Context: Kernel thread context. Can sleep.
13219 *
13220 * Assumptions: - The given xp->xb_blkno is absolute
13221 * (ie, relative to the start of the device).
13222 * - The IO is to be done using the native blocksize of
13223 * the device, as specified in un->un_tgt_blocksize.
13224 */
13225 /* ARGSUSED */
13226 static void
13227 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp)
13228 {
13229 struct sd_xbuf *xp;
13230 
13231 ASSERT(un != NULL);
13232 ASSERT(bp != NULL);
13233 ASSERT(!mutex_owned(SD_MUTEX(un)));
13234 ASSERT(bp->b_resid == 0);
13235 
13236 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp);
13237 
13238 xp = SD_GET_XBUF(bp);
13239 ASSERT(xp != NULL);
13240 
13241 mutex_enter(SD_MUTEX(un));
13242 
13243 /*
13244 * If we are currently in the failfast state, fail any new IO
13245 * that has B_FAILFAST set, then return.
13246 */
13247 if ((bp->b_flags & B_FAILFAST) &&
13248 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) {
13249 mutex_exit(SD_MUTEX(un));
13250 bioerror(bp, EIO);
13251 bp->b_resid = bp->b_bcount;
13252 SD_BEGIN_IODONE(index, un, bp);
13253 return;
13254 }
13255 
13256 if (SD_IS_DIRECT_PRIORITY(xp)) {
13257 /*
13258 * Priority command -- transport it immediately.
13259 * 13260 * Note: We may want to assert that USCSI_DIAGNOSE is set, 13261 * because all direct priority commands should be associated 13262 * with error recovery actions which we don't want to retry. 13263 */ 13264 sd_start_cmds(un, bp); 13265 } else { 13266 /* 13267 * Normal command -- add it to the wait queue, then start 13268 * transporting commands from the wait queue. 13269 */ 13270 sd_add_buf_to_waitq(un, bp); 13271 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 13272 sd_start_cmds(un, NULL); 13273 } 13274 13275 mutex_exit(SD_MUTEX(un)); 13276 13277 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 13278 } 13279 13280 13281 /* 13282 * Function: sd_init_cdb_limits 13283 * 13284 * Description: This is to handle scsi_pkt initialization differences 13285 * between the driver platforms. 13286 * 13287 * Legacy behaviors: 13288 * 13289 * If the block number or the sector count exceeds the 13290 * capabilities of a Group 0 command, shift over to a 13291 * Group 1 command. We don't blindly use Group 1 13292 * commands because a) some drives (CDC Wren IVs) get a 13293 * bit confused, and b) there is probably a fair amount 13294 * of speed difference for a target to receive and decode 13295 * a 10 byte command instead of a 6 byte command. 13296 * 13297 * The xfer time difference of 6 vs 10 byte CDBs is 13298 * still significant so this code is still worthwhile. 13299 * 10 byte CDBs are very inefficient with the fas HBA driver 13300 * and older disks. Each CDB byte took 1 usec with some 13301 * popular disks. 13302 * 13303 * Context: Must be called at attach time 13304 */ 13305 13306 static void 13307 sd_init_cdb_limits(struct sd_lun *un) 13308 { 13309 int hba_cdb_limit; 13310 13311 /* 13312 * Use CDB_GROUP1 commands for most devices except for 13313 * parallel SCSI fixed drives in which case we get better 13314 * performance using CDB_GROUP0 commands (where applicable). 13315 */ 13316 un->un_mincdb = SD_CDB_GROUP1; 13317 #if !defined(__fibre) 13318 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 13319 !un->un_f_has_removable_media) { 13320 un->un_mincdb = SD_CDB_GROUP0; 13321 } 13322 #endif 13323 13324 /* 13325 * Try to read the max-cdb-length supported by HBA. 13326 */ 13327 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 13328 if (0 >= un->un_max_hba_cdb) { 13329 un->un_max_hba_cdb = CDB_GROUP4; 13330 hba_cdb_limit = SD_CDB_GROUP4; 13331 } else if (0 < un->un_max_hba_cdb && 13332 un->un_max_hba_cdb < CDB_GROUP1) { 13333 hba_cdb_limit = SD_CDB_GROUP0; 13334 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 13335 un->un_max_hba_cdb < CDB_GROUP5) { 13336 hba_cdb_limit = SD_CDB_GROUP1; 13337 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 13338 un->un_max_hba_cdb < CDB_GROUP4) { 13339 hba_cdb_limit = SD_CDB_GROUP5; 13340 } else { 13341 hba_cdb_limit = SD_CDB_GROUP4; 13342 } 13343 13344 /* 13345 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 13346 * commands for fixed disks unless we are building for a 32 bit 13347 * kernel. 13348 */ 13349 #ifdef _LP64 13350 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13351 min(hba_cdb_limit, SD_CDB_GROUP4); 13352 #else 13353 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13354 min(hba_cdb_limit, SD_CDB_GROUP1); 13355 #endif 13356 13357 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 13358 ? sizeof (struct scsi_arq_status) : 1); 13359 un->un_cmd_timeout = (ushort_t)sd_io_time; 13360 un->un_uscsi_timeout = ((ISCD(un)) ? 
2 : 1) * un->un_cmd_timeout;
13361 }
13362 
13363 
13364 /*
13365 * Function: sd_initpkt_for_buf
13366 *
13367 * Description: Allocate and initialize for transport a scsi_pkt struct,
13368 * based upon the info specified in the given buf struct.
13369 *
13370 * Assumes the xb_blkno in the request is absolute (ie,
13371 * relative to the start of the device, NOT the partition).
13372 * Also assumes that the request is using the native block
13373 * size of the device (as returned by the READ CAPACITY
13374 * command).
13375 *
13376 * Return Code: SD_PKT_ALLOC_SUCCESS
13377 * SD_PKT_ALLOC_FAILURE
13378 * SD_PKT_ALLOC_FAILURE_NO_DMA
13379 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13380 *
13381 * Context: Kernel thread and may be called from software interrupt context
13382 * as part of a sdrunout callback. This function may not block or
13383 * call routines that block
13384 */
13385 
13386 static int
13387 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
13388 {
13389 struct sd_xbuf *xp;
13390 struct scsi_pkt *pktp = NULL;
13391 struct sd_lun *un;
13392 size_t blockcount;
13393 daddr_t startblock;
13394 int rval;
13395 int cmd_flags;
13396 
13397 ASSERT(bp != NULL);
13398 ASSERT(pktpp != NULL);
13399 xp = SD_GET_XBUF(bp);
13400 ASSERT(xp != NULL);
13401 un = SD_GET_UN(bp);
13402 ASSERT(un != NULL);
13403 ASSERT(mutex_owned(SD_MUTEX(un)));
13404 ASSERT(bp->b_resid == 0);
13405 
13406 SD_TRACE(SD_LOG_IO_CORE, un,
13407 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);
13408 
13409 mutex_exit(SD_MUTEX(un));
13410 
13411 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13412 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
13413 /*
13414 * Already have a scsi_pkt -- just need DMA resources.
13415 * We must recompute the CDB in case the mapping returns
13416 * a nonzero pkt_resid.
13417 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
13418 * that is being retried, the unmap/remap of the DMA resources
13419 * will result in the entire transfer starting over again
13420 * from the very first block.
13421 */
13422 ASSERT(xp->xb_pktp != NULL);
13423 pktp = xp->xb_pktp;
13424 } else {
13425 pktp = NULL;
13426 }
13427 #endif /* __i386 || __amd64 */
13428 
13429 startblock = xp->xb_blkno; /* Absolute block num. */
13430 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
13431 
13432 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);
13433 
13434 /*
13435 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
13436 * call scsi_init_pkt, and build the CDB.
13437 */
13438 rval = sd_setup_rw_pkt(un, &pktp, bp,
13439 cmd_flags, sdrunout, (caddr_t)un,
13440 startblock, blockcount);
13441 
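/*
 * Illustration of the partial-DMA bookkeeping handled below
 * (hypothetical sizes): for a 1 MB request on an HBA that can bind
 * only 256 KB of DMA resources at a time, scsi_init_pkt() returns
 * with pkt_resid == 768 KB. sd_setup_rw_pkt() has already shrunk the
 * CDB block count to cover just the mapped 256 KB; the leftover is
 * parked in xb_dma_resid so that sd_setup_next_rw_pkt() can set up
 * the next chunk of the transfer later.
 */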
13442 if (rval == 0) {
13443 /*
13444 * Success.
13445 *
13446 * If partial DMA is being used and required for this transfer,
13447 * set it up here.
13448 */
13449 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
13450 (pktp->pkt_resid != 0)) {
13451 
13452 /*
13453 * Save the pkt_resid for the
13454 * next xfer
13455 */
13456 xp->xb_dma_resid = pktp->pkt_resid;
13457 
13458 /* rezero resid */
13459 pktp->pkt_resid = 0;
13460 
13461 } else {
13462 xp->xb_dma_resid = 0;
13463 }
13464 
13465 pktp->pkt_flags = un->un_tagflags;
13466 pktp->pkt_time = un->un_cmd_timeout;
13467 pktp->pkt_comp = sdintr;
13468 
13469 pktp->pkt_private = bp;
13470 *pktpp = pktp;
13471 
13472 SD_TRACE(SD_LOG_IO_CORE, un,
13473 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);
13474 
13475 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13476 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
13477 #endif
13478 
13479 mutex_enter(SD_MUTEX(un));
13480 return (SD_PKT_ALLOC_SUCCESS);
13481 
13482 }
13483 
13484 /*
13485 * SD_PKT_ALLOC_FAILURE is the only expected failure code
13486 * from sd_setup_rw_pkt.
13487 */
13488 ASSERT(rval == SD_PKT_ALLOC_FAILURE);
13489 
13490 if (rval == SD_PKT_ALLOC_FAILURE) {
13491 *pktpp = NULL;
13492 /*
13493 * Set the driver state to RWAIT to indicate the driver
13494 * is waiting on resource allocations. The driver will not
13495 * suspend, pm_suspend, or detach while the state is RWAIT.
13496 */
13497 mutex_enter(SD_MUTEX(un));
13498 New_state(un, SD_STATE_RWAIT);
13499 
13500 SD_ERROR(SD_LOG_IO_CORE, un,
13501 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);
13502 
13503 if ((bp->b_flags & B_ERROR) != 0) {
13504 return (SD_PKT_ALLOC_FAILURE_NO_DMA);
13505 }
13506 return (SD_PKT_ALLOC_FAILURE);
13507 } else {
13508 /*
13509 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13510 *
13511 * This should never happen. Maybe someone messed with the
13512 * kernel's minphys?
13513 */
13514 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13515 "Request rejected: too large for CDB: "
13516 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount);
13517 SD_ERROR(SD_LOG_IO_CORE, un,
13518 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
13519 mutex_enter(SD_MUTEX(un));
13520 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13521 
13522 }
13523 }
13524 
13525 
13526 /*
13527 * Function: sd_destroypkt_for_buf
13528 *
13529 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
13530 *
13531 * Context: Kernel thread or interrupt context
13532 */
13533 
13534 static void
13535 sd_destroypkt_for_buf(struct buf *bp)
13536 {
13537 ASSERT(bp != NULL);
13538 ASSERT(SD_GET_UN(bp) != NULL);
13539 
13540 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13541 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);
13542 
13543 ASSERT(SD_GET_PKTP(bp) != NULL);
13544 scsi_destroy_pkt(SD_GET_PKTP(bp));
13545 
13546 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13547 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
13548 }
13549 
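/*
 * Quick reference for the CDB selection in sd_setup_rw_pkt() below.
 * These are the typical SCSI limits for each group; the authoritative
 * values used by the driver live in sd_cdbtab:
 *
 *	CDB_GROUP0	 6-byte CDB	21-bit LBA	 8-bit count
 *	CDB_GROUP1	10-byte CDB	32-bit LBA	16-bit count
 *	CDB_GROUP5	12-byte CDB	32-bit LBA	32-bit count
 *	CDB_GROUP4	16-byte CDB	64-bit LBA	32-bit count
 */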
13550 /*
13551 * Function: sd_setup_rw_pkt
13552 *
13553 * Description: Determines appropriate CDB group for the requested LBA
13554 * and transfer length, calls scsi_init_pkt, and builds
13555 * the CDB. Do not use for partial DMA transfers except
13556 * for the initial transfer since the CDB size must
13557 * remain constant.
13558 *
13559 * Context: Kernel thread and may be called from software interrupt
13560 * context as part of a sdrunout callback. This function may not
13561 * block or call routines that block
13562 */
13563 
13564 
13565 int
13566 sd_setup_rw_pkt(struct sd_lun *un,
13567 struct scsi_pkt **pktpp, struct buf *bp, int flags,
13568 int (*callback)(caddr_t), caddr_t callback_arg,
13569 diskaddr_t lba, uint32_t blockcount)
13570 {
13571 struct scsi_pkt *return_pktp;
13572 union scsi_cdb *cdbp;
13573 struct sd_cdbinfo *cp = NULL;
13574 int i;
13575 
13576 /*
13577 * See which size CDB to use, based upon the request.
13578 */
13579 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {
13580 
13581 /*
13582 * Check lba and block count against sd_cdbtab limits.
13583 * In the partial DMA case, we have to use the same size
13584 * CDB for all the transfers. Check lba + blockcount
13585 * against the max LBA so we know that segment of the
13586 * transfer can use the CDB we select.
13587 */
13588 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
13589 (blockcount <= sd_cdbtab[i].sc_maxlen)) {
13590 
13591 /*
13592 * The command will fit into the CDB type
13593 * specified by sd_cdbtab[i].
13594 */
13595 cp = sd_cdbtab + i;
13596 
13597 /*
13598 * Call scsi_init_pkt so we can fill in the
13599 * CDB.
13600 */
13601 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
13602 bp, cp->sc_grpcode, un->un_status_len, 0,
13603 flags, callback, callback_arg);
13604 
13605 if (return_pktp != NULL) {
13606 
13607 /*
13608 * Return new value of pkt
13609 */
13610 *pktpp = return_pktp;
13611 
13612 /*
13613 * To be safe, zero the CDB to ensure there is
13614 * no leftover data from a previous command.
13615 */
13616 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);
13617 
13618 /*
13619 * Handle partial DMA mapping
13620 */
13621 if (return_pktp->pkt_resid != 0) {
13622 
13623 /*
13624 * Not going to xfer as many blocks as
13625 * originally expected
13626 */
13627 blockcount -=
13628 SD_BYTES2TGTBLOCKS(un,
13629 return_pktp->pkt_resid);
13630 }
13631 
13632 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;
13633 
13634 /*
13635 * Set command byte based on the CDB
13636 * type we matched.
13637 */
13638 cdbp->scc_cmd = cp->sc_grpmask |
13639 ((bp->b_flags & B_READ) ?
13640 SCMD_READ : SCMD_WRITE);
13641 
13642 SD_FILL_SCSI1_LUN(un, return_pktp);
13643 
13644 /*
13645 * Fill in LBA and length
13646 */
13647 ASSERT((cp->sc_grpcode == CDB_GROUP1) ||
13648 (cp->sc_grpcode == CDB_GROUP4) ||
13649 (cp->sc_grpcode == CDB_GROUP0) ||
13650 (cp->sc_grpcode == CDB_GROUP5));
13651 
13652 if (cp->sc_grpcode == CDB_GROUP1) {
13653 FORMG1ADDR(cdbp, lba);
13654 FORMG1COUNT(cdbp, blockcount);
13655 return (0);
13656 } else if (cp->sc_grpcode == CDB_GROUP4) {
13657 FORMG4LONGADDR(cdbp, lba);
13658 FORMG4COUNT(cdbp, blockcount);
13659 return (0);
13660 } else if (cp->sc_grpcode == CDB_GROUP0) {
13661 FORMG0ADDR(cdbp, lba);
13662 FORMG0COUNT(cdbp, blockcount);
13663 return (0);
13664 } else if (cp->sc_grpcode == CDB_GROUP5) {
13665 FORMG5ADDR(cdbp, lba);
13666 FORMG5COUNT(cdbp, blockcount);
13667 return (0);
13668 }
13669 
13670 /*
13671 * It should be impossible to not match one
13672 * of the CDB types above, so we should never
13673 * reach this point. Set the CDB command byte
13674 * to test-unit-ready to avoid writing
13675 * to somewhere we don't intend.
13676 */
13677 cdbp->scc_cmd = SCMD_TEST_UNIT_READY;
13678 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13679 } else {
13680 /*
13681 * Couldn't get scsi_pkt
13682 */
13683 return (SD_PKT_ALLOC_FAILURE);
13684 }
13685 }
13686 }
13687 
13688 /*
13689 * None of the available CDB types were suitable.
This really 13690 * should never happen: on a 64 bit system we support 13691 * READ16/WRITE16 which will hold an entire 64 bit disk address 13692 * and on a 32 bit system we will refuse to bind to a device 13693 * larger than 2TB so addresses will never be larger than 32 bits. 13694 */ 13695 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13696 } 13697 13698 /* 13699 * Function: sd_setup_next_rw_pkt 13700 * 13701 * Description: Setup packet for partial DMA transfers, except for the 13702 * initial transfer. sd_setup_rw_pkt should be used for 13703 * the initial transfer. 13704 * 13705 * Context: Kernel thread and may be called from interrupt context. 13706 */ 13707 13708 int 13709 sd_setup_next_rw_pkt(struct sd_lun *un, 13710 struct scsi_pkt *pktp, struct buf *bp, 13711 diskaddr_t lba, uint32_t blockcount) 13712 { 13713 uchar_t com; 13714 union scsi_cdb *cdbp; 13715 uchar_t cdb_group_id; 13716 13717 ASSERT(pktp != NULL); 13718 ASSERT(pktp->pkt_cdbp != NULL); 13719 13720 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 13721 com = cdbp->scc_cmd; 13722 cdb_group_id = CDB_GROUPID(com); 13723 13724 ASSERT((cdb_group_id == CDB_GROUPID_0) || 13725 (cdb_group_id == CDB_GROUPID_1) || 13726 (cdb_group_id == CDB_GROUPID_4) || 13727 (cdb_group_id == CDB_GROUPID_5)); 13728 13729 /* 13730 * Move pkt to the next portion of the xfer. 13731 * func is NULL_FUNC so we do not have to release 13732 * the disk mutex here. 13733 */ 13734 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 13735 NULL_FUNC, NULL) == pktp) { 13736 /* Success. Handle partial DMA */ 13737 if (pktp->pkt_resid != 0) { 13738 blockcount -= 13739 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 13740 } 13741 13742 cdbp->scc_cmd = com; 13743 SD_FILL_SCSI1_LUN(un, pktp); 13744 if (cdb_group_id == CDB_GROUPID_1) { 13745 FORMG1ADDR(cdbp, lba); 13746 FORMG1COUNT(cdbp, blockcount); 13747 return (0); 13748 } else if (cdb_group_id == CDB_GROUPID_4) { 13749 FORMG4LONGADDR(cdbp, lba); 13750 FORMG4COUNT(cdbp, blockcount); 13751 return (0); 13752 } else if (cdb_group_id == CDB_GROUPID_0) { 13753 FORMG0ADDR(cdbp, lba); 13754 FORMG0COUNT(cdbp, blockcount); 13755 return (0); 13756 } else if (cdb_group_id == CDB_GROUPID_5) { 13757 FORMG5ADDR(cdbp, lba); 13758 FORMG5COUNT(cdbp, blockcount); 13759 return (0); 13760 } 13761 13762 /* Unreachable */ 13763 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13764 } 13765 13766 /* 13767 * Error setting up next portion of cmd transfer. 13768 * Something is definitely very wrong and this 13769 * should not happen. 13770 */ 13771 return (SD_PKT_ALLOC_FAILURE); 13772 } 13773 13774 /* 13775 * Function: sd_initpkt_for_uscsi 13776 * 13777 * Description: Allocate and initialize for transport a scsi_pkt struct, 13778 * based upon the info specified in the given uscsi_cmd struct. 13779 * 13780 * Return Code: SD_PKT_ALLOC_SUCCESS 13781 * SD_PKT_ALLOC_FAILURE 13782 * SD_PKT_ALLOC_FAILURE_NO_DMA 13783 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13784 * 13785 * Context: Kernel thread and may be called from software interrupt context 13786 * as part of a sdrunout callback. 
This function may not block or
13787 * call routines that block
13788 */
13789 
13790 static int
13791 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
13792 {
13793 struct uscsi_cmd *uscmd;
13794 struct sd_xbuf *xp;
13795 struct scsi_pkt *pktp;
13796 struct sd_lun *un;
13797 uint32_t flags = 0;
13798 
13799 ASSERT(bp != NULL);
13800 ASSERT(pktpp != NULL);
13801 xp = SD_GET_XBUF(bp);
13802 ASSERT(xp != NULL);
13803 un = SD_GET_UN(bp);
13804 ASSERT(un != NULL);
13805 ASSERT(mutex_owned(SD_MUTEX(un)));
13806 
13807 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
13808 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
13809 ASSERT(uscmd != NULL);
13810 
13811 SD_TRACE(SD_LOG_IO_CORE, un,
13812 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);
13813 
13814 /*
13815 * Allocate the scsi_pkt for the command.
13816 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
13817 * during scsi_init_pkt time and will continue to use the
13818 * same path as long as the same scsi_pkt is used without
13819 * intervening scsi_dma_free(). Since a uscsi command does
13820 * not call scsi_dmafree() before retrying a failed command,
13821 * it is necessary to make sure the PKT_DMA_PARTIAL flag is
13822 * NOT set so that scsi_vhci can use another available path
13823 * for the retry. Besides, a uscsi command does not allow DMA
13824 * breakup, so there is no need to set the PKT_DMA_PARTIAL flag.
13825 */
13826 if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
13827 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
13828 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
13829 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
13830 - sizeof (struct scsi_extended_sense)), 0,
13831 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
13832 sdrunout, (caddr_t)un);
13833 } else {
13834 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
13835 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
13836 sizeof (struct scsi_arq_status), 0,
13837 (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
13838 sdrunout, (caddr_t)un);
13839 }
13840 
13841 if (pktp == NULL) {
13842 *pktpp = NULL;
13843 /*
13844 * Set the driver state to RWAIT to indicate the driver
13845 * is waiting on resource allocations. The driver will not
13846 * suspend, pm_suspend, or detach while the state is RWAIT.
13847 */
13848 New_state(un, SD_STATE_RWAIT);
13849 
13850 SD_ERROR(SD_LOG_IO_CORE, un,
13851 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);
13852 
13853 if ((bp->b_flags & B_ERROR) != 0) {
13854 return (SD_PKT_ALLOC_FAILURE_NO_DMA);
13855 }
13856 return (SD_PKT_ALLOC_FAILURE);
13857 }
13858 
13859 /*
13860 * We do not do DMA breakup for USCSI commands, so return failure
13861 * here if all the needed DMA resources were not allocated.
13862 */
13863 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
13864 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
13865 scsi_destroy_pkt(pktp);
13866 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
13867 "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
13868 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
13869 }
13870 
13871 /* Init the cdb from the given uscsi struct */
13872 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
13873 uscmd->uscsi_cdb[0], 0, 0, 0);
13874 
13875 SD_FILL_SCSI1_LUN(un, pktp);
13876 
13877 /*
13878 * Set up the optional USCSI flags. See the uscsi(7I) man page
13879 * for a listing of the supported flags.
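 *
 * As a quick reference, the translations performed below are:
 *
 *	USCSI_SILENT	-> FLAG_SILENT
 *	USCSI_DIAGNOSE	-> FLAG_DIAGNOSE
 *	USCSI_ISOLATE	-> FLAG_ISOLATE
 *	USCSI_RENEGOT	-> FLAG_RENEGOTIATE_WIDE_SYNC (non-fibre only)
 *	USCSI_HEAD	-> FLAG_HEAD
 *	USCSI_NOINTR	-> FLAG_NOINTR
 *	USCSI_HTAG/USCSI_OTAG -> FLAG_HTAG/FLAG_OTAG (unless USCSI_NOTAG)
 *	USCSI_NODISCON	-> FLAG_NODISCON (clearing any tag flags)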
13880 */
13881 
13882 if (uscmd->uscsi_flags & USCSI_SILENT) {
13883 flags |= FLAG_SILENT;
13884 }
13885 
13886 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
13887 flags |= FLAG_DIAGNOSE;
13888 }
13889 
13890 if (uscmd->uscsi_flags & USCSI_ISOLATE) {
13891 flags |= FLAG_ISOLATE;
13892 }
13893 
13894 if (un->un_f_is_fibre == FALSE) {
13895 if (uscmd->uscsi_flags & USCSI_RENEGOT) {
13896 flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
13897 }
13898 }
13899 
13900 /*
13901 * Set the pkt flags here so we save time later.
13902 * Note: These flags are NOT in the uscsi man page!!!
13903 */
13904 if (uscmd->uscsi_flags & USCSI_HEAD) {
13905 flags |= FLAG_HEAD;
13906 }
13907 
13908 if (uscmd->uscsi_flags & USCSI_NOINTR) {
13909 flags |= FLAG_NOINTR;
13910 }
13911 
13912 /*
13913 * For tagged queueing, things get a bit complicated.
13914 * Check first for head of queue and last for ordered queue.
13915 * If neither head nor ordered, use the default driver tag flags.
13916 */
13917 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
13918 if (uscmd->uscsi_flags & USCSI_HTAG) {
13919 flags |= FLAG_HTAG;
13920 } else if (uscmd->uscsi_flags & USCSI_OTAG) {
13921 flags |= FLAG_OTAG;
13922 } else {
13923 flags |= un->un_tagflags & FLAG_TAGMASK;
13924 }
13925 }
13926 
13927 if (uscmd->uscsi_flags & USCSI_NODISCON) {
13928 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
13929 }
13930 
13931 pktp->pkt_flags = flags;
13932 
13933 /* Transfer uscsi information to scsi_pkt */
13934 (void) scsi_uscsi_pktinit(uscmd, pktp);
13935 
13936 /* Copy the caller's CDB into the pkt... */
13937 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);
13938 
13939 if (uscmd->uscsi_timeout == 0) {
13940 pktp->pkt_time = un->un_uscsi_timeout;
13941 } else {
13942 pktp->pkt_time = uscmd->uscsi_timeout;
13943 }
13944 
13945 /* need it later to identify USCSI request in sdintr */
13946 xp->xb_pkt_flags |= SD_XB_USCSICMD;
13947 
13948 xp->xb_sense_resid = uscmd->uscsi_rqresid;
13949 
13950 pktp->pkt_private = bp;
13951 pktp->pkt_comp = sdintr;
13952 *pktpp = pktp;
13953 
13954 SD_TRACE(SD_LOG_IO_CORE, un,
13955 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);
13956 
13957 return (SD_PKT_ALLOC_SUCCESS);
13958 }
13959 
13960 
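/*
 * For context, the usual producer of these requests is a userland
 * USCSICMD ioctl. An illustrative sketch (hypothetical file
 * descriptor; see uscsi(7I) for the authoritative interface):
 *
 *	struct uscsi_cmd ucmd;
 *	union scsi_cdb cdb;
 *
 *	bzero(&ucmd, sizeof (ucmd));
 *	bzero(&cdb, sizeof (cdb));
 *	cdb.scc_cmd = SCMD_TEST_UNIT_READY;
 *	ucmd.uscsi_cdb = (caddr_t)&cdb;
 *	ucmd.uscsi_cdblen = CDB_GROUP0;
 *	ucmd.uscsi_flags = USCSI_SILENT;
 *	ucmd.uscsi_timeout = 30;
 *	if (ioctl(fd, USCSICMD, &ucmd) != 0)
 *		perror("USCSICMD");
 *
 * By the time the request reaches this layer, the uscsi_cmd struct
 * arrives via xb_pktinfo, as noted in sd_initpkt_for_uscsi() above.
 */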
13961 /*
13962 * Function: sd_destroypkt_for_uscsi
13963 *
13964 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
13965 * IOs. Also saves relevant info into the associated uscsi_cmd
13966 * struct.
13967 *
13968 * Context: May be called under interrupt context
13969 */
13970 
13971 static void
13972 sd_destroypkt_for_uscsi(struct buf *bp)
13973 {
13974 struct uscsi_cmd *uscmd;
13975 struct sd_xbuf *xp;
13976 struct scsi_pkt *pktp;
13977 struct sd_lun *un;
13978 struct sd_uscsi_info *suip;
13979 
13980 ASSERT(bp != NULL);
13981 xp = SD_GET_XBUF(bp);
13982 ASSERT(xp != NULL);
13983 un = SD_GET_UN(bp);
13984 ASSERT(un != NULL);
13985 ASSERT(!mutex_owned(SD_MUTEX(un)));
13986 pktp = SD_GET_PKTP(bp);
13987 ASSERT(pktp != NULL);
13988 
13989 SD_TRACE(SD_LOG_IO_CORE, un,
13990 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);
13991 
13992 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
13993 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
13994 ASSERT(uscmd != NULL);
13995 
13996 /* Save the status and the residual into the uscsi_cmd struct */
13997 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
13998 uscmd->uscsi_resid = bp->b_resid;
13999 
14000 /* Transfer scsi_pkt information to uscsi */
14001 (void) scsi_uscsi_pktfini(pktp, uscmd);
14002 
14003 /*
14004 * If enabled, copy any saved sense data into the area specified
14005 * by the uscsi command.
14006 */
14007 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
14008 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
14009 /*
14010 * Note: uscmd->uscsi_rqbuf should always point to a buffer
14011 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
14012 */
14013 uscmd->uscsi_rqstatus = xp->xb_sense_status;
14014 uscmd->uscsi_rqresid = xp->xb_sense_resid;
14015 if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
14016 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14017 MAX_SENSE_LENGTH);
14018 } else {
14019 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14020 SENSE_LENGTH);
14021 }
14022 }
14023 /*
14024 * The following assignments are for SCSI FMA.
14025 */
14026 ASSERT(xp->xb_private != NULL);
14027 suip = (struct sd_uscsi_info *)xp->xb_private;
14028 suip->ui_pkt_reason = pktp->pkt_reason;
14029 suip->ui_pkt_state = pktp->pkt_state;
14030 suip->ui_pkt_statistics = pktp->pkt_statistics;
14031 suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp);
14032 
14033 /* We are done with the scsi_pkt; free it now */
14034 ASSERT(SD_GET_PKTP(bp) != NULL);
14035 scsi_destroy_pkt(SD_GET_PKTP(bp));
14036 
14037 SD_TRACE(SD_LOG_IO_CORE, un,
14038 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
14039 }
14040 
14041 
14042 /*
14043 * Function: sd_bioclone_alloc
14044 *
14045 * Description: Allocate a buf(9S) and init it as per the given buf
14046 * and the various arguments. The associated sd_xbuf
14047 * struct is (nearly) duplicated. The struct buf *bp
14048 * argument is saved in new_xp->xb_private.
14049 *
14050 * Arguments: bp - ptr to the buf(9S) to be "shadowed"
14051 * datalen - size of data area for the shadow bp
14052 * blkno - starting LBA
14053 * func - function pointer for b_iodone in the shadow buf. (May
14054 * be NULL if none.)
14055 *
14056 * Return Code: Pointer to the allocated buf(9S) struct
14057 *
14058 * Context: Can sleep.
14059 */
14060 
14061 static struct buf *
14062 sd_bioclone_alloc(struct buf *bp, size_t datalen,
14063 daddr_t blkno, int (*func)(struct buf *))
14064 {
14065 struct sd_lun *un;
14066 struct sd_xbuf *xp;
14067 struct sd_xbuf *new_xp;
14068 struct buf *new_bp;
14069 
14070 ASSERT(bp != NULL);
14071 xp = SD_GET_XBUF(bp);
14072 ASSERT(xp != NULL);
14073 un = SD_GET_UN(bp);
14074 ASSERT(un != NULL);
14075 ASSERT(!mutex_owned(SD_MUTEX(un)));
14076 
14077 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
14078 NULL, KM_SLEEP);
14079 
14080 new_bp->b_lblkno = blkno;
14081 
14082 /*
14083 * Allocate an xbuf for the shadow bp and copy the contents of the
14084 * original xbuf into it.
14085 */
14086 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
14087 bcopy(xp, new_xp, sizeof (struct sd_xbuf));
14088 
14089 /*
14090 * The given bp is automatically saved in the xb_private member
14091 * of the new xbuf. Callers are allowed to depend on this.
14092 */
14093 new_xp->xb_private = bp;
14094 
14095 new_bp->b_private = new_xp;
14096 
14097 return (new_bp);
14098 }
14099 
14100 /*
14101 * Function: sd_shadow_buf_alloc
14102 *
14103 * Description: Allocate a buf(9S) and init it as per the given buf
14104 * and the various arguments. The associated sd_xbuf
14105 * struct is (nearly) duplicated. The struct buf *bp
14106 * argument is saved in new_xp->xb_private.
14107 *
14108 * Arguments: bp - ptr to the buf(9S) to be "shadowed"
14109 * datalen - size of data area for the shadow bp
14110 * bflags - B_READ or B_WRITE (pseudo flag)
14111 * blkno - starting LBA
14112 * func - function pointer for b_iodone in the shadow buf. (May
14113 * be NULL if none.)
14114 *
14115 * Return Code: Pointer to the allocated buf(9S) struct
14116 *
14117 * Context: Can sleep.
14118 */
14119 
14120 static struct buf *
14121 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
14122 daddr_t blkno, int (*func)(struct buf *))
14123 {
14124 struct sd_lun *un;
14125 struct sd_xbuf *xp;
14126 struct sd_xbuf *new_xp;
14127 struct buf *new_bp;
14128 
14129 ASSERT(bp != NULL);
14130 xp = SD_GET_XBUF(bp);
14131 ASSERT(xp != NULL);
14132 un = SD_GET_UN(bp);
14133 ASSERT(un != NULL);
14134 ASSERT(!mutex_owned(SD_MUTEX(un)));
14135 
14136 if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
14137 bp_mapin(bp);
14138 }
14139 
14140 bflags &= (B_READ | B_WRITE);
14141 #if defined(__i386) || defined(__amd64)
14142 new_bp = getrbuf(KM_SLEEP);
14143 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
14144 new_bp->b_bcount = datalen;
14145 new_bp->b_flags = bflags |
14146 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW));
14147 #else
14148 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
14149 datalen, bflags, SLEEP_FUNC, NULL);
14150 #endif
14151 new_bp->av_forw = NULL;
14152 new_bp->av_back = NULL;
14153 new_bp->b_dev = bp->b_dev;
14154 new_bp->b_blkno = blkno;
14155 new_bp->b_iodone = func;
14156 new_bp->b_edev = bp->b_edev;
14157 new_bp->b_resid = 0;
14158 
14159 /* We need to preserve the B_FAILFAST flag */
14160 if (bp->b_flags & B_FAILFAST) {
14161 new_bp->b_flags |= B_FAILFAST;
14162 }
14163 
14164 /*
14165 * Allocate an xbuf for the shadow bp and copy the contents of the
14166 * original xbuf into it.
14167 */
14168 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
14169 bcopy(xp, new_xp, sizeof (struct sd_xbuf));
14170 
14171 /* Needed later to copy data between the shadow buf & original buf!
*/
14172 new_xp->xb_pkt_flags |= PKT_CONSISTENT;
14173 
14174 /*
14175 * The given bp is automatically saved in the xb_private member
14176 * of the new xbuf. Callers are allowed to depend on this.
14177 */
14178 new_xp->xb_private = bp;
14179 
14180 new_bp->b_private = new_xp;
14181 
14182 return (new_bp);
14183 }
14184 
14185 /*
14186 * Function: sd_bioclone_free
14187 *
14188 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
14189 * when the request overruns the partition.
14190 *
14191 * Context: May be called under interrupt context
14192 */
14193 
14194 static void
14195 sd_bioclone_free(struct buf *bp)
14196 {
14197 struct sd_xbuf *xp;
14198 
14199 ASSERT(bp != NULL);
14200 xp = SD_GET_XBUF(bp);
14201 ASSERT(xp != NULL);
14202 
14203 /*
14204 * Call bp_mapout() before freeing the buf, in case a lower
14205 * layer or HBA had done a bp_mapin(). We must do this here
14206 * as we are the "originator" of the shadow buf.
14207 */
14208 bp_mapout(bp);
14209 
14210 /*
14211 * Null out b_iodone before freeing the bp, to ensure that the driver
14212 * never gets confused by a stale value in this field. (Just a little
14213 * extra defensiveness here.)
14214 */
14215 bp->b_iodone = NULL;
14216 
14217 freerbuf(bp);
14218 
14219 kmem_free(xp, sizeof (struct sd_xbuf));
14220 }
14221 
14222 /*
14223 * Function: sd_shadow_buf_free
14224 *
14225 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations.
14226 *
14227 * Context: May be called under interrupt context
14228 */
14229 
14230 static void
14231 sd_shadow_buf_free(struct buf *bp)
14232 {
14233 struct sd_xbuf *xp;
14234 
14235 ASSERT(bp != NULL);
14236 xp = SD_GET_XBUF(bp);
14237 ASSERT(xp != NULL);
14238 
14239 #if defined(__sparc)
14240 /*
14241 * Call bp_mapout() before freeing the buf, in case a lower
14242 * layer or HBA had done a bp_mapin(). We must do this here
14243 * as we are the "originator" of the shadow buf.
14244 */
14245 bp_mapout(bp);
14246 #endif
14247 
14248 /*
14249 * Null out b_iodone before freeing the bp, to ensure that the driver
14250 * never gets confused by a stale value in this field. (Just a little
14251 * extra defensiveness here.)
14252 */
14253 bp->b_iodone = NULL;
14254 
14255 #if defined(__i386) || defined(__amd64)
14256 kmem_free(bp->b_un.b_addr, bp->b_bcount);
14257 freerbuf(bp);
14258 #else
14259 scsi_free_consistent_buf(bp);
14260 #endif
14261 
14262 kmem_free(xp, sizeof (struct sd_xbuf));
14263 }
14264 
14265 
14266 /*
14267 * Function: sd_print_transport_rejected_message
14268 *
14269 * Description: This implements the ludicrously complex rules for printing
14270 * a "transport rejected" message. This is to address the
14271 * specific problem of having a flood of this error message
14272 * produced when a failover occurs.
14273 *
14274 * Context: Any.
14275 */
14276 
14277 static void
14278 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
14279 int code)
14280 {
14281 ASSERT(un != NULL);
14282 ASSERT(mutex_owned(SD_MUTEX(un)));
14283 ASSERT(xp != NULL);
14284 
14285 /*
14286 * Print the "transport rejected" message under the following
14287 * conditions:
14288 *
14289 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
14290 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
14291 * - If the error code IS a TRAN_FATAL_ERROR, then the message is
14292 * printed the FIRST time a TRAN_FATAL_ERROR is returned from
14293 * scsi_transport(9F) (which indicates that the target might have
14294 * gone off-line).
This uses the un->un_tran_fatal_count
14295 * counter, which is incremented whenever a TRAN_FATAL_ERROR is
14296 * received, and reset to zero whenever a TRAN_ACCEPT is returned
14297 * from scsi_transport().
14298 *
14299 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
14300 * the preceding cases in order for the message to be printed.
14301 */
14302 if (((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) &&
14303 (SD_FM_LOG(un) == SD_FM_LOG_NSUP)) {
14304 if ((sd_level_mask & SD_LOGMASK_DIAG) ||
14305 (code != TRAN_FATAL_ERROR) ||
14306 (un->un_tran_fatal_count == 1)) {
14307 switch (code) {
14308 case TRAN_BADPKT:
14309 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14310 "transport rejected bad packet\n");
14311 break;
14312 case TRAN_FATAL_ERROR:
14313 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14314 "transport rejected fatal error\n");
14315 break;
14316 default:
14317 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14318 "transport rejected (%d)\n", code);
14319 break;
14320 }
14321 }
14322 }
14323 }
14324 
14325 
14326 /*
14327 * Function: sd_add_buf_to_waitq
14328 *
14329 * Description: Add the given buf(9S) struct to the wait queue for the
14330 * instance. If sorting is enabled, then the buf is added
14331 * to the queue via an elevator sort algorithm (a la
14332 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key.
14333 * If sorting is not enabled, then the buf is just added
14334 * to the end of the wait queue.
14335 *
14336 * Return Code: void
14337 *
14338 * Context: Does not sleep/block, therefore technically can be called
14339 * from any context. However if sorting is enabled then the
14340 * execution time is indeterminate, and may take a long time if
14341 * the wait queue grows large.
14342 */
14343 
14344 static void
14345 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
14346 {
14347 struct buf *ap;
14348 
14349 ASSERT(bp != NULL);
14350 ASSERT(un != NULL);
14351 ASSERT(mutex_owned(SD_MUTEX(un)));
14352 
14353 /* If the queue is empty, add the buf as the only entry & return. */
14354 if (un->un_waitq_headp == NULL) {
14355 ASSERT(un->un_waitq_tailp == NULL);
14356 un->un_waitq_headp = un->un_waitq_tailp = bp;
14357 bp->av_forw = NULL;
14358 return;
14359 }
14360 
14361 ASSERT(un->un_waitq_tailp != NULL);
14362 
14363 /*
14364 * If sorting is disabled, just add the buf to the tail end of
14365 * the wait queue and return.
14366 */
14367 if (un->un_f_disksort_disabled) {
14368 un->un_waitq_tailp->av_forw = bp;
14369 un->un_waitq_tailp = bp;
14370 bp->av_forw = NULL;
14371 return;
14372 }
14373 
14374 /*
14375 * Sort through the list of requests currently on the wait queue
14376 * and add the new buf request at the appropriate position.
14377 *
14378 * The un->un_waitq_headp is an activity chain pointer on which
14379 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The
14380 * first queue holds those requests which are positioned after
14381 * the current SD_GET_BLKNO() (in the first request); the second holds
14382 * requests which came in after their SD_GET_BLKNO() number was passed.
14383 * Thus we implement a one-way scan, retracting after reaching
14384 * the end of the drive to the first request on the second
14385 * queue, at which time it becomes the first queue.
14386 * A one-way scan is natural because of the way UNIX read-ahead
14387 * blocks are allocated.
14388 *
14389 * If the new request lies after the first request, then we must
14390 * locate the second request list and add ourselves to it.
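 *
 * For example (hypothetical block numbers): given the queue
 *
 *	100 -> 300 -> 700 -> 20 -> 40
 *
 * (the drop from 700 to 20 marks the start of the second list), a
 * new request for block 500 is sorted into the first list, giving
 * 100 300 500 700 | 20 40, while a new request for block 30 joins
 * the second list, giving 100 300 700 | 20 30 40.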
14391 */
14392 ap = un->un_waitq_headp;
14393 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) {
14394 while (ap->av_forw != NULL) {
14395 /*
14396 * Look for an "inversion" in the (normally
14397 * ascending) block numbers. This indicates
14398 * the start of the second request list.
14399 */
14400 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) {
14401 /*
14402 * Search the second request list for the
14403 * first request at a larger block number.
14404 * We go before that; however if there is
14405 * no such request, we go at the end.
14406 */
14407 do {
14408 if (SD_GET_BLKNO(bp) <
14409 SD_GET_BLKNO(ap->av_forw)) {
14410 goto insert;
14411 }
14412 ap = ap->av_forw;
14413 } while (ap->av_forw != NULL);
14414 goto insert; /* after last */
14415 }
14416 ap = ap->av_forw;
14417 }
14418 
14419 /*
14420 * No inversions... we will go after the last, and
14421 * be the first request in the second request list.
14422 */
14423 goto insert;
14424 }
14425 
14426 /*
14427 * Request is at/after the current request...
14428 * sort in the first request list.
14429 */
14430 while (ap->av_forw != NULL) {
14431 /*
14432 * We want to go after the current request (1) if
14433 * there is an inversion after it (i.e. it is the end
14434 * of the first request list), or (2) if the next
14435 * request is a larger block no. than our request.
14436 */
14437 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
14438 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
14439 goto insert;
14440 }
14441 ap = ap->av_forw;
14442 }
14443 
14444 /*
14445 * Neither a second list nor a larger request, therefore
14446 * we go at the end of the first list (which is the same
14447 * as the end of the whole shebang).
14448 */
14449 insert:
14450 bp->av_forw = ap->av_forw;
14451 ap->av_forw = bp;
14452 
14453 /*
14454 * If we inserted onto the tail end of the waitq, make sure the
14455 * tail pointer is updated.
14456 */
14457 if (ap == un->un_waitq_tailp) {
14458 un->un_waitq_tailp = bp;
14459 }
14460 }
14461 
14462 
14463 /*
14464 * Function: sd_start_cmds
14465 *
14466 * Description: Remove and transport cmds from the driver queues.
14467 *
14468 * Arguments: un - pointer to the unit (soft state) struct for the target.
14469 *
14470 * immed_bp - ptr to a buf to be transported immediately. Only
14471 * the immed_bp is transported; bufs on the waitq are not
14472 * processed and the un_retry_bp is not checked. If immed_bp is
14473 * NULL, then normal queue processing is performed.
14474 *
14475 * Context: May be called from kernel thread context, interrupt context,
14476 * or runout callback context. This function may not block or
14477 * call routines that block.
14478 */
14479 
14480 static void
14481 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp)
14482 {
14483 struct sd_xbuf *xp;
14484 struct buf *bp;
14485 void (*statp)(kstat_io_t *);
14486 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14487 void (*saved_statp)(kstat_io_t *);
14488 #endif
14489 int rval;
14490 struct sd_fm_internal *sfip = NULL;
14491 
14492 ASSERT(un != NULL);
14493 ASSERT(mutex_owned(SD_MUTEX(un)));
14494 ASSERT(un->un_ncmds_in_transport >= 0);
14495 ASSERT(un->un_throttle >= 0);
14496 
14497 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n");
14498 
14499 do {
14500 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14501 saved_statp = NULL;
14502 #endif
14503 
14504 /*
14505 * If we are syncing or dumping, fail the command to
14506 * avoid recursively calling back into scsi_transport().
14507 * The dump I/O itself uses a separate code path so this 14508 * only prevents non-dump I/O from being sent while dumping. 14509 * File system sync takes place before dumping begins. 14510 * During panic, filesystem I/O is allowed provided 14511 * un_in_callback is <= 1. This is to prevent recursion 14512 * such as sd_start_cmds -> scsi_transport -> sdintr -> 14513 * sd_start_cmds and so on. See panic.c for more information 14514 * about the states the system can be in during panic. 14515 */ 14516 if ((un->un_state == SD_STATE_DUMPING) || 14517 (ddi_in_panic() && (un->un_in_callback > 1))) { 14518 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14519 "sd_start_cmds: panicking\n"); 14520 goto exit; 14521 } 14522 14523 if ((bp = immed_bp) != NULL) { 14524 /* 14525 * We have a bp that must be transported immediately. 14526 * It's OK to transport the immed_bp here without doing 14527 * the throttle limit check because the immed_bp is 14528 * always used in a retry/recovery case. This means 14529 * that we know we are not at the throttle limit by 14530 * virtue of the fact that to get here we must have 14531 * already gotten a command back via sdintr(). This also 14532 * relies on (1) the command on un_retry_bp preventing 14533 * further commands from the waitq from being issued; 14534 * and (2) the code in sd_retry_command checking the 14535 * throttle limit before issuing a delayed or immediate 14536 * retry. This holds even if the throttle limit is 14537 * currently ratcheted down from its maximum value. 14538 */ 14539 statp = kstat_runq_enter; 14540 if (bp == un->un_retry_bp) { 14541 ASSERT((un->un_retry_statp == NULL) || 14542 (un->un_retry_statp == kstat_waitq_enter) || 14543 (un->un_retry_statp == 14544 kstat_runq_back_to_waitq)); 14545 /* 14546 * If the waitq kstat was incremented when 14547 * sd_set_retry_bp() queued this bp for a retry, 14548 * then we must set up statp so that the waitq 14549 * count will get decremented correctly below. 14550 * Also we must clear un->un_retry_statp to 14551 * ensure that we do not act on a stale value 14552 * in this field. 14553 */ 14554 if ((un->un_retry_statp == kstat_waitq_enter) || 14555 (un->un_retry_statp == 14556 kstat_runq_back_to_waitq)) { 14557 statp = kstat_waitq_to_runq; 14558 } 14559 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14560 saved_statp = un->un_retry_statp; 14561 #endif 14562 un->un_retry_statp = NULL; 14563 14564 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14565 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 14566 "un_throttle:%d un_ncmds_in_transport:%d\n", 14567 un, un->un_retry_bp, un->un_throttle, 14568 un->un_ncmds_in_transport); 14569 } else { 14570 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 14571 "processing priority bp:0x%p\n", bp); 14572 } 14573 14574 } else if ((bp = un->un_waitq_headp) != NULL) { 14575 /* 14576 * A command on the waitq is ready to go, but do not 14577 * send it if: 14578 * 14579 * (1) the throttle limit has been reached, or 14580 * (2) a retry is pending, or 14581 * (3) a START_STOP_UNIT callback pending, or 14582 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 14583 * command is pending. 14584 * 14585 * For all of these conditions, IO processing will 14586 * restart after the condition is cleared. 
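 *
 * (The corresponding restart paths, as used elsewhere in this
 * file: completion of an in-flight or retry command for (1) and
 * (2), possibly via sd_restore_throttle() for (1); the
 * START_STOP_UNIT completion callback for (3); and
 * sd_start_direct_priority_command() for (4). Retries queued
 * with a delay are restarted by sd_start_retry_command().)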
14587 */
14588 if (un->un_ncmds_in_transport >= un->un_throttle) {
14589 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14590 "sd_start_cmds: exiting, "
14591 "throttle limit reached!\n");
14592 goto exit;
14593 }
14594 if (un->un_retry_bp != NULL) {
14595 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14596 "sd_start_cmds: exiting, retry pending!\n");
14597 goto exit;
14598 }
14599 if (un->un_startstop_timeid != NULL) {
14600 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14601 "sd_start_cmds: exiting, "
14602 "START_STOP pending!\n");
14603 goto exit;
14604 }
14605 if (un->un_direct_priority_timeid != NULL) {
14606 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14607 "sd_start_cmds: exiting, "
14608 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
14609 goto exit;
14610 }
14611
14612 /* Dequeue the command */
14613 un->un_waitq_headp = bp->av_forw;
14614 if (un->un_waitq_headp == NULL) {
14615 un->un_waitq_tailp = NULL;
14616 }
14617 bp->av_forw = NULL;
14618 statp = kstat_waitq_to_runq;
14619 SD_TRACE(SD_LOG_IO_CORE, un,
14620 "sd_start_cmds: processing waitq bp:0x%p\n", bp);
14621
14622 } else {
14623 /* No work to do so bail out now */
14624 SD_TRACE(SD_LOG_IO_CORE, un,
14625 "sd_start_cmds: no more work, exiting!\n");
14626 goto exit;
14627 }
14628
14629 /*
14630 * Reset the state to normal. This is the mechanism by which
14631 * the state transitions from either SD_STATE_RWAIT or
14632 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
14633 * If state is SD_STATE_PM_CHANGING then this command is
14634 * part of the device power control and the state must
14635 * not be put back to normal. Doing so would
14636 * allow new commands to proceed when they shouldn't,
14637 * as the device may be going off.
14638 */
14639 if ((un->un_state != SD_STATE_SUSPENDED) &&
14640 (un->un_state != SD_STATE_PM_CHANGING)) {
14641 New_state(un, SD_STATE_NORMAL);
14642 }
14643
14644 xp = SD_GET_XBUF(bp);
14645 ASSERT(xp != NULL);
14646
14647 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14648 /*
14649 * Allocate the scsi_pkt if we need one, or attach DMA
14650 * resources if we have a scsi_pkt that needs them. The
14651 * latter should only occur for commands that are being
14652 * retried.
14653 */
14654 if ((xp->xb_pktp == NULL) ||
14655 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) {
14656 #else
14657 if (xp->xb_pktp == NULL) {
14658 #endif
14659 /*
14660 * There is no scsi_pkt allocated for this buf. Call
14661 * the initpkt function to allocate & init one.
14662 *
14663 * The scsi_init_pkt runout callback functionality is
14664 * implemented as follows:
14665 *
14666 * 1) The initpkt function always calls
14667 * scsi_init_pkt(9F) with sdrunout specified as the
14668 * callback routine.
14669 * 2) A successfully allocated packet is initialized and
14670 * the I/O is transported.
14671 * 3) The I/O associated with an allocation resource
14672 * failure is left on its queue to be retried via
14673 * runout or the next I/O.
14674 * 4) The I/O associated with a DMA error is removed
14675 * from the queue and failed with EIO. Processing of
14676 * the transport queues is also halted to be
14677 * restarted via runout or the next I/O.
14678 * 5) The I/O associated with a CDB size or packet
14679 * size error is removed from the queue and failed
14680 * with EIO. Processing of the transport queues is
14681 * continued.
14682 *
14683 * Note: there is no interface for canceling a runout
14684 * callback.
To prevent the driver from detaching or 14685 * suspending while a runout is pending the driver 14686 * state is set to SD_STATE_RWAIT 14687 * 14688 * Note: using the scsi_init_pkt callback facility can 14689 * result in an I/O request persisting at the head of 14690 * the list which cannot be satisfied even after 14691 * multiple retries. In the future the driver may 14692 * implement some kind of maximum runout count before 14693 * failing an I/O. 14694 * 14695 * Note: the use of funcp below may seem superfluous, 14696 * but it helps warlock figure out the correct 14697 * initpkt function calls (see [s]sd.wlcmd). 14698 */ 14699 struct scsi_pkt *pktp; 14700 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 14701 14702 ASSERT(bp != un->un_rqs_bp); 14703 14704 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 14705 switch ((*funcp)(bp, &pktp)) { 14706 case SD_PKT_ALLOC_SUCCESS: 14707 xp->xb_pktp = pktp; 14708 SD_TRACE(SD_LOG_IO_CORE, un, 14709 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 14710 pktp); 14711 goto got_pkt; 14712 14713 case SD_PKT_ALLOC_FAILURE: 14714 /* 14715 * Temporary (hopefully) resource depletion. 14716 * Since retries and RQS commands always have a 14717 * scsi_pkt allocated, these cases should never 14718 * get here. So the only cases this needs to 14719 * handle is a bp from the waitq (which we put 14720 * back onto the waitq for sdrunout), or a bp 14721 * sent as an immed_bp (which we just fail). 14722 */ 14723 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14724 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14725 14726 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14727 14728 if (bp == immed_bp) { 14729 /* 14730 * If SD_XB_DMA_FREED is clear, then 14731 * this is a failure to allocate a 14732 * scsi_pkt, and we must fail the 14733 * command. 14734 */ 14735 if ((xp->xb_pkt_flags & 14736 SD_XB_DMA_FREED) == 0) { 14737 break; 14738 } 14739 14740 /* 14741 * If this immediate command is NOT our 14742 * un_retry_bp, then we must fail it. 14743 */ 14744 if (bp != un->un_retry_bp) { 14745 break; 14746 } 14747 14748 /* 14749 * We get here if this cmd is our 14750 * un_retry_bp that was DMAFREED, but 14751 * scsi_init_pkt() failed to reallocate 14752 * DMA resources when we attempted to 14753 * retry it. This can happen when an 14754 * mpxio failover is in progress, but 14755 * we don't want to just fail the 14756 * command in this case. 14757 * 14758 * Use timeout(9F) to restart it after 14759 * a 100ms delay. We don't want to 14760 * let sdrunout() restart it, because 14761 * sdrunout() is just supposed to start 14762 * commands that are sitting on the 14763 * wait queue. The un_retry_bp stays 14764 * set until the command completes, but 14765 * sdrunout can be called many times 14766 * before that happens. Since sdrunout 14767 * cannot tell if the un_retry_bp is 14768 * already in the transport, it could 14769 * end up calling scsi_transport() for 14770 * the un_retry_bp multiple times. 14771 * 14772 * Also: don't schedule the callback 14773 * if some other callback is already 14774 * pending. 14775 */ 14776 if (un->un_retry_statp == NULL) { 14777 /* 14778 * restore the kstat pointer to 14779 * keep kstat counts coherent 14780 * when we do retry the command. 
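 *
 * (kstat_io(9S) bookkeeping sketch: a bp normally moves
 * through kstat_waitq_enter -> kstat_waitq_to_runq ->
 * kstat_runq_exit. saved_statp preserves the pending
 * transition recorded when the retry was queued, so
 * re-arming the retry below does not double-count the
 * bp in the waitq statistics.)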
14781 */ 14782 un->un_retry_statp = 14783 saved_statp; 14784 } 14785 14786 if ((un->un_startstop_timeid == NULL) && 14787 (un->un_retry_timeid == NULL) && 14788 (un->un_direct_priority_timeid == 14789 NULL)) { 14790 14791 un->un_retry_timeid = 14792 timeout( 14793 sd_start_retry_command, 14794 un, SD_RESTART_TIMEOUT); 14795 } 14796 goto exit; 14797 } 14798 14799 #else 14800 if (bp == immed_bp) { 14801 break; /* Just fail the command */ 14802 } 14803 #endif 14804 14805 /* Add the buf back to the head of the waitq */ 14806 bp->av_forw = un->un_waitq_headp; 14807 un->un_waitq_headp = bp; 14808 if (un->un_waitq_tailp == NULL) { 14809 un->un_waitq_tailp = bp; 14810 } 14811 goto exit; 14812 14813 case SD_PKT_ALLOC_FAILURE_NO_DMA: 14814 /* 14815 * HBA DMA resource failure. Fail the command 14816 * and continue processing of the queues. 14817 */ 14818 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14819 "sd_start_cmds: " 14820 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 14821 break; 14822 14823 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 14824 /* 14825 * Note:x86: Partial DMA mapping not supported 14826 * for USCSI commands, and all the needed DMA 14827 * resources were not allocated. 14828 */ 14829 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14830 "sd_start_cmds: " 14831 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 14832 break; 14833 14834 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 14835 /* 14836 * Note:x86: Request cannot fit into CDB based 14837 * on lba and len. 14838 */ 14839 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14840 "sd_start_cmds: " 14841 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 14842 break; 14843 14844 default: 14845 /* Should NEVER get here! */ 14846 panic("scsi_initpkt error"); 14847 /*NOTREACHED*/ 14848 } 14849 14850 /* 14851 * Fatal error in allocating a scsi_pkt for this buf. 14852 * Update kstats & return the buf with an error code. 14853 * We must use sd_return_failed_command_no_restart() to 14854 * avoid a recursive call back into sd_start_cmds(). 14855 * However this also means that we must keep processing 14856 * the waitq here in order to avoid stalling. 14857 */ 14858 if (statp == kstat_waitq_to_runq) { 14859 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 14860 } 14861 sd_return_failed_command_no_restart(un, bp, EIO); 14862 if (bp == immed_bp) { 14863 /* immed_bp is gone by now, so clear this */ 14864 immed_bp = NULL; 14865 } 14866 continue; 14867 } 14868 got_pkt: 14869 if (bp == immed_bp) { 14870 /* goto the head of the class.... */ 14871 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14872 } 14873 14874 un->un_ncmds_in_transport++; 14875 SD_UPDATE_KSTATS(un, statp, bp); 14876 14877 /* 14878 * Call scsi_transport() to send the command to the target. 14879 * According to SCSA architecture, we must drop the mutex here 14880 * before calling scsi_transport() in order to avoid deadlock. 14881 * Note that the scsi_pkt's completion routine can be executed 14882 * (from interrupt context) even before the call to 14883 * scsi_transport() returns. 
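 *
 * (Hence un_ncmds_in_transport was incremented and the kstats
 * updated above, while SD_MUTEX is still held: sdintr() may run
 * before scsi_transport() returns, and it must already see this
 * command accounted for.)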
14884 */ 14885 SD_TRACE(SD_LOG_IO_CORE, un, 14886 "sd_start_cmds: calling scsi_transport()\n"); 14887 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 14888 14889 mutex_exit(SD_MUTEX(un)); 14890 rval = scsi_transport(xp->xb_pktp); 14891 mutex_enter(SD_MUTEX(un)); 14892 14893 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14894 "sd_start_cmds: scsi_transport() returned %d\n", rval); 14895 14896 switch (rval) { 14897 case TRAN_ACCEPT: 14898 /* Clear this with every pkt accepted by the HBA */ 14899 un->un_tran_fatal_count = 0; 14900 break; /* Success; try the next cmd (if any) */ 14901 14902 case TRAN_BUSY: 14903 un->un_ncmds_in_transport--; 14904 ASSERT(un->un_ncmds_in_transport >= 0); 14905 14906 /* 14907 * Don't retry request sense, the sense data 14908 * is lost when another request is sent. 14909 * Free up the rqs buf and retry 14910 * the original failed cmd. Update kstat. 14911 */ 14912 if (bp == un->un_rqs_bp) { 14913 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14914 bp = sd_mark_rqs_idle(un, xp); 14915 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 14916 NULL, NULL, EIO, un->un_busy_timeout / 500, 14917 kstat_waitq_enter); 14918 goto exit; 14919 } 14920 14921 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14922 /* 14923 * Free the DMA resources for the scsi_pkt. This will 14924 * allow mpxio to select another path the next time 14925 * we call scsi_transport() with this scsi_pkt. 14926 * See sdintr() for the rationalization behind this. 14927 */ 14928 if ((un->un_f_is_fibre == TRUE) && 14929 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14930 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 14931 scsi_dmafree(xp->xb_pktp); 14932 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14933 } 14934 #endif 14935 14936 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 14937 /* 14938 * Commands that are SD_PATH_DIRECT_PRIORITY 14939 * are for error recovery situations. These do 14940 * not use the normal command waitq, so if they 14941 * get a TRAN_BUSY we cannot put them back onto 14942 * the waitq for later retry. One possible 14943 * problem is that there could already be some 14944 * other command on un_retry_bp that is waiting 14945 * for this one to complete, so we would be 14946 * deadlocked if we put this command back onto 14947 * the waitq for later retry (since un_retry_bp 14948 * must complete before the driver gets back to 14949 * commands on the waitq). 14950 * 14951 * To avoid deadlock we must schedule a callback 14952 * that will restart this command after a set 14953 * interval. This should keep retrying for as 14954 * long as the underlying transport keeps 14955 * returning TRAN_BUSY (just like for other 14956 * commands). Use the same timeout interval as 14957 * for the ordinary TRAN_BUSY retry. 14958 */ 14959 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14960 "sd_start_cmds: scsi_transport() returned " 14961 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 14962 14963 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14964 un->un_direct_priority_timeid = 14965 timeout(sd_start_direct_priority_command, 14966 bp, un->un_busy_timeout / 500); 14967 14968 goto exit; 14969 } 14970 14971 /* 14972 * For TRAN_BUSY, we want to reduce the throttle value, 14973 * unless we are retrying a command. 14974 */ 14975 if (bp != un->un_retry_bp) { 14976 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 14977 } 14978 14979 /* 14980 * Set up the bp to be tried again 10 ms later. 14981 * Note:x86: Is there a timeout value in the sd_lun 14982 * for this condition? 
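 *
 * (Worked example, assuming the default un_busy_timeout of five
 * seconds on a 100 Hz clock, i.e. 500 ticks: un_busy_timeout / 500
 * is then one tick, or the ~10 ms noted above. A non-default busy
 * timeout scales this delay proportionally.)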
14983 */ 14984 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, 14985 kstat_runq_back_to_waitq); 14986 goto exit; 14987 14988 case TRAN_FATAL_ERROR: 14989 un->un_tran_fatal_count++; 14990 /* FALLTHRU */ 14991 14992 case TRAN_BADPKT: 14993 default: 14994 un->un_ncmds_in_transport--; 14995 ASSERT(un->un_ncmds_in_transport >= 0); 14996 14997 /* 14998 * If this is our REQUEST SENSE command with a 14999 * transport error, we must get back the pointers 15000 * to the original buf, and mark the REQUEST 15001 * SENSE command as "available". 15002 */ 15003 if (bp == un->un_rqs_bp) { 15004 bp = sd_mark_rqs_idle(un, xp); 15005 xp = SD_GET_XBUF(bp); 15006 } else { 15007 /* 15008 * Legacy behavior: do not update transport 15009 * error count for request sense commands. 15010 */ 15011 SD_UPDATE_ERRSTATS(un, sd_transerrs); 15012 } 15013 15014 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15015 sd_print_transport_rejected_message(un, xp, rval); 15016 15017 /* 15018 * This command will be terminated by SD driver due 15019 * to a fatal transport error. We should post 15020 * ereport.io.scsi.cmd.disk.tran with driver-assessment 15021 * of "fail" for any command to indicate this 15022 * situation. 15023 */ 15024 if (xp->xb_ena > 0) { 15025 ASSERT(un->un_fm_private != NULL); 15026 sfip = un->un_fm_private; 15027 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT; 15028 sd_ssc_extract_info(&sfip->fm_ssc, un, 15029 xp->xb_pktp, bp, xp); 15030 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 15031 } 15032 15033 /* 15034 * We must use sd_return_failed_command_no_restart() to 15035 * avoid a recursive call back into sd_start_cmds(). 15036 * However this also means that we must keep processing 15037 * the waitq here in order to avoid stalling. 15038 */ 15039 sd_return_failed_command_no_restart(un, bp, EIO); 15040 15041 /* 15042 * Notify any threads waiting in sd_ddi_suspend() that 15043 * a command completion has occurred. 15044 */ 15045 if (un->un_state == SD_STATE_SUSPENDED) { 15046 cv_broadcast(&un->un_disk_busy_cv); 15047 } 15048 15049 if (bp == immed_bp) { 15050 /* immed_bp is gone by now, so clear this */ 15051 immed_bp = NULL; 15052 } 15053 break; 15054 } 15055 15056 } while (immed_bp == NULL); 15057 15058 exit: 15059 ASSERT(mutex_owned(SD_MUTEX(un))); 15060 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 15061 } 15062 15063 15064 /* 15065 * Function: sd_return_command 15066 * 15067 * Description: Returns a command to its originator (with or without an 15068 * error). Also starts commands waiting to be transported 15069 * to the target. 15070 * 15071 * Context: May be called from interrupt, kernel, or timeout context 15072 */ 15073 15074 static void 15075 sd_return_command(struct sd_lun *un, struct buf *bp) 15076 { 15077 struct sd_xbuf *xp; 15078 struct scsi_pkt *pktp; 15079 struct sd_fm_internal *sfip; 15080 15081 ASSERT(bp != NULL); 15082 ASSERT(un != NULL); 15083 ASSERT(mutex_owned(SD_MUTEX(un))); 15084 ASSERT(bp != un->un_rqs_bp); 15085 xp = SD_GET_XBUF(bp); 15086 ASSERT(xp != NULL); 15087 15088 pktp = SD_GET_PKTP(bp); 15089 sfip = (struct sd_fm_internal *)un->un_fm_private; 15090 ASSERT(sfip != NULL); 15091 15092 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 15093 15094 /* 15095 * Note: check for the "sdrestart failed" case. 
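 *
 * (Reading of the test below: xb_dma_resid != 0 with pkt_resid == 0
 * means the HBA transferred the entire DMA window it was given, yet
 * the original request still has bytes outstanding. In that case
 * sd_setup_next_xfer() programs the next window and the command is
 * re-issued rather than completed.)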
15096 */ 15097 if ((un->un_partial_dma_supported == 1) && 15098 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 15099 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 15100 (xp->xb_pktp->pkt_resid == 0)) { 15101 15102 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 15103 /* 15104 * Successfully set up next portion of cmd 15105 * transfer, try sending it 15106 */ 15107 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 15108 NULL, NULL, 0, (clock_t)0, NULL); 15109 sd_start_cmds(un, NULL); 15110 return; /* Note:x86: need a return here? */ 15111 } 15112 } 15113 15114 /* 15115 * If this is the failfast bp, clear it from un_failfast_bp. This 15116 * can happen if upon being re-tried the failfast bp either 15117 * succeeded or encountered another error (possibly even a different 15118 * error than the one that precipitated the failfast state, but in 15119 * that case it would have had to exhaust retries as well). Regardless, 15120 * this should not occur whenever the instance is in the active 15121 * failfast state. 15122 */ 15123 if (bp == un->un_failfast_bp) { 15124 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 15125 un->un_failfast_bp = NULL; 15126 } 15127 15128 /* 15129 * Clear the failfast state upon successful completion of ANY cmd. 15130 */ 15131 if (bp->b_error == 0) { 15132 un->un_failfast_state = SD_FAILFAST_INACTIVE; 15133 /* 15134 * If this is a successful command, but used to be retried, 15135 * we will take it as a recovered command and post an 15136 * ereport with driver-assessment of "recovered". 15137 */ 15138 if (xp->xb_ena > 0) { 15139 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15140 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY); 15141 } 15142 } else { 15143 /* 15144 * If this is a failed non-USCSI command we will post an 15145 * ereport with driver-assessment set accordingly("fail" or 15146 * "fatal"). 15147 */ 15148 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 15149 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15150 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 15151 } 15152 } 15153 15154 /* 15155 * This is used if the command was retried one or more times. Show that 15156 * we are done with it, and allow processing of the waitq to resume. 15157 */ 15158 if (bp == un->un_retry_bp) { 15159 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15160 "sd_return_command: un:0x%p: " 15161 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 15162 un->un_retry_bp = NULL; 15163 un->un_retry_statp = NULL; 15164 } 15165 15166 SD_UPDATE_RDWR_STATS(un, bp); 15167 SD_UPDATE_PARTITION_STATS(un, bp); 15168 15169 switch (un->un_state) { 15170 case SD_STATE_SUSPENDED: 15171 /* 15172 * Notify any threads waiting in sd_ddi_suspend() that 15173 * a command completion has occurred. 15174 */ 15175 cv_broadcast(&un->un_disk_busy_cv); 15176 break; 15177 default: 15178 sd_start_cmds(un, NULL); 15179 break; 15180 } 15181 15182 /* Return this command up the iodone chain to its originator. */ 15183 mutex_exit(SD_MUTEX(un)); 15184 15185 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 15186 xp->xb_pktp = NULL; 15187 15188 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 15189 15190 ASSERT(!mutex_owned(SD_MUTEX(un))); 15191 mutex_enter(SD_MUTEX(un)); 15192 15193 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 15194 } 15195 15196 15197 /* 15198 * Function: sd_return_failed_command 15199 * 15200 * Description: Command completion when an error occurred. 
15201 * 15202 * Context: May be called from interrupt context 15203 */ 15204 15205 static void 15206 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 15207 { 15208 ASSERT(bp != NULL); 15209 ASSERT(un != NULL); 15210 ASSERT(mutex_owned(SD_MUTEX(un))); 15211 15212 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15213 "sd_return_failed_command: entry\n"); 15214 15215 /* 15216 * b_resid could already be nonzero due to a partial data 15217 * transfer, so do not change it here. 15218 */ 15219 SD_BIOERROR(bp, errcode); 15220 15221 sd_return_command(un, bp); 15222 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15223 "sd_return_failed_command: exit\n"); 15224 } 15225 15226 15227 /* 15228 * Function: sd_return_failed_command_no_restart 15229 * 15230 * Description: Same as sd_return_failed_command, but ensures that no 15231 * call back into sd_start_cmds will be issued. 15232 * 15233 * Context: May be called from interrupt context 15234 */ 15235 15236 static void 15237 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 15238 int errcode) 15239 { 15240 struct sd_xbuf *xp; 15241 15242 ASSERT(bp != NULL); 15243 ASSERT(un != NULL); 15244 ASSERT(mutex_owned(SD_MUTEX(un))); 15245 xp = SD_GET_XBUF(bp); 15246 ASSERT(xp != NULL); 15247 ASSERT(errcode != 0); 15248 15249 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15250 "sd_return_failed_command_no_restart: entry\n"); 15251 15252 /* 15253 * b_resid could already be nonzero due to a partial data 15254 * transfer, so do not change it here. 15255 */ 15256 SD_BIOERROR(bp, errcode); 15257 15258 /* 15259 * If this is the failfast bp, clear it. This can happen if the 15260 * failfast bp encounterd a fatal error when we attempted to 15261 * re-try it (such as a scsi_transport(9F) failure). However 15262 * we should NOT be in an active failfast state if the failfast 15263 * bp is not NULL. 15264 */ 15265 if (bp == un->un_failfast_bp) { 15266 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 15267 un->un_failfast_bp = NULL; 15268 } 15269 15270 if (bp == un->un_retry_bp) { 15271 /* 15272 * This command was retried one or more times. Show that we are 15273 * done with it, and allow processing of the waitq to resume. 15274 */ 15275 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15276 "sd_return_failed_command_no_restart: " 15277 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 15278 un->un_retry_bp = NULL; 15279 un->un_retry_statp = NULL; 15280 } 15281 15282 SD_UPDATE_RDWR_STATS(un, bp); 15283 SD_UPDATE_PARTITION_STATS(un, bp); 15284 15285 mutex_exit(SD_MUTEX(un)); 15286 15287 if (xp->xb_pktp != NULL) { 15288 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 15289 xp->xb_pktp = NULL; 15290 } 15291 15292 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 15293 15294 mutex_enter(SD_MUTEX(un)); 15295 15296 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15297 "sd_return_failed_command_no_restart: exit\n"); 15298 } 15299 15300 15301 /* 15302 * Function: sd_retry_command 15303 * 15304 * Description: queue up a command for retry, or (optionally) fail it 15305 * if retry counts are exhausted. 15306 * 15307 * Arguments: un - Pointer to the sd_lun struct for the target. 15308 * 15309 * bp - Pointer to the buf for the command to be retried. 15310 * 15311 * retry_check_flag - Flag to see which (if any) of the retry 15312 * counts should be decremented/checked. If the indicated 15313 * retry count is exhausted, then the command will not be 15314 * retried; it will be failed instead. 
This should use a
15315 * value equal to one of the following:
15316 *
15317 * SD_RETRIES_NOCHECK
15318 * SD_RETRIES_STANDARD
15319 * SD_RETRIES_VICTIM
15320 *
15321 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
15322 * if the check should be made to see if FLAG_ISOLATE is set
15323 * in the pkt. If FLAG_ISOLATE is set, then the command is
15324 * not retried; it is simply failed.
15325 *
15326 * user_funcp - Ptr to function to call before dispatching the
15327 * command. May be NULL if no action needs to be performed.
15328 * (Primarily intended for printing messages.)
15329 *
15330 * user_arg - Optional argument to be passed along to
15331 * the user_funcp call.
15332 *
15333 * failure_code - errno return code to set in the bp if the
15334 * command is going to be failed.
15335 *
15336 * retry_delay - Retry delay interval in (clock_t) units. May
15337 * be zero, which indicates that the command should be retried
15338 * immediately (i.e., without an intervening delay).
15339 *
15340 * statp - Ptr to kstat function to be updated if the command
15341 * is queued for a delayed retry. May be NULL if no kstat
15342 * update is desired.
15343 *
15344 * Context: May be called from interrupt context.
15345 */
15346
15347 static void
15348 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
15349 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
15350 code), void *user_arg, int failure_code, clock_t retry_delay,
15351 void (*statp)(kstat_io_t *))
15352 {
15353 struct sd_xbuf *xp;
15354 struct scsi_pkt *pktp;
15355 struct sd_fm_internal *sfip;
15356
15357 ASSERT(un != NULL);
15358 ASSERT(mutex_owned(SD_MUTEX(un)));
15359 ASSERT(bp != NULL);
15360 xp = SD_GET_XBUF(bp);
15361 ASSERT(xp != NULL);
15362 pktp = SD_GET_PKTP(bp);
15363 ASSERT(pktp != NULL);
15364
15365 sfip = (struct sd_fm_internal *)un->un_fm_private;
15366 ASSERT(sfip != NULL);
15367
15368 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
15369 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);
15370
15371 /*
15372 * If we are syncing or dumping, fail the command to avoid
15373 * recursively calling back into scsi_transport().
15374 */
15375 if (ddi_in_panic()) {
15376 goto fail_command_no_log;
15377 }
15378
15379 /*
15380 * We should never be retrying a command with FLAG_DIAGNOSE set, so
15381 * log an error and fail the command.
15382 */
15383 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
15384 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
15385 "ERROR, retrying FLAG_DIAGNOSE command.\n");
15386 sd_dump_memory(un, SD_LOG_IO, "CDB",
15387 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
15388 sd_dump_memory(un, SD_LOG_IO, "Sense Data",
15389 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
15390 goto fail_command;
15391 }
15392
15393 /*
15394 * If we are suspended, then put the command onto the head of the
15395 * wait queue since we don't want to start more commands, and
15396 * clear the un_retry_bp. The next time we are resumed, we will
15397 * handle the commands in the wait queue.
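 *
 * (The bp is requeued at the head, rather than the tail, of the
 * waitq; a command that was already dispatched or being retried
 * is thus the first to run once the instance leaves the
 * SUSPENDED/DUMPING state.)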
15398 */
15399 switch (un->un_state) {
15400 case SD_STATE_SUSPENDED:
15401 case SD_STATE_DUMPING:
15402 bp->av_forw = un->un_waitq_headp;
15403 un->un_waitq_headp = bp;
15404 if (un->un_waitq_tailp == NULL) {
15405 un->un_waitq_tailp = bp;
15406 }
15407 if (bp == un->un_retry_bp) {
15408 un->un_retry_bp = NULL;
15409 un->un_retry_statp = NULL;
15410 }
15411 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
15412 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
15413 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
15414 return;
15415 default:
15416 break;
15417 }
15418
15419 /*
15420 * If the caller wants us to check FLAG_ISOLATE, then see if that
15421 * is set; if it is then we do not want to retry the command.
15422 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
15423 */
15424 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
15425 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
15426 goto fail_command;
15427 }
15428 }
15429
15430
15431 /*
15432 * If SD_RETRIES_FAILFAST is set, it indicates that either a
15433 * command timeout or a selection timeout has occurred. This means
15434 * that we were unable to establish any kind of communication with
15435 * the target, and subsequent retries and/or commands are likely
15436 * to encounter similar results and take a long time to complete.
15437 *
15438 * If this is a failfast error condition, we need to update the
15439 * failfast state, even if this bp does not have B_FAILFAST set.
15440 */
15441 if (retry_check_flag & SD_RETRIES_FAILFAST) {
15442 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
15443 ASSERT(un->un_failfast_bp == NULL);
15444 /*
15445 * If we are already in the active failfast state, and
15446 * another failfast error condition has been detected,
15447 * then fail this command if it has B_FAILFAST set.
15448 * If B_FAILFAST is clear, then maintain the legacy
15449 * behavior of retrying heroically, even though this will
15450 * take a lot more time to fail the command.
15451 */
15452 if (bp->b_flags & B_FAILFAST) {
15453 goto fail_command;
15454 }
15455 } else {
15456 /*
15457 * We're not in the active failfast state, but we
15458 * have a failfast error condition, so we must begin
15459 * transition to the next state. We do this regardless
15460 * of whether or not this bp has B_FAILFAST set.
15461 */
15462 if (un->un_failfast_bp == NULL) {
15463 /*
15464 * This is the first bp to meet a failfast
15465 * condition so save it on un_failfast_bp &
15466 * do normal retry processing. Do not enter
15467 * active failfast state yet. This marks
15468 * entry into the "failfast pending" state.
15469 */
15470 un->un_failfast_bp = bp;
15471
15472 } else if (un->un_failfast_bp == bp) {
15473 /*
15474 * This is the second time *this* bp has
15475 * encountered a failfast error condition,
15476 * so enter active failfast state & flush
15477 * queues as appropriate.
15478 */
15479 un->un_failfast_state = SD_FAILFAST_ACTIVE;
15480 un->un_failfast_bp = NULL;
15481 sd_failfast_flushq(un);
15482
15483 /*
15484 * Fail this bp now if B_FAILFAST set;
15485 * otherwise continue with retries. (It would
15486 * be pretty ironic if this bp succeeded on a
15487 * subsequent retry after we just flushed all
15488 * the queues).
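 *
 * (State-machine summary of the above: SD_FAILFAST_INACTIVE ->
 * "failfast pending" (un_failfast_bp != NULL) on the first
 * failfast-class error -> SD_FAILFAST_ACTIVE, plus a queue
 * flush, when the same bp hits a second failfast error. Any
 * successful completion, or a retry without SD_RETRIES_FAILFAST,
 * returns the state to SD_FAILFAST_INACTIVE.)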
15489 */
15490 if (bp->b_flags & B_FAILFAST) {
15491 goto fail_command;
15492 }
15493
15494 #if !defined(lint) && !defined(__lint)
15495 } else {
15496 /*
15497 * If neither of the preceding conditionals
15498 * was true, it means that there is some
15499 * *other* bp that has met an initial failfast
15500 * condition and is currently either being
15501 * retried or is waiting to be retried. In
15502 * that case we should perform normal retry
15503 * processing on *this* bp, since there is a
15504 * chance that the current failfast condition
15505 * is transient and recoverable. If that does
15506 * not turn out to be the case, then retries
15507 * will be cleared when the wait queue is
15508 * flushed anyway.
15509 */
15510 #endif
15511 }
15512 }
15513 } else {
15514 /*
15515 * SD_RETRIES_FAILFAST is clear, which indicates that we
15516 * likely were able to at least establish some level of
15517 * communication with the target and subsequent commands
15518 * and/or retries are likely to get through to the target.
15519 * In this case we want to be aggressive about clearing
15520 * the failfast state. Note that this does not affect
15521 * the "failfast pending" condition.
15522 */
15523 un->un_failfast_state = SD_FAILFAST_INACTIVE;
15524 }
15525
15526
15527 /*
15528 * Check the specified retry count to see if we can still do
15529 * any retries with this pkt before we should fail it.
15530 */
15531 switch (retry_check_flag & SD_RETRIES_MASK) {
15532 case SD_RETRIES_VICTIM:
15533 /*
15534 * Check the victim retry count. If exhausted, then fall
15535 * thru & check against the standard retry count.
15536 */
15537 if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
15538 /* Increment count & proceed with the retry */
15539 xp->xb_victim_retry_count++;
15540 break;
15541 }
15542 /* Victim retries exhausted, fall back to std. retries... */
15543 /* FALLTHRU */
15544
15545 case SD_RETRIES_STANDARD:
15546 if (xp->xb_retry_count >= un->un_retry_count) {
15547 /* Retries exhausted, fail the command */
15548 SD_TRACE(SD_LOG_IO_CORE, un,
15549 "sd_retry_command: retries exhausted!\n");
15550 /*
15551 * update b_resid for failed SCMD_READ & SCMD_WRITE
15552 * commands with nonzero pkt_resid.
15553 */
15554 if ((pktp->pkt_reason == CMD_CMPLT) &&
15555 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
15556 (pktp->pkt_resid != 0)) {
15557 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
15558 if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
15559 SD_UPDATE_B_RESID(bp, pktp);
15560 }
15561 }
15562 goto fail_command;
15563 }
15564 xp->xb_retry_count++;
15565 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15566 "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
15567 break;
15568
15569 case SD_RETRIES_UA:
15570 if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
15571 /* Retries exhausted, fail the command */
15572 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
15573 "Unit Attention retries exhausted. 
" 15574 "Check the target.\n"); 15575 goto fail_command; 15576 } 15577 xp->xb_ua_retry_count++; 15578 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15579 "sd_retry_command: retry count:%d\n", 15580 xp->xb_ua_retry_count); 15581 break; 15582 15583 case SD_RETRIES_BUSY: 15584 if (xp->xb_retry_count >= un->un_busy_retry_count) { 15585 /* Retries exhausted, fail the command */ 15586 SD_TRACE(SD_LOG_IO_CORE, un, 15587 "sd_retry_command: retries exhausted!\n"); 15588 goto fail_command; 15589 } 15590 xp->xb_retry_count++; 15591 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15592 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15593 break; 15594 15595 case SD_RETRIES_NOCHECK: 15596 default: 15597 /* No retry count to check. Just proceed with the retry */ 15598 break; 15599 } 15600 15601 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15602 15603 /* 15604 * If this is a non-USCSI command being retried 15605 * during execution last time, we should post an ereport with 15606 * driver-assessment of the value "retry". 15607 * For partial DMA, request sense and STATUS_QFULL, there are no 15608 * hardware errors, we bypass ereport posting. 15609 */ 15610 if (failure_code != 0) { 15611 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 15612 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15613 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY); 15614 } 15615 } 15616 15617 /* 15618 * If we were given a zero timeout, we must attempt to retry the 15619 * command immediately (ie, without a delay). 15620 */ 15621 if (retry_delay == 0) { 15622 /* 15623 * Check some limiting conditions to see if we can actually 15624 * do the immediate retry. If we cannot, then we must 15625 * fall back to queueing up a delayed retry. 15626 */ 15627 if (un->un_ncmds_in_transport >= un->un_throttle) { 15628 /* 15629 * We are at the throttle limit for the target, 15630 * fall back to delayed retry. 15631 */ 15632 retry_delay = un->un_busy_timeout; 15633 statp = kstat_waitq_enter; 15634 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15635 "sd_retry_command: immed. retry hit " 15636 "throttle!\n"); 15637 } else { 15638 /* 15639 * We're clear to proceed with the immediate retry. 15640 * First call the user-provided function (if any) 15641 */ 15642 if (user_funcp != NULL) { 15643 (*user_funcp)(un, bp, user_arg, 15644 SD_IMMEDIATE_RETRY_ISSUED); 15645 #ifdef __lock_lint 15646 sd_print_incomplete_msg(un, bp, user_arg, 15647 SD_IMMEDIATE_RETRY_ISSUED); 15648 sd_print_cmd_incomplete_msg(un, bp, user_arg, 15649 SD_IMMEDIATE_RETRY_ISSUED); 15650 sd_print_sense_failed_msg(un, bp, user_arg, 15651 SD_IMMEDIATE_RETRY_ISSUED); 15652 #endif 15653 } 15654 15655 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15656 "sd_retry_command: issuing immediate retry\n"); 15657 15658 /* 15659 * Call sd_start_cmds() to transport the command to 15660 * the target. 15661 */ 15662 sd_start_cmds(un, bp); 15663 15664 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15665 "sd_retry_command exit\n"); 15666 return; 15667 } 15668 } 15669 15670 /* 15671 * Set up to retry the command after a delay. 
15672 * First call the user-provided function (if any) 15673 */ 15674 if (user_funcp != NULL) { 15675 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 15676 } 15677 15678 sd_set_retry_bp(un, bp, retry_delay, statp); 15679 15680 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15681 return; 15682 15683 fail_command: 15684 15685 if (user_funcp != NULL) { 15686 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 15687 } 15688 15689 fail_command_no_log: 15690 15691 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15692 "sd_retry_command: returning failed command\n"); 15693 15694 sd_return_failed_command(un, bp, failure_code); 15695 15696 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15697 } 15698 15699 15700 /* 15701 * Function: sd_set_retry_bp 15702 * 15703 * Description: Set up the given bp for retry. 15704 * 15705 * Arguments: un - ptr to associated softstate 15706 * bp - ptr to buf(9S) for the command 15707 * retry_delay - time interval before issuing retry (may be 0) 15708 * statp - optional pointer to kstat function 15709 * 15710 * Context: May be called under interrupt context 15711 */ 15712 15713 static void 15714 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 15715 void (*statp)(kstat_io_t *)) 15716 { 15717 ASSERT(un != NULL); 15718 ASSERT(mutex_owned(SD_MUTEX(un))); 15719 ASSERT(bp != NULL); 15720 15721 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15722 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 15723 15724 /* 15725 * Indicate that the command is being retried. This will not allow any 15726 * other commands on the wait queue to be transported to the target 15727 * until this command has been completed (success or failure). The 15728 * "retry command" is not transported to the target until the given 15729 * time delay expires, unless the user specified a 0 retry_delay. 15730 * 15731 * Note: the timeout(9F) callback routine is what actually calls 15732 * sd_start_cmds() to transport the command, with the exception of a 15733 * zero retry_delay. The only current implementor of a zero retry delay 15734 * is the case where a START_STOP_UNIT is sent to spin-up a device. 15735 */ 15736 if (un->un_retry_bp == NULL) { 15737 ASSERT(un->un_retry_statp == NULL); 15738 un->un_retry_bp = bp; 15739 15740 /* 15741 * If the user has not specified a delay the command should 15742 * be queued and no timeout should be scheduled. 15743 */ 15744 if (retry_delay == 0) { 15745 /* 15746 * Save the kstat pointer that will be used in the 15747 * call to SD_UPDATE_KSTATS() below, so that 15748 * sd_start_cmds() can correctly decrement the waitq 15749 * count when it is time to transport this command. 15750 */ 15751 un->un_retry_statp = statp; 15752 goto done; 15753 } 15754 } 15755 15756 if (un->un_retry_bp == bp) { 15757 /* 15758 * Save the kstat pointer that will be used in the call to 15759 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 15760 * correctly decrement the waitq count when it is time to 15761 * transport this command. 15762 */ 15763 un->un_retry_statp = statp; 15764 15765 /* 15766 * Schedule a timeout if: 15767 * 1) The user has specified a delay. 15768 * 2) There is not a START_STOP_UNIT callback pending. 15769 * 15770 * If no delay has been specified, then it is up to the caller 15771 * to ensure that IO processing continues without stalling. 15772 * Effectively, this means that the caller will issue the 15773 * required call to sd_start_cmds(). 
The START_STOP_UNIT 15774 * callback does this after the START STOP UNIT command has 15775 * completed. In either of these cases we should not schedule 15776 * a timeout callback here. Also don't schedule the timeout if 15777 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 15778 */ 15779 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 15780 (un->un_direct_priority_timeid == NULL)) { 15781 un->un_retry_timeid = 15782 timeout(sd_start_retry_command, un, retry_delay); 15783 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15784 "sd_set_retry_bp: setting timeout: un: 0x%p" 15785 " bp:0x%p un_retry_timeid:0x%p\n", 15786 un, bp, un->un_retry_timeid); 15787 } 15788 } else { 15789 /* 15790 * We only get in here if there is already another command 15791 * waiting to be retried. In this case, we just put the 15792 * given command onto the wait queue, so it can be transported 15793 * after the current retry command has completed. 15794 * 15795 * Also we have to make sure that if the command at the head 15796 * of the wait queue is the un_failfast_bp, that we do not 15797 * put ahead of it any other commands that are to be retried. 15798 */ 15799 if ((un->un_failfast_bp != NULL) && 15800 (un->un_failfast_bp == un->un_waitq_headp)) { 15801 /* 15802 * Enqueue this command AFTER the first command on 15803 * the wait queue (which is also un_failfast_bp). 15804 */ 15805 bp->av_forw = un->un_waitq_headp->av_forw; 15806 un->un_waitq_headp->av_forw = bp; 15807 if (un->un_waitq_headp == un->un_waitq_tailp) { 15808 un->un_waitq_tailp = bp; 15809 } 15810 } else { 15811 /* Enqueue this command at the head of the waitq. */ 15812 bp->av_forw = un->un_waitq_headp; 15813 un->un_waitq_headp = bp; 15814 if (un->un_waitq_tailp == NULL) { 15815 un->un_waitq_tailp = bp; 15816 } 15817 } 15818 15819 if (statp == NULL) { 15820 statp = kstat_waitq_enter; 15821 } 15822 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15823 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 15824 } 15825 15826 done: 15827 if (statp != NULL) { 15828 SD_UPDATE_KSTATS(un, statp, bp); 15829 } 15830 15831 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15832 "sd_set_retry_bp: exit un:0x%p\n", un); 15833 } 15834 15835 15836 /* 15837 * Function: sd_start_retry_command 15838 * 15839 * Description: Start the command that has been waiting on the target's 15840 * retry queue. Called from timeout(9F) context after the 15841 * retry delay interval has expired. 15842 * 15843 * Arguments: arg - pointer to associated softstate for the device. 15844 * 15845 * Context: timeout(9F) thread context. May not sleep. 15846 */ 15847 15848 static void 15849 sd_start_retry_command(void *arg) 15850 { 15851 struct sd_lun *un = arg; 15852 15853 ASSERT(un != NULL); 15854 ASSERT(!mutex_owned(SD_MUTEX(un))); 15855 15856 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15857 "sd_start_retry_command: entry\n"); 15858 15859 mutex_enter(SD_MUTEX(un)); 15860 15861 un->un_retry_timeid = NULL; 15862 15863 if (un->un_retry_bp != NULL) { 15864 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15865 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 15866 un, un->un_retry_bp); 15867 sd_start_cmds(un, un->un_retry_bp); 15868 } 15869 15870 mutex_exit(SD_MUTEX(un)); 15871 15872 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15873 "sd_start_retry_command: exit\n"); 15874 } 15875 15876 /* 15877 * Function: sd_rmw_msg_print_handler 15878 * 15879 * Description: If RMW mode is enabled and warning message is triggered 15880 * print I/O count during a fixed interval. 
15881 * 15882 * Arguments: arg - pointer to associated softstate for the device. 15883 * 15884 * Context: timeout(9F) thread context. May not sleep. 15885 */ 15886 static void 15887 sd_rmw_msg_print_handler(void *arg) 15888 { 15889 struct sd_lun *un = arg; 15890 15891 ASSERT(un != NULL); 15892 ASSERT(!mutex_owned(SD_MUTEX(un))); 15893 15894 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15895 "sd_rmw_msg_print_handler: entry\n"); 15896 15897 mutex_enter(SD_MUTEX(un)); 15898 15899 if (un->un_rmw_incre_count > 0) { 15900 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15901 "%"PRIu64" I/O requests are not aligned with %d disk " 15902 "sector size in %ld seconds. They are handled through " 15903 "Read Modify Write but the performance is very low!\n", 15904 un->un_rmw_incre_count, un->un_tgt_blocksize, 15905 drv_hztousec(SD_RMW_MSG_PRINT_TIMEOUT) / 1000000); 15906 un->un_rmw_incre_count = 0; 15907 un->un_rmw_msg_timeid = timeout(sd_rmw_msg_print_handler, 15908 un, SD_RMW_MSG_PRINT_TIMEOUT); 15909 } else { 15910 un->un_rmw_msg_timeid = NULL; 15911 } 15912 15913 mutex_exit(SD_MUTEX(un)); 15914 15915 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15916 "sd_rmw_msg_print_handler: exit\n"); 15917 } 15918 15919 /* 15920 * Function: sd_start_direct_priority_command 15921 * 15922 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 15923 * received TRAN_BUSY when we called scsi_transport() to send it 15924 * to the underlying HBA. This function is called from timeout(9F) 15925 * context after the delay interval has expired. 15926 * 15927 * Arguments: arg - pointer to associated buf(9S) to be restarted. 15928 * 15929 * Context: timeout(9F) thread context. May not sleep. 15930 */ 15931 15932 static void 15933 sd_start_direct_priority_command(void *arg) 15934 { 15935 struct buf *priority_bp = arg; 15936 struct sd_lun *un; 15937 15938 ASSERT(priority_bp != NULL); 15939 un = SD_GET_UN(priority_bp); 15940 ASSERT(un != NULL); 15941 ASSERT(!mutex_owned(SD_MUTEX(un))); 15942 15943 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15944 "sd_start_direct_priority_command: entry\n"); 15945 15946 mutex_enter(SD_MUTEX(un)); 15947 un->un_direct_priority_timeid = NULL; 15948 sd_start_cmds(un, priority_bp); 15949 mutex_exit(SD_MUTEX(un)); 15950 15951 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15952 "sd_start_direct_priority_command: exit\n"); 15953 } 15954 15955 15956 /* 15957 * Function: sd_send_request_sense_command 15958 * 15959 * Description: Sends a REQUEST SENSE command to the target 15960 * 15961 * Context: May be called from interrupt context. 15962 */ 15963 15964 static void 15965 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 15966 struct scsi_pkt *pktp) 15967 { 15968 ASSERT(bp != NULL); 15969 ASSERT(un != NULL); 15970 ASSERT(mutex_owned(SD_MUTEX(un))); 15971 15972 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 15973 "entry: buf:0x%p\n", bp); 15974 15975 /* 15976 * If we are syncing or dumping, then fail the command to avoid a 15977 * recursive callback into scsi_transport(). Also fail the command 15978 * if we are suspended (legacy behavior). 
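 *
 * (Background for the "sense buf busy / commands outstanding"
 * check later in this function: under SCSI contingent allegiance
 * rules the sense data for a failed command is implicitly cleared
 * once another command reaches the target, so a REQUEST SENSE
 * issued with other commands in flight would only return stale or
 * empty data; hence the original command is retried instead.)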
15979 */ 15980 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 15981 (un->un_state == SD_STATE_DUMPING)) { 15982 sd_return_failed_command(un, bp, EIO); 15983 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15984 "sd_send_request_sense_command: syncing/dumping, exit\n"); 15985 return; 15986 } 15987 15988 /* 15989 * Retry the failed command and don't issue the request sense if: 15990 * 1) the sense buf is busy 15991 * 2) we have 1 or more outstanding commands on the target 15992 * (the sense data will be cleared or invalidated any way) 15993 * 15994 * Note: There could be an issue with not checking a retry limit here, 15995 * the problem is determining which retry limit to check. 15996 */ 15997 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 15998 /* Don't retry if the command is flagged as non-retryable */ 15999 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16000 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 16001 NULL, NULL, 0, un->un_busy_timeout, 16002 kstat_waitq_enter); 16003 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16004 "sd_send_request_sense_command: " 16005 "at full throttle, retrying exit\n"); 16006 } else { 16007 sd_return_failed_command(un, bp, EIO); 16008 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16009 "sd_send_request_sense_command: " 16010 "at full throttle, non-retryable exit\n"); 16011 } 16012 return; 16013 } 16014 16015 sd_mark_rqs_busy(un, bp); 16016 sd_start_cmds(un, un->un_rqs_bp); 16017 16018 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16019 "sd_send_request_sense_command: exit\n"); 16020 } 16021 16022 16023 /* 16024 * Function: sd_mark_rqs_busy 16025 * 16026 * Description: Indicate that the request sense bp for this instance is 16027 * in use. 16028 * 16029 * Context: May be called under interrupt context 16030 */ 16031 16032 static void 16033 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 16034 { 16035 struct sd_xbuf *sense_xp; 16036 16037 ASSERT(un != NULL); 16038 ASSERT(bp != NULL); 16039 ASSERT(mutex_owned(SD_MUTEX(un))); 16040 ASSERT(un->un_sense_isbusy == 0); 16041 16042 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 16043 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 16044 16045 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 16046 ASSERT(sense_xp != NULL); 16047 16048 SD_INFO(SD_LOG_IO, un, 16049 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 16050 16051 ASSERT(sense_xp->xb_pktp != NULL); 16052 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 16053 == (FLAG_SENSING | FLAG_HEAD)); 16054 16055 un->un_sense_isbusy = 1; 16056 un->un_rqs_bp->b_resid = 0; 16057 sense_xp->xb_pktp->pkt_resid = 0; 16058 sense_xp->xb_pktp->pkt_reason = 0; 16059 16060 /* So we can get back the bp at interrupt time! */ 16061 sense_xp->xb_sense_bp = bp; 16062 16063 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 16064 16065 /* 16066 * Mark this buf as awaiting sense data. (This is already set in 16067 * the pkt_flags for the RQS packet.) 
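 *
 * (How the pieces fit together: FLAG_SENSING on the original pkt
 * marks it as waiting for sense data, xb_sense_bp set above lets
 * sdintr() recover the original buf when the REQUEST SENSE
 * completes, and sd_mark_rqs_idle() then clears both again.)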
16068 */ 16069 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 16070 16071 /* Request sense down same path */ 16072 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) && 16073 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance) 16074 sense_xp->xb_pktp->pkt_path_instance = 16075 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance; 16076 16077 sense_xp->xb_retry_count = 0; 16078 sense_xp->xb_victim_retry_count = 0; 16079 sense_xp->xb_ua_retry_count = 0; 16080 sense_xp->xb_nr_retry_count = 0; 16081 sense_xp->xb_dma_resid = 0; 16082 16083 /* Clean up the fields for auto-request sense */ 16084 sense_xp->xb_sense_status = 0; 16085 sense_xp->xb_sense_state = 0; 16086 sense_xp->xb_sense_resid = 0; 16087 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 16088 16089 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 16090 } 16091 16092 16093 /* 16094 * Function: sd_mark_rqs_idle 16095 * 16096 * Description: SD_MUTEX must be held continuously through this routine 16097 * to prevent reuse of the rqs struct before the caller can 16098 * complete it's processing. 16099 * 16100 * Return Code: Pointer to the RQS buf 16101 * 16102 * Context: May be called under interrupt context 16103 */ 16104 16105 static struct buf * 16106 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 16107 { 16108 struct buf *bp; 16109 ASSERT(un != NULL); 16110 ASSERT(sense_xp != NULL); 16111 ASSERT(mutex_owned(SD_MUTEX(un))); 16112 ASSERT(un->un_sense_isbusy != 0); 16113 16114 un->un_sense_isbusy = 0; 16115 bp = sense_xp->xb_sense_bp; 16116 sense_xp->xb_sense_bp = NULL; 16117 16118 /* This pkt is no longer interested in getting sense data */ 16119 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 16120 16121 return (bp); 16122 } 16123 16124 16125 16126 /* 16127 * Function: sd_alloc_rqs 16128 * 16129 * Description: Set up the unit to receive auto request sense data 16130 * 16131 * Return Code: DDI_SUCCESS or DDI_FAILURE 16132 * 16133 * Context: Called under attach(9E) context 16134 */ 16135 16136 static int 16137 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 16138 { 16139 struct sd_xbuf *xp; 16140 16141 ASSERT(un != NULL); 16142 ASSERT(!mutex_owned(SD_MUTEX(un))); 16143 ASSERT(un->un_rqs_bp == NULL); 16144 ASSERT(un->un_rqs_pktp == NULL); 16145 16146 /* 16147 * First allocate the required buf and scsi_pkt structs, then set up 16148 * the CDB in the scsi_pkt for a REQUEST SENSE command. 16149 */ 16150 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 16151 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 16152 if (un->un_rqs_bp == NULL) { 16153 return (DDI_FAILURE); 16154 } 16155 16156 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 16157 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 16158 16159 if (un->un_rqs_pktp == NULL) { 16160 sd_free_rqs(un); 16161 return (DDI_FAILURE); 16162 } 16163 16164 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 16165 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 16166 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 16167 16168 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 16169 16170 /* Set up the other needed members in the ARQ scsi_pkt. */ 16171 un->un_rqs_pktp->pkt_comp = sdintr; 16172 un->un_rqs_pktp->pkt_time = sd_io_time; 16173 un->un_rqs_pktp->pkt_flags |= 16174 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 16175 16176 /* 16177 * Allocate & init the sd_xbuf struct for the RQS command. 
Do not
16178 * provide any initpkt, destroypkt routines as we take care of
16179 * scsi_pkt allocation/freeing here and in sd_free_rqs().
16180 */
16181 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
16182 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
16183 xp->xb_pktp = un->un_rqs_pktp;
16184 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16185 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
16186 un, xp, un->un_rqs_pktp, un->un_rqs_bp);
16187
16188 /*
16189 * Save the pointer to the request sense private bp so it can
16190 * be retrieved in sdintr.
16191 */
16192 un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
16193 ASSERT(un->un_rqs_bp->b_private == xp);
16194
16195 /*
16196 * See if the HBA supports auto-request sense for the specified
16197 * target/lun. If it does, then try to enable it (if not already
16198 * enabled).
16199 *
16200 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
16201 * failure, while for other HBAs (pln) scsi_ifsetcap will always
16202 * return success. However, in both of these cases ARQ is always
16203 * enabled and scsi_ifgetcap will always return true. The best approach
16204 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
16205 *
16206 * The third case is an HBA (adp) that always returns enabled on
16207 * scsi_ifgetcap even when ARQ is not enabled; there the best approach
16208 * is to issue a scsi_ifsetcap and then a scsi_ifgetcap.
16209 * Note: this case is to circumvent the Adaptec bug. (x86 only)
16210 */
16211
16212 if (un->un_f_is_fibre == TRUE) {
16213 un->un_f_arq_enabled = TRUE;
16214 } else {
16215 #if defined(__i386) || defined(__amd64)
16216 /*
16217 * Circumvent the Adaptec bug, remove this code when
16218 * the bug is fixed
16219 */
16220 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
16221 #endif
16222 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
16223 case 0:
16224 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16225 "sd_alloc_rqs: HBA supports ARQ\n");
16226 /*
16227 * ARQ is supported by this HBA but currently is not
16228 * enabled. Attempt to enable it and if successful then
16229 * mark this instance as ARQ enabled.
16230 */
16231 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
16232 == 1) {
16233 /* Successfully enabled ARQ in the HBA */
16234 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16235 "sd_alloc_rqs: ARQ enabled\n");
16236 un->un_f_arq_enabled = TRUE;
16237 } else {
16238 /* Could not enable ARQ in the HBA */
16239 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16240 "sd_alloc_rqs: failed ARQ enable\n");
16241 un->un_f_arq_enabled = FALSE;
16242 }
16243 break;
16244 case 1:
16245 /*
16246 * ARQ is supported by this HBA and is already enabled.
16247 * Just mark ARQ as enabled for this instance.
16248 */
16249 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16250 "sd_alloc_rqs: ARQ already enabled\n");
16251 un->un_f_arq_enabled = TRUE;
16252 break;
16253 default:
16254 /*
16255 * ARQ is not supported by this HBA; disable it for this
16256 * instance.
16257 */
16258 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16259 "sd_alloc_rqs: HBA does not support ARQ\n");
16260 un->un_f_arq_enabled = FALSE;
16261 break;
16262 }
16263 }
16264
16265 return (DDI_SUCCESS);
16266 }
16267
16268
16269 /*
16270 * Function: sd_free_rqs
16271 *
16272 * Description: Cleanup for the per-instance RQS command.
16273 * 16274 * Context: Kernel thread context 16275 */ 16276 16277 static void 16278 sd_free_rqs(struct sd_lun *un) 16279 { 16280 ASSERT(un != NULL); 16281 16282 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 16283 16284 /* 16285 * If consistent memory is bound to a scsi_pkt, the pkt 16286 * has to be destroyed *before* freeing the consistent memory. 16287 * Don't change the sequence of these operations: 16288 * scsi_destroy_pkt() might access memory after it has been freed 16289 * by scsi_free_consistent_buf(), which isn't allowed. 16290 */ 16291 if (un->un_rqs_pktp != NULL) { 16292 scsi_destroy_pkt(un->un_rqs_pktp); 16293 un->un_rqs_pktp = NULL; 16294 } 16295 16296 if (un->un_rqs_bp != NULL) { 16297 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp); 16298 if (xp != NULL) { 16299 kmem_free(xp, sizeof (struct sd_xbuf)); 16300 } 16301 scsi_free_consistent_buf(un->un_rqs_bp); 16302 un->un_rqs_bp = NULL; 16303 } 16304 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 16305 } 16306 16307 16308 16309 /* 16310 * Function: sd_reduce_throttle 16311 * 16312 * Description: Reduces the maximum # of outstanding commands on a 16313 * target to the current number of outstanding commands. 16314 * Queues a timeout(9F) callback to restore the limit 16315 * after a specified interval has elapsed. 16316 * Typically used when we get a TRAN_BUSY return code 16317 * back from scsi_transport(). 16318 * 16319 * Arguments: un - ptr to the sd_lun softstate struct 16320 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 16321 * 16322 * Context: May be called from interrupt context 16323 */ 16324 16325 static void 16326 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 16327 { 16328 ASSERT(un != NULL); 16329 ASSERT(mutex_owned(SD_MUTEX(un))); 16330 ASSERT(un->un_ncmds_in_transport >= 0); 16331 16332 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 16333 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 16334 un, un->un_throttle, un->un_ncmds_in_transport); 16335 16336 if (un->un_throttle > 1) { 16337 if (un->un_f_use_adaptive_throttle == TRUE) { 16338 switch (throttle_type) { 16339 case SD_THROTTLE_TRAN_BUSY: 16340 if (un->un_busy_throttle == 0) { 16341 un->un_busy_throttle = un->un_throttle; 16342 } 16343 break; 16344 case SD_THROTTLE_QFULL: 16345 un->un_busy_throttle = 0; 16346 break; 16347 default: 16348 ASSERT(FALSE); 16349 } 16350 16351 if (un->un_ncmds_in_transport > 0) { 16352 un->un_throttle = un->un_ncmds_in_transport; 16353 } 16354 16355 } else { 16356 if (un->un_ncmds_in_transport == 0) { 16357 un->un_throttle = 1; 16358 } else { 16359 un->un_throttle = un->un_ncmds_in_transport; 16360 } 16361 } 16362 } 16363 16364 /* Reschedule the timeout if none is currently active */ 16365 if (un->un_reset_throttle_timeid == NULL) { 16366 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 16367 un, SD_THROTTLE_RESET_INTERVAL); 16368 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16369 "sd_reduce_throttle: timeout scheduled!\n"); 16370 } 16371 16372 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 16373 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 16374 } 16375 16376 16377 16378 /* 16379 * Function: sd_restore_throttle 16380 * 16381 * Description: Callback function for timeout(9F). Resets the current 16382 * value of un->un_throttle to its default. 16383 * 16384 * Arguments: arg - pointer to associated softstate for the device.
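 *
 * For orientation, a minimal sketch (simplified, with illustrative
 * names only, not driver code) of the QFULL ramp-up arithmetic that
 * the routine below implements:
 *
 *	short step = cur_throttle + max(cur_throttle / 10, 1);
 *	cur_throttle = (step < saved_throttle) ? step : saved_throttle;
 *
 * e.g. with cur_throttle == 8 and saved_throttle == 256, the limit
 * ramps 8 -> 9 -> 10 -> ..., and another timeout(9F) is scheduled
 * after each step until saved_throttle is reached.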
16385 * 16386 * Context: May be called from interrupt context 16387 */ 16388 16389 static void 16390 sd_restore_throttle(void *arg) 16391 { 16392 struct sd_lun *un = arg; 16393 16394 ASSERT(un != NULL); 16395 ASSERT(!mutex_owned(SD_MUTEX(un))); 16396 16397 mutex_enter(SD_MUTEX(un)); 16398 16399 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 16400 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 16401 16402 un->un_reset_throttle_timeid = NULL; 16403 16404 if (un->un_f_use_adaptive_throttle == TRUE) { 16405 /* 16406 * If un_busy_throttle is nonzero, then it contains the 16407 * value that un_throttle had when we got a TRAN_BUSY back 16408 * from scsi_transport(). We want to revert back to this 16409 * value. 16410 * 16411 * In the QFULL case, the throttle limit will incrementally 16412 * increase until it reaches max throttle. 16413 */ 16414 if (un->un_busy_throttle > 0) { 16415 un->un_throttle = un->un_busy_throttle; 16416 un->un_busy_throttle = 0; 16417 } else { 16418 /* 16419 * Increase the throttle by 10% to open the gate 16420 * slowly; schedule another restore if the saved 16421 * throttle has not been reached. 16422 */ 16423 short throttle; 16424 if (sd_qfull_throttle_enable) { 16425 throttle = un->un_throttle + 16426 max((un->un_throttle / 10), 1); 16427 un->un_throttle = 16428 (throttle < un->un_saved_throttle) ? 16429 throttle : un->un_saved_throttle; 16430 if (un->un_throttle < un->un_saved_throttle) { 16431 un->un_reset_throttle_timeid = 16432 timeout(sd_restore_throttle, 16433 un, 16434 SD_QFULL_THROTTLE_RESET_INTERVAL); 16435 } 16436 } 16437 } 16438 16439 /* 16440 * If un_throttle has fallen below the low-water mark, we 16441 * restore the maximum value here (and allow it to ratchet 16442 * down again if necessary). 16443 */ 16444 if (un->un_throttle < un->un_min_throttle) { 16445 un->un_throttle = un->un_saved_throttle; 16446 } 16447 } else { 16448 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 16449 "restoring limit from 0x%x to 0x%x\n", 16450 un->un_throttle, un->un_saved_throttle); 16451 un->un_throttle = un->un_saved_throttle; 16452 } 16453 16454 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 16455 "sd_restore_throttle: calling sd_start_cmds!\n"); 16456 16457 sd_start_cmds(un, NULL); 16458 16459 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 16460 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 16461 un, un->un_throttle); 16462 16463 mutex_exit(SD_MUTEX(un)); 16464 16465 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 16466 } 16467 16468 /* 16469 * Function: sdrunout 16470 * 16471 * Description: Callback routine for scsi_init_pkt when a resource allocation 16472 * fails. 16473 * 16474 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 16475 * soft state instance. 16476 * 16477 * Return Code: The scsi_init_pkt routine allows for the callback function to 16478 * return a 0 indicating the callback should be rescheduled or a 1 16479 * indicating not to reschedule. This routine always returns 1 16480 * because the driver always provides a callback function to 16481 * scsi_init_pkt. This results in a callback always being scheduled 16482 * (via the scsi_init_pkt callback implementation) if a resource 16483 * failure occurs. 16484 * 16485 * Context: This callback function may not block or call routines that block 16486 * 16487 * Note: Using the scsi_init_pkt callback facility can result in an I/O 16488 * request persisting at the head of the list which cannot be 16489 * satisfied even after multiple retries.
In the future the driver 16490 * may implement some type of maximum runout count before failing 16491 * an I/O. 16492 */ 16493 16494 static int 16495 sdrunout(caddr_t arg) 16496 { 16497 struct sd_lun *un = (struct sd_lun *)arg; 16498 16499 ASSERT(un != NULL); 16500 ASSERT(!mutex_owned(SD_MUTEX(un))); 16501 16502 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 16503 16504 mutex_enter(SD_MUTEX(un)); 16505 sd_start_cmds(un, NULL); 16506 mutex_exit(SD_MUTEX(un)); 16507 /* 16508 * This callback routine always returns 1 (i.e. do not reschedule) 16509 * because we always specify sdrunout as the callback handler for 16510 * scsi_init_pkt inside the call to sd_start_cmds. 16511 */ 16512 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 16513 return (1); 16514 } 16515 16516 16517 /* 16518 * Function: sdintr 16519 * 16520 * Description: Completion callback routine for scsi_pkt(9S) structs 16521 * sent to the HBA driver via scsi_transport(9F). 16522 * 16523 * Context: Interrupt context 16524 */ 16525 16526 static void 16527 sdintr(struct scsi_pkt *pktp) 16528 { 16529 struct buf *bp; 16530 struct sd_xbuf *xp; 16531 struct sd_lun *un; 16532 size_t actual_len; 16533 sd_ssc_t *sscp; 16534 16535 ASSERT(pktp != NULL); 16536 bp = (struct buf *)pktp->pkt_private; 16537 ASSERT(bp != NULL); 16538 xp = SD_GET_XBUF(bp); 16539 ASSERT(xp != NULL); 16540 ASSERT(xp->xb_pktp != NULL); 16541 un = SD_GET_UN(bp); 16542 ASSERT(un != NULL); 16543 ASSERT(!mutex_owned(SD_MUTEX(un))); 16544 16545 #ifdef SD_FAULT_INJECTION 16546 16547 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 16548 /* SD FaultInjection */ 16549 sd_faultinjection(pktp); 16550 16551 #endif /* SD_FAULT_INJECTION */ 16552 16553 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 16554 " xp:0x%p, un:0x%p\n", bp, xp, un); 16555 16556 mutex_enter(SD_MUTEX(un)); 16557 16558 ASSERT(un->un_fm_private != NULL); 16559 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 16560 ASSERT(sscp != NULL); 16561 16562 /* Reduce the count of the #commands currently in transport */ 16563 un->un_ncmds_in_transport--; 16564 ASSERT(un->un_ncmds_in_transport >= 0); 16565 16566 /* Increment counter to indicate that the callback routine is active */ 16567 un->un_in_callback++; 16568 16569 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 16570 16571 #ifdef SDDEBUG 16572 if (bp == un->un_retry_bp) { 16573 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 16574 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 16575 un, un->un_retry_bp, un->un_ncmds_in_transport); 16576 } 16577 #endif 16578 16579 /* 16580 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 16581 * state if needed. 16582 */ 16583 if (pktp->pkt_reason == CMD_DEV_GONE) { 16584 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16585 "Command failed to complete...Device is gone\n"); 16586 if (un->un_mediastate != DKIO_DEV_GONE) { 16587 un->un_mediastate = DKIO_DEV_GONE; 16588 cv_broadcast(&un->un_state_cv); 16589 } 16590 sd_return_failed_command(un, bp, EIO); 16591 goto exit; 16592 } 16593 16594 if (pktp->pkt_state & STATE_XARQ_DONE) { 16595 SD_TRACE(SD_LOG_COMMON, un, 16596 "sdintr: extra sense data received. pkt=%p\n", pktp); 16597 } 16598 16599 /* 16600 * First see if the pkt has auto-request sense data with it.... 16601 * Look at the packet state first so we don't take a performance 16602 * hit looking at the arq enabled flag unless absolutely necessary.
16603 */ 16604 if ((pktp->pkt_state & STATE_ARQ_DONE) && 16605 (un->un_f_arq_enabled == TRUE)) { 16606 /* 16607 * The HBA did an auto request sense for this command so check 16608 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16609 * driver command that should not be retried. 16610 */ 16611 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16612 /* 16613 * Save the relevant sense info into the xp for the 16614 * original cmd. 16615 */ 16616 struct scsi_arq_status *asp; 16617 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16618 xp->xb_sense_status = 16619 *((uchar_t *)(&(asp->sts_rqpkt_status))); 16620 xp->xb_sense_state = asp->sts_rqpkt_state; 16621 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16622 if (pktp->pkt_state & STATE_XARQ_DONE) { 16623 actual_len = MAX_SENSE_LENGTH - 16624 xp->xb_sense_resid; 16625 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16626 MAX_SENSE_LENGTH); 16627 } else { 16628 if (xp->xb_sense_resid > SENSE_LENGTH) { 16629 actual_len = MAX_SENSE_LENGTH - 16630 xp->xb_sense_resid; 16631 } else { 16632 actual_len = SENSE_LENGTH - 16633 xp->xb_sense_resid; 16634 } 16635 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16636 if ((((struct uscsi_cmd *) 16637 (xp->xb_pktinfo))->uscsi_rqlen) > 16638 actual_len) { 16639 xp->xb_sense_resid = 16640 (((struct uscsi_cmd *) 16641 (xp->xb_pktinfo))-> 16642 uscsi_rqlen) - actual_len; 16643 } else { 16644 xp->xb_sense_resid = 0; 16645 } 16646 } 16647 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16648 SENSE_LENGTH); 16649 } 16650 16651 /* fail the command */ 16652 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16653 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 16654 sd_return_failed_command(un, bp, EIO); 16655 goto exit; 16656 } 16657 16658 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16659 /* 16660 * We want to either retry or fail this command, so free 16661 * the DMA resources here. If we retry the command then 16662 * the DMA resources will be reallocated in sd_start_cmds(). 16663 * Note that when PKT_DMA_PARTIAL is used, this reallocation 16664 * causes the *entire* transfer to start over again from the 16665 * beginning of the request, even for PARTIAL chunks that 16666 * have already transferred successfully. 16667 */ 16668 if ((un->un_f_is_fibre == TRUE) && 16669 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16670 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16671 scsi_dmafree(pktp); 16672 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16673 } 16674 #endif 16675 16676 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16677 "sdintr: arq done, sd_handle_auto_request_sense\n"); 16678 16679 sd_handle_auto_request_sense(un, bp, xp, pktp); 16680 goto exit; 16681 } 16682 16683 /* Next see if this is the REQUEST SENSE pkt for the instance */ 16684 if (pktp->pkt_flags & FLAG_SENSING) { 16685 /* This pktp is from the unit's REQUEST_SENSE command */ 16686 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16687 "sdintr: sd_handle_request_sense\n"); 16688 sd_handle_request_sense(un, bp, xp, pktp); 16689 goto exit; 16690 } 16691 16692 /* 16693 * Check to see if the command successfully completed as requested; 16694 * this is the most common case (and also the hot performance path). 16695 * 16696 * Requirements for successful completion are: 16697 * pkt_reason is CMD_CMPLT and packet status is status good. 16698 * In addition: 16699 * - A residual of zero indicates successful completion no matter what 16700 * the command is. 
16701 * - If the residual is not zero and the command is not a read or 16702 * write, then it's still defined as successful completion. In other 16703 * words, if the command is a read or write the residual must be 16704 * zero for successful completion. 16705 * - If the residual is not zero and the command is a read or 16706 * write, and it's a USCSICMD, then it's still defined as 16707 * successful completion. 16708 */ 16709 if ((pktp->pkt_reason == CMD_CMPLT) && 16710 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 16711 16712 /* 16713 * Since this command is returned with a good status, we 16714 * can reset the count for Sonoma failover. 16715 */ 16716 un->un_sonoma_failure_count = 0; 16717 16718 /* 16719 * Return all USCSI commands on good status 16720 */ 16721 if (pktp->pkt_resid == 0) { 16722 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16723 "sdintr: returning command for resid == 0\n"); 16724 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 16725 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 16726 SD_UPDATE_B_RESID(bp, pktp); 16727 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16728 "sdintr: returning command for resid != 0\n"); 16729 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16730 SD_UPDATE_B_RESID(bp, pktp); 16731 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16732 "sdintr: returning uscsi command\n"); 16733 } else { 16734 goto not_successful; 16735 } 16736 sd_return_command(un, bp); 16737 16738 /* 16739 * Decrement counter to indicate that the callback routine 16740 * is done. 16741 */ 16742 un->un_in_callback--; 16743 ASSERT(un->un_in_callback >= 0); 16744 mutex_exit(SD_MUTEX(un)); 16745 16746 return; 16747 } 16748 16749 not_successful: 16750 16751 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16752 /* 16753 * The following is based upon knowledge of the underlying transport 16754 * and its use of DMA resources. This code should be removed when 16755 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 16756 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 16757 * and sd_start_cmds(). 16758 * 16759 * Free any DMA resources associated with this command if there 16760 * is a chance it could be retried or enqueued for later retry. 16761 * If we keep the DMA binding then mpxio cannot reissue the 16762 * command on another path whenever a path failure occurs. 16763 * 16764 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 16765 * causes the *entire* transfer to start over again from the 16766 * beginning of the request, even for PARTIAL chunks that 16767 * have already transferred successfully. 16768 * 16769 * This is only done for non-uscsi commands (and also skipped for the 16770 * driver's internal RQS command). Also just do this for Fibre Channel 16771 * devices as these are the only ones that support mpxio. 16772 */ 16773 if ((un->un_f_is_fibre == TRUE) && 16774 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16775 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16776 scsi_dmafree(pktp); 16777 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16778 } 16779 #endif 16780 16781 /* 16782 * The command did not successfully complete as requested so check 16783 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16784 * driver command that should not be retried so just return. If 16785 * FLAG_DIAGNOSE is not set the error will be processed below. 
16786 */ 16787 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16788 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16789 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 16790 /* 16791 * Issue a request sense if a check condition caused the error 16792 * (we handle the auto request sense case above), otherwise 16793 * just fail the command. 16794 */ 16795 if ((pktp->pkt_reason == CMD_CMPLT) && 16796 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 16797 sd_send_request_sense_command(un, bp, pktp); 16798 } else { 16799 sd_return_failed_command(un, bp, EIO); 16800 } 16801 goto exit; 16802 } 16803 16804 /* 16805 * The command did not successfully complete as requested so process 16806 * the error, retry, and/or attempt recovery. 16807 */ 16808 switch (pktp->pkt_reason) { 16809 case CMD_CMPLT: 16810 switch (SD_GET_PKT_STATUS(pktp)) { 16811 case STATUS_GOOD: 16812 /* 16813 * The command completed successfully with a non-zero 16814 * residual 16815 */ 16816 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16817 "sdintr: STATUS_GOOD \n"); 16818 sd_pkt_status_good(un, bp, xp, pktp); 16819 break; 16820 16821 case STATUS_CHECK: 16822 case STATUS_TERMINATED: 16823 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16824 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 16825 sd_pkt_status_check_condition(un, bp, xp, pktp); 16826 break; 16827 16828 case STATUS_BUSY: 16829 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16830 "sdintr: STATUS_BUSY\n"); 16831 sd_pkt_status_busy(un, bp, xp, pktp); 16832 break; 16833 16834 case STATUS_RESERVATION_CONFLICT: 16835 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16836 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 16837 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16838 break; 16839 16840 case STATUS_QFULL: 16841 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16842 "sdintr: STATUS_QFULL\n"); 16843 sd_pkt_status_qfull(un, bp, xp, pktp); 16844 break; 16845 16846 case STATUS_MET: 16847 case STATUS_INTERMEDIATE: 16848 case STATUS_SCSI2: 16849 case STATUS_INTERMEDIATE_MET: 16850 case STATUS_ACA_ACTIVE: 16851 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16852 "Unexpected SCSI status received: 0x%x\n", 16853 SD_GET_PKT_STATUS(pktp)); 16854 /* 16855 * Mark the ssc_flags when detected invalid status 16856 * code for non-USCSI command. 
16857 */ 16858 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16859 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 16860 0, "stat-code"); 16861 } 16862 sd_return_failed_command(un, bp, EIO); 16863 break; 16864 16865 default: 16866 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16867 "Invalid SCSI status received: 0x%x\n", 16868 SD_GET_PKT_STATUS(pktp)); 16869 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16870 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 16871 0, "stat-code"); 16872 } 16873 sd_return_failed_command(un, bp, EIO); 16874 break; 16875 16876 } 16877 break; 16878 16879 case CMD_INCOMPLETE: 16880 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16881 "sdintr: CMD_INCOMPLETE\n"); 16882 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 16883 break; 16884 case CMD_TRAN_ERR: 16885 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16886 "sdintr: CMD_TRAN_ERR\n"); 16887 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 16888 break; 16889 case CMD_RESET: 16890 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16891 "sdintr: CMD_RESET \n"); 16892 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 16893 break; 16894 case CMD_ABORTED: 16895 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16896 "sdintr: CMD_ABORTED \n"); 16897 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 16898 break; 16899 case CMD_TIMEOUT: 16900 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16901 "sdintr: CMD_TIMEOUT\n"); 16902 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 16903 break; 16904 case CMD_UNX_BUS_FREE: 16905 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16906 "sdintr: CMD_UNX_BUS_FREE \n"); 16907 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 16908 break; 16909 case CMD_TAG_REJECT: 16910 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16911 "sdintr: CMD_TAG_REJECT\n"); 16912 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 16913 break; 16914 default: 16915 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16916 "sdintr: default\n"); 16917 /* 16918 * Mark the ssc_flags for detecting invalid pkt_reason. 16919 */ 16920 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16921 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON, 16922 0, "pkt-reason"); 16923 } 16924 sd_pkt_reason_default(un, bp, xp, pktp); 16925 break; 16926 } 16927 16928 exit: 16929 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 16930 16931 /* Decrement counter to indicate that the callback routine is done. */ 16932 un->un_in_callback--; 16933 ASSERT(un->un_in_callback >= 0); 16934 16935 /* 16936 * At this point, the pkt has been dispatched, ie, it is either 16937 * being re-tried or has been returned to its caller and should 16938 * not be referenced. 16939 */ 16940 16941 mutex_exit(SD_MUTEX(un)); 16942 } 16943 16944 16945 /* 16946 * Function: sd_print_incomplete_msg 16947 * 16948 * Description: Prints the error message for a CMD_INCOMPLETE error. 16949 * 16950 * Arguments: un - ptr to associated softstate for the device. 16951 * bp - ptr to the buf(9S) for the command. 16952 * arg - message string ptr 16953 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 16954 * or SD_NO_RETRY_ISSUED.
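 *
 * These codes are supplied by sd_retry_command() when it invokes a
 * caller-provided message callback. An illustrative call, mirroring
 * the one actually made from sd_pkt_status_good() below:
 *
 *	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
 *	    sd_print_incomplete_msg, cmdp, EIO, (clock_t)0, NULL);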
16955 * 16956 * Context: May be called under interrupt context 16957 */ 16958 16959 static void 16960 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 16961 { 16962 struct scsi_pkt *pktp; 16963 char *msgp; 16964 char *cmdp = arg; 16965 16966 ASSERT(un != NULL); 16967 ASSERT(mutex_owned(SD_MUTEX(un))); 16968 ASSERT(bp != NULL); 16969 ASSERT(arg != NULL); 16970 pktp = SD_GET_PKTP(bp); 16971 ASSERT(pktp != NULL); 16972 16973 switch (code) { 16974 case SD_DELAYED_RETRY_ISSUED: 16975 case SD_IMMEDIATE_RETRY_ISSUED: 16976 msgp = "retrying"; 16977 break; 16978 case SD_NO_RETRY_ISSUED: 16979 default: 16980 msgp = "giving up"; 16981 break; 16982 } 16983 16984 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16985 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16986 "incomplete %s- %s\n", cmdp, msgp); 16987 } 16988 } 16989 16990 16991 16992 /* 16993 * Function: sd_pkt_status_good 16994 * 16995 * Description: Processing for a STATUS_GOOD code in pkt_status. 16996 * 16997 * Context: May be called under interrupt context 16998 */ 16999 17000 static void 17001 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 17002 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17003 { 17004 char *cmdp; 17005 17006 ASSERT(un != NULL); 17007 ASSERT(mutex_owned(SD_MUTEX(un))); 17008 ASSERT(bp != NULL); 17009 ASSERT(xp != NULL); 17010 ASSERT(pktp != NULL); 17011 ASSERT(pktp->pkt_reason == CMD_CMPLT); 17012 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 17013 ASSERT(pktp->pkt_resid != 0); 17014 17015 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 17016 17017 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17018 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 17019 case SCMD_READ: 17020 cmdp = "read"; 17021 break; 17022 case SCMD_WRITE: 17023 cmdp = "write"; 17024 break; 17025 default: 17026 SD_UPDATE_B_RESID(bp, pktp); 17027 sd_return_command(un, bp); 17028 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 17029 return; 17030 } 17031 17032 /* 17033 * See if we can retry the read/write, preferably immediately. 17034 * If retries are exhausted, then sd_retry_command() will update 17035 * the b_resid count. 17036 */ 17037 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 17038 cmdp, EIO, (clock_t)0, NULL); 17039 17040 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 17041 } 17042 17043 17044 17045 17046 17047 /* 17048 * Function: sd_handle_request_sense 17049 * 17050 * Description: Processing for non-auto Request Sense command. 17051 * 17052 * Arguments: un - ptr to associated softstate 17053 * sense_bp - ptr to buf(9S) for the RQS command 17054 * sense_xp - ptr to the sd_xbuf for the RQS command 17055 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 17056 * 17057 * Context: May be called under interrupt context 17058 */ 17059 17060 static void 17061 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 17062 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 17063 { 17064 struct buf *cmd_bp; /* buf for the original command */ 17065 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 17066 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 17067 size_t actual_len; /* actual sense data length */ 17068 17069 ASSERT(un != NULL); 17070 ASSERT(mutex_owned(SD_MUTEX(un))); 17071 ASSERT(sense_bp != NULL); 17072 ASSERT(sense_xp != NULL); 17073 ASSERT(sense_pktp != NULL); 17074 17075 /* 17076 * Note the sense_bp, sense_xp, and sense_pktp here are for the 17077 * RQS command and not the original command.
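 *
 * For reference, the two commands are cross-linked as follows (an
 * illustrative summary of the fields dereferenced below):
 *
 *	sense_xp->xb_sense_bp          -> cmd_bp (original command)
 *	SD_GET_PKTP(cmd_bp)            -> cmd_pktp (FLAG_SENSING set)
 *	un->un_rqs_bp, un->un_rqs_pktp -> this REQUEST SENSE command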
17078 */ 17079 ASSERT(sense_pktp == un->un_rqs_pktp); 17080 ASSERT(sense_bp == un->un_rqs_bp); 17081 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 17082 (FLAG_SENSING | FLAG_HEAD)); 17083 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 17084 FLAG_SENSING) == FLAG_SENSING); 17085 17086 /* These are the bp, xp, and pktp for the original command */ 17087 cmd_bp = sense_xp->xb_sense_bp; 17088 cmd_xp = SD_GET_XBUF(cmd_bp); 17089 cmd_pktp = SD_GET_PKTP(cmd_bp); 17090 17091 if (sense_pktp->pkt_reason != CMD_CMPLT) { 17092 /* 17093 * The REQUEST SENSE command failed. Release the REQUEST 17094 * SENSE command for re-use, get back the bp for the original 17095 * command, and attempt to re-try the original command if 17096 * FLAG_DIAGNOSE is not set in the original packet. 17097 */ 17098 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17099 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 17100 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 17101 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 17102 NULL, NULL, EIO, (clock_t)0, NULL); 17103 return; 17104 } 17105 } 17106 17107 /* 17108 * Save the relevant sense info into the xp for the original cmd. 17109 * 17110 * Note: if the request sense failed the state info will be zero 17111 * as set in sd_mark_rqs_busy() 17112 */ 17113 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 17114 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 17115 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 17116 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 17117 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 17118 SENSE_LENGTH)) { 17119 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 17120 MAX_SENSE_LENGTH); 17121 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 17122 } else { 17123 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 17124 SENSE_LENGTH); 17125 if (actual_len < SENSE_LENGTH) { 17126 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 17127 } else { 17128 cmd_xp->xb_sense_resid = 0; 17129 } 17130 } 17131 17132 /* 17133 * Free up the RQS command.... 17134 * NOTE: 17135 * Must do this BEFORE calling sd_validate_sense_data! 17136 * sd_validate_sense_data may return the original command in 17137 * which case the pkt will be freed and the flags can no 17138 * longer be touched. 17139 * SD_MUTEX is held through this process until the command 17140 * is dispatched based upon the sense data, so there are 17141 * no race conditions. 17142 */ 17143 (void) sd_mark_rqs_idle(un, sense_xp); 17144 17145 /* 17146 * For a retryable command see if we have valid sense data, if so then 17147 * turn it over to sd_decode_sense() to figure out the right course of 17148 * action. Just fail a non-retryable command. 17149 */ 17150 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 17151 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 17152 SD_SENSE_DATA_IS_VALID) { 17153 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 17154 } 17155 } else { 17156 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 17157 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 17158 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 17159 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 17160 sd_return_failed_command(un, cmd_bp, EIO); 17161 } 17162 } 17163 17164 17165 17166 17167 /* 17168 * Function: sd_handle_auto_request_sense 17169 * 17170 * Description: Processing for auto-request sense information. 
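 *
 * With ARQ the HBA overlays a struct scsi_arq_status on pkt_scbp, so
 * the original command's status, the status of the implicit REQUEST
 * SENSE, and the sense bytes all arrive in a single completion. An
 * illustrative access pattern (the same one used in the body below):
 *
 *	struct scsi_arq_status *asp =
 *	    (struct scsi_arq_status *)(pktp->pkt_scbp);
 *	uint8_t *sense = (uint8_t *)&asp->sts_sensedata;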
17171 * 17172 * Arguments: un - ptr to associated softstate 17173 * bp - ptr to buf(9S) for the command 17174 * xp - ptr to the sd_xbuf for the command 17175 * pktp - ptr to the scsi_pkt(9S) for the command 17176 * 17177 * Context: May be called under interrupt context 17178 */ 17179 17180 static void 17181 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 17182 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17183 { 17184 struct scsi_arq_status *asp; 17185 size_t actual_len; 17186 17187 ASSERT(un != NULL); 17188 ASSERT(mutex_owned(SD_MUTEX(un))); 17189 ASSERT(bp != NULL); 17190 ASSERT(xp != NULL); 17191 ASSERT(pktp != NULL); 17192 ASSERT(pktp != un->un_rqs_pktp); 17193 ASSERT(bp != un->un_rqs_bp); 17194 17195 /* 17196 * For auto-request sense, we get a scsi_arq_status back from 17197 * the HBA, with the sense data in the sts_sensedata member. 17198 * The pkt_scbp of the packet points to this scsi_arq_status. 17199 */ 17200 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 17201 17202 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 17203 /* 17204 * The auto REQUEST SENSE failed; see if we can re-try 17205 * the original command. 17206 */ 17207 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17208 "auto request sense failed (reason=%s)\n", 17209 scsi_rname(asp->sts_rqpkt_reason)); 17210 17211 sd_reset_target(un, pktp); 17212 17213 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17214 NULL, NULL, EIO, (clock_t)0, NULL); 17215 return; 17216 } 17217 17218 /* Save the relevant sense info into the xp for the original cmd. */ 17219 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 17220 xp->xb_sense_state = asp->sts_rqpkt_state; 17221 xp->xb_sense_resid = asp->sts_rqpkt_resid; 17222 if (xp->xb_sense_state & STATE_XARQ_DONE) { 17223 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 17224 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 17225 MAX_SENSE_LENGTH); 17226 } else { 17227 if (xp->xb_sense_resid > SENSE_LENGTH) { 17228 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 17229 } else { 17230 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 17231 } 17232 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 17233 if ((((struct uscsi_cmd *) 17234 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 17235 xp->xb_sense_resid = (((struct uscsi_cmd *) 17236 (xp->xb_pktinfo))->uscsi_rqlen) - 17237 actual_len; 17238 } else { 17239 xp->xb_sense_resid = 0; 17240 } 17241 } 17242 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 17243 } 17244 17245 /* 17246 * See if we have valid sense data, if so then turn it over to 17247 * sd_decode_sense() to figure out the right course of action. 17248 */ 17249 if (sd_validate_sense_data(un, bp, xp, actual_len) == 17250 SD_SENSE_DATA_IS_VALID) { 17251 sd_decode_sense(un, bp, xp, pktp); 17252 } 17253 } 17254 17255 17256 /* 17257 * Function: sd_print_sense_failed_msg 17258 * 17259 * Description: Print log message when RQS has failed. 
17260 * 17261 * Arguments: un - ptr to associated softstate 17262 * bp - ptr to buf(9S) for the command 17263 * arg - generic message string ptr 17264 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17265 * or SD_NO_RETRY_ISSUED 17266 * 17267 * Context: May be called from interrupt context 17268 */ 17269 17270 static void 17271 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 17272 int code) 17273 { 17274 char *msgp = arg; 17275 17276 ASSERT(un != NULL); 17277 ASSERT(mutex_owned(SD_MUTEX(un))); 17278 ASSERT(bp != NULL); 17279 17280 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 17281 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 17282 } 17283 } 17284 17285 17286 /* 17287 * Function: sd_validate_sense_data 17288 * 17289 * Description: Check the given sense data for validity. 17290 * If the sense data is not valid, the command will 17291 * be either failed or retried! 17292 * 17293 * Return Code: SD_SENSE_DATA_IS_INVALID 17294 * SD_SENSE_DATA_IS_VALID 17295 * 17296 * Context: May be called from interrupt context 17297 */ 17298 17299 static int 17300 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17301 size_t actual_len) 17302 { 17303 struct scsi_extended_sense *esp; 17304 struct scsi_pkt *pktp; 17305 char *msgp = NULL; 17306 sd_ssc_t *sscp; 17307 17308 ASSERT(un != NULL); 17309 ASSERT(mutex_owned(SD_MUTEX(un))); 17310 ASSERT(bp != NULL); 17311 ASSERT(bp != un->un_rqs_bp); 17312 ASSERT(xp != NULL); 17313 ASSERT(un->un_fm_private != NULL); 17314 17315 pktp = SD_GET_PKTP(bp); 17316 ASSERT(pktp != NULL); 17317 17318 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 17319 ASSERT(sscp != NULL); 17320 17321 /* 17322 * Check the status of the RQS command (auto or manual). 17323 */ 17324 switch (xp->xb_sense_status & STATUS_MASK) { 17325 case STATUS_GOOD: 17326 break; 17327 17328 case STATUS_RESERVATION_CONFLICT: 17329 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 17330 return (SD_SENSE_DATA_IS_INVALID); 17331 17332 case STATUS_BUSY: 17333 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17334 "Busy Status on REQUEST SENSE\n"); 17335 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 17336 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 17337 return (SD_SENSE_DATA_IS_INVALID); 17338 17339 case STATUS_QFULL: 17340 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17341 "QFULL Status on REQUEST SENSE\n"); 17342 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 17343 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 17344 return (SD_SENSE_DATA_IS_INVALID); 17345 17346 case STATUS_CHECK: 17347 case STATUS_TERMINATED: 17348 msgp = "Check Condition on REQUEST SENSE\n"; 17349 goto sense_failed; 17350 17351 default: 17352 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 17353 goto sense_failed; 17354 } 17355 17356 /* 17357 * See if we got the minimum required amount of sense data. 17358 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 17359 * or less. 
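 *
 * A worked example with hypothetical lengths, for illustration only:
 * if SENSE_LENGTH were 20 and the target transferred 14 bytes, the
 * caller would pass actual_len == 20 - 6 == 14; any value below
 * SUN_MIN_SENSE_LENGTH is then rejected below as "Not enough sense
 * information".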
17360 */ 17361 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 17362 (actual_len == 0)) { 17363 msgp = "Request Sense couldn't get sense data\n"; 17364 goto sense_failed; 17365 } 17366 17367 if (actual_len < SUN_MIN_SENSE_LENGTH) { 17368 msgp = "Not enough sense information\n"; 17369 /* Mark the ssc_flags for detecting invalid sense data */ 17370 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17371 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17372 "sense-data"); 17373 } 17374 goto sense_failed; 17375 } 17376 17377 /* 17378 * We require the extended sense data 17379 */ 17380 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 17381 if (esp->es_class != CLASS_EXTENDED_SENSE) { 17382 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17383 static char tmp[8]; 17384 static char buf[148]; 17385 char *p = (char *)(xp->xb_sense_data); 17386 int i; 17387 17388 mutex_enter(&sd_sense_mutex); 17389 (void) strcpy(buf, "undecodable sense information:"); 17390 for (i = 0; i < actual_len; i++) { 17391 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 17392 (void) strcpy(&buf[strlen(buf)], tmp); 17393 } 17394 i = strlen(buf); 17395 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 17396 17397 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) { 17398 scsi_log(SD_DEVINFO(un), sd_label, 17399 CE_WARN, buf); 17400 } 17401 mutex_exit(&sd_sense_mutex); 17402 } 17403 17404 /* Mark the ssc_flags for detecting invalid sense data */ 17405 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17406 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17407 "sense-data"); 17408 } 17409 17410 /* Note: Legacy behavior, fail the command with no retry */ 17411 sd_return_failed_command(un, bp, EIO); 17412 return (SD_SENSE_DATA_IS_INVALID); 17413 } 17414 17415 /* 17416 * Check that es_code is valid (es_class concatenated with es_code 17417 * make up the "response code" field). es_class will always be 7, so 17418 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 17419 * format. 17420 */ 17421 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 17422 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 17423 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 17424 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 17425 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 17426 /* Mark the ssc_flags for detecting invalid sense data */ 17427 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17428 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17429 "sense-data"); 17430 } 17431 goto sense_failed; 17432 } 17433 17434 return (SD_SENSE_DATA_IS_VALID); 17435 17436 sense_failed: 17437 /* 17438 * If the request sense failed (for whatever reason), attempt 17439 * to retry the original command. 17440 */ 17441 #if defined(__i386) || defined(__amd64) 17442 /* 17443 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 17444 * sddef.h for the Sparc platform, while x86 uses one binary 17445 * for both SCSI/FC. 17446 * The SD_RETRY_DELAY value needs to be adjusted here 17447 * whenever SD_RETRY_DELAY changes in sddef.h 17448 */ 17449 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17450 sd_print_sense_failed_msg, msgp, EIO, 17451 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 17452 #else 17453 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17454 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 17455 #endif 17456 17457 return (SD_SENSE_DATA_IS_INVALID); 17458 } 17459 17460 /* 17461 * Function: sd_decode_sense 17462 * 17463 * Description: Take recovery action(s) when SCSI Sense Data is received. 17464 * 17465 * Context: Interrupt context.
17466 */ 17467 17468 static void 17469 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17470 struct scsi_pkt *pktp) 17471 { 17472 uint8_t sense_key; 17473 17474 ASSERT(un != NULL); 17475 ASSERT(mutex_owned(SD_MUTEX(un))); 17476 ASSERT(bp != NULL); 17477 ASSERT(bp != un->un_rqs_bp); 17478 ASSERT(xp != NULL); 17479 ASSERT(pktp != NULL); 17480 17481 sense_key = scsi_sense_key(xp->xb_sense_data); 17482 17483 switch (sense_key) { 17484 case KEY_NO_SENSE: 17485 sd_sense_key_no_sense(un, bp, xp, pktp); 17486 break; 17487 case KEY_RECOVERABLE_ERROR: 17488 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 17489 bp, xp, pktp); 17490 break; 17491 case KEY_NOT_READY: 17492 sd_sense_key_not_ready(un, xp->xb_sense_data, 17493 bp, xp, pktp); 17494 break; 17495 case KEY_MEDIUM_ERROR: 17496 case KEY_HARDWARE_ERROR: 17497 sd_sense_key_medium_or_hardware_error(un, 17498 xp->xb_sense_data, bp, xp, pktp); 17499 break; 17500 case KEY_ILLEGAL_REQUEST: 17501 sd_sense_key_illegal_request(un, bp, xp, pktp); 17502 break; 17503 case KEY_UNIT_ATTENTION: 17504 sd_sense_key_unit_attention(un, xp->xb_sense_data, 17505 bp, xp, pktp); 17506 break; 17507 case KEY_WRITE_PROTECT: 17508 case KEY_VOLUME_OVERFLOW: 17509 case KEY_MISCOMPARE: 17510 sd_sense_key_fail_command(un, bp, xp, pktp); 17511 break; 17512 case KEY_BLANK_CHECK: 17513 sd_sense_key_blank_check(un, bp, xp, pktp); 17514 break; 17515 case KEY_ABORTED_COMMAND: 17516 sd_sense_key_aborted_command(un, bp, xp, pktp); 17517 break; 17518 case KEY_VENDOR_UNIQUE: 17519 case KEY_COPY_ABORTED: 17520 case KEY_EQUAL: 17521 case KEY_RESERVED: 17522 default: 17523 sd_sense_key_default(un, xp->xb_sense_data, 17524 bp, xp, pktp); 17525 break; 17526 } 17527 } 17528 17529 17530 /* 17531 * Function: sd_dump_memory 17532 * 17533 * Description: Debug logging routine to print the contents of a user provided 17534 * buffer. The output of the buffer is broken up into 256 byte 17535 * segments due to a size constraint of the scsi_log 17536 * implementation. 17537 * 17538 * Arguments: un - ptr to softstate 17539 * comp - component mask 17540 * title - "title" string to precede data when printed 17541 * data - ptr to data block to be printed 17542 * len - size of data block to be printed 17543 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 17544 * 17545 * Context: May be called from interrupt context 17546 */ 17547 17548 #define SD_DUMP_MEMORY_BUF_SIZE 256 17549 17550 static char *sd_dump_format_string[] = { 17551 " 0x%02x", 17552 " %c" 17553 }; 17554 17555 static void 17556 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 17557 int len, int fmt) 17558 { 17559 int i, j; 17560 int avail_count; 17561 int start_offset; 17562 int end_offset; 17563 size_t entry_len; 17564 char *bufp; 17565 char *local_buf; 17566 char *format_string; 17567 17568 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 17569 17570 /* 17571 * In the debug version of the driver, this function is called from a 17572 * number of places which are NOPs in the release driver. 17573 * The debug driver therefore has additional methods of filtering 17574 * debug output. 17575 */ 17576 #ifdef SDDEBUG 17577 /* 17578 * In the debug version of the driver we can reduce the amount of debug 17579 * messages by setting sd_error_level to something other than 17580 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 17581 * sd_component_mask.
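 *
 * Further below, the routine computes how many formatted entries fit
 * on one line. A worked example with hypothetical inputs: for the
 * title "Sense Data" (10 chars) and the hex format " 0x%02x"
 * (entry_len == 5), avail_count == (256 - 10 - 3) / 5 == 48 entries
 * per scsi_log() call (SD_DUMP_MEMORY_BUF_SIZE == 256).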
17582 */ 17583 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 17584 (sd_error_level != SCSI_ERR_ALL)) { 17585 return; 17586 } 17587 if (((sd_component_mask & comp) == 0) || 17588 (sd_error_level != SCSI_ERR_ALL)) { 17589 return; 17590 } 17591 #else 17592 if (sd_error_level != SCSI_ERR_ALL) { 17593 return; 17594 } 17595 #endif 17596 17597 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 17598 bufp = local_buf; 17599 /* 17600 * Available length is the length of local_buf[], minus the 17601 * length of the title string, minus one for the ":", minus 17602 * one for the newline, minus one for the NULL terminator. 17603 * This gives the #bytes available for holding the printed 17604 * values from the given data buffer. 17605 */ 17606 if (fmt == SD_LOG_HEX) { 17607 format_string = sd_dump_format_string[0]; 17608 } else /* SD_LOG_CHAR */ { 17609 format_string = sd_dump_format_string[1]; 17610 } 17611 /* 17612 * Available count is the number of elements from the given 17613 * data buffer that we can fit into the available length. 17614 * This is based upon the size of the format string used. 17615 * Make one entry and find its size. 17616 */ 17617 (void) sprintf(bufp, format_string, data[0]); 17618 entry_len = strlen(bufp); 17619 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 17620 17621 j = 0; 17622 while (j < len) { 17623 bufp = local_buf; 17624 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 17625 start_offset = j; 17626 17627 end_offset = start_offset + avail_count; 17628 17629 (void) sprintf(bufp, "%s:", title); 17630 bufp += strlen(bufp); 17631 for (i = start_offset; ((i < end_offset) && (j < len)); 17632 i++, j++) { 17633 (void) sprintf(bufp, format_string, data[i]); 17634 bufp += entry_len; 17635 } 17636 (void) sprintf(bufp, "\n"); 17637 17638 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 17639 } 17640 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 17641 } 17642 17643 /* 17644 * Function: sd_print_sense_msg 17645 * 17646 * Description: Log a message based upon the given sense data.
17647 * 17648 * Arguments: un - ptr to associated softstate 17649 * bp - ptr to buf(9S) for the command 17650 * arg - ptr to the associated sd_sense_info struct 17651 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17652 * or SD_NO_RETRY_ISSUED 17653 * 17654 * Context: May be called from interrupt context 17655 */ 17656 17657 static void 17658 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 17659 { 17660 struct sd_xbuf *xp; 17661 struct scsi_pkt *pktp; 17662 uint8_t *sensep; 17663 daddr_t request_blkno; 17664 diskaddr_t err_blkno; 17665 int severity; 17666 int pfa_flag; 17667 extern struct scsi_key_strings scsi_cmds[]; 17668 17669 ASSERT(un != NULL); 17670 ASSERT(mutex_owned(SD_MUTEX(un))); 17671 ASSERT(bp != NULL); 17672 xp = SD_GET_XBUF(bp); 17673 ASSERT(xp != NULL); 17674 pktp = SD_GET_PKTP(bp); 17675 ASSERT(pktp != NULL); 17676 ASSERT(arg != NULL); 17677 17678 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 17679 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 17680 17681 if ((code == SD_DELAYED_RETRY_ISSUED) || 17682 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 17683 severity = SCSI_ERR_RETRYABLE; 17684 } 17685 17686 /* Use absolute block number for the request block number */ 17687 request_blkno = xp->xb_blkno; 17688 17689 /* 17690 * Now try to get the error block number from the sense data 17691 */ 17692 sensep = xp->xb_sense_data; 17693 17694 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 17695 (uint64_t *)&err_blkno)) { 17696 /* 17697 * We retrieved the error block number from the information 17698 * portion of the sense data. 17699 * 17700 * For USCSI commands we are better off using the error 17701 * block no. as the requested block no. (This is the best 17702 * we can estimate.) 17703 */ 17704 if ((SD_IS_BUFIO(xp) == FALSE) && 17705 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 17706 request_blkno = err_blkno; 17707 } 17708 } else { 17709 /* 17710 * Without the es_valid bit set (for fixed format) or an 17711 * information descriptor (for descriptor format) we cannot 17712 * be certain of the error blkno, so just use the 17713 * request_blkno. 17714 */ 17715 err_blkno = (diskaddr_t)request_blkno; 17716 } 17717 17718 /* 17719 * The following will log the buffer contents for the release driver 17720 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 17721 * level is set to verbose.
17722 */ 17723 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 17724 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 17725 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 17726 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 17727 17728 if (pfa_flag == FALSE) { 17729 /* This is normally only set for USCSI */ 17730 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 17731 return; 17732 } 17733 17734 if ((SD_IS_BUFIO(xp) == TRUE) && 17735 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 17736 (severity < sd_error_level))) { 17737 return; 17738 } 17739 } 17740 /* 17741 * Check for Sonoma Failover and keep a count of how many failed I/O's 17742 */ 17743 if ((SD_IS_LSI(un)) && 17744 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 17745 (scsi_sense_asc(sensep) == 0x94) && 17746 (scsi_sense_ascq(sensep) == 0x01)) { 17747 un->un_sonoma_failure_count++; 17748 if (un->un_sonoma_failure_count > 1) { 17749 return; 17750 } 17751 } 17752 17753 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP || 17754 ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) && 17755 (pktp->pkt_resid == 0))) { 17756 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 17757 request_blkno, err_blkno, scsi_cmds, 17758 (struct scsi_extended_sense *)sensep, 17759 un->un_additional_codes, NULL); 17760 } 17761 } 17762 17763 /* 17764 * Function: sd_sense_key_no_sense 17765 * 17766 * Description: Recovery action when sense data was not received. 17767 * 17768 * Context: May be called from interrupt context 17769 */ 17770 17771 static void 17772 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 17773 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17774 { 17775 struct sd_sense_info si; 17776 17777 ASSERT(un != NULL); 17778 ASSERT(mutex_owned(SD_MUTEX(un))); 17779 ASSERT(bp != NULL); 17780 ASSERT(xp != NULL); 17781 ASSERT(pktp != NULL); 17782 17783 si.ssi_severity = SCSI_ERR_FATAL; 17784 si.ssi_pfa_flag = FALSE; 17785 17786 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17787 17788 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17789 &si, EIO, (clock_t)0, NULL); 17790 } 17791 17792 17793 /* 17794 * Function: sd_sense_key_recoverable_error 17795 * 17796 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 17797 * 17798 * Context: May be called from interrupt context 17799 */ 17800 17801 static void 17802 sd_sense_key_recoverable_error(struct sd_lun *un, 17803 uint8_t *sense_datap, 17804 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17805 { 17806 struct sd_sense_info si; 17807 uint8_t asc = scsi_sense_asc(sense_datap); 17808 17809 ASSERT(un != NULL); 17810 ASSERT(mutex_owned(SD_MUTEX(un))); 17811 ASSERT(bp != NULL); 17812 ASSERT(xp != NULL); 17813 ASSERT(pktp != NULL); 17814 17815 /* 17816 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 17817 */ 17818 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 17819 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17820 si.ssi_severity = SCSI_ERR_INFO; 17821 si.ssi_pfa_flag = TRUE; 17822 } else { 17823 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17824 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 17825 si.ssi_severity = SCSI_ERR_RECOVERED; 17826 si.ssi_pfa_flag = FALSE; 17827 } 17828 17829 if (pktp->pkt_resid == 0) { 17830 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17831 sd_return_command(un, bp); 17832 return; 17833 } 17834 17835 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17836 &si, EIO, (clock_t)0, NULL); 17837 } 17838 17839 17840 17841 17842 /* 17843 * Function: sd_sense_key_not_ready 17844 * 17845 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
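 *
 * A condensed, illustrative map of the ASC/ASCQ dispatch implemented
 * below (see the switch statement for the authoritative handling):
 *
 *	0x04/0x01  LUN becoming ready           -> retry
 *	0x04/0x03  manual intervention required -> fail
 *	0x04/other LUN not ready                -> START STOP UNIT + retry
 *	0x3A       medium not present           -> fail immediately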
17846 * 17847 * Context: May be called from interrupt context 17848 */ 17849 17850 static void 17851 sd_sense_key_not_ready(struct sd_lun *un, 17852 uint8_t *sense_datap, 17853 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17854 { 17855 struct sd_sense_info si; 17856 uint8_t asc = scsi_sense_asc(sense_datap); 17857 uint8_t ascq = scsi_sense_ascq(sense_datap); 17858 17859 ASSERT(un != NULL); 17860 ASSERT(mutex_owned(SD_MUTEX(un))); 17861 ASSERT(bp != NULL); 17862 ASSERT(xp != NULL); 17863 ASSERT(pktp != NULL); 17864 17865 si.ssi_severity = SCSI_ERR_FATAL; 17866 si.ssi_pfa_flag = FALSE; 17867 17868 /* 17869 * Update error stats after first NOT READY error. Disks may have 17870 * been powered down and may need to be restarted. For CDROMs, 17871 * report NOT READY errors only if media is present. 17872 */ 17873 if ((ISCD(un) && (asc == 0x3A)) || 17874 (xp->xb_nr_retry_count > 0)) { 17875 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17876 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 17877 } 17878 17879 /* 17880 * Just fail if the "not ready" retry limit has been reached. 17881 */ 17882 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) { 17883 /* Special check for error message printing for removables. */ 17884 if (un->un_f_has_removable_media && (asc == 0x04) && 17885 (ascq >= 0x04)) { 17886 si.ssi_severity = SCSI_ERR_ALL; 17887 } 17888 goto fail_command; 17889 } 17890 17891 /* 17892 * Check the ASC and ASCQ in the sense data as needed, to determine 17893 * what to do. 17894 */ 17895 switch (asc) { 17896 case 0x04: /* LOGICAL UNIT NOT READY */ 17897 /* 17898 * disk drives that don't spin up result in a very long delay 17899 * in format without warning messages. We will log a message 17900 * if the error level is set to verbose. 17901 */ 17902 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17903 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17904 "logical unit not ready, resetting disk\n"); 17905 } 17906 17907 /* 17908 * There are different requirements for CDROMs and disks for 17909 * the number of retries. If a CD-ROM is giving this, it is 17910 * probably reading TOC and is in the process of getting 17911 * ready, so we should keep on trying for a long time to make 17912 * sure that all types of media are taken into account (for 17913 * some media the drive takes a long time to read TOC). For 17914 * disks we do not want to retry this too many times as this 17915 * can cause a long hang in format when the drive refuses to 17916 * spin up (a very common failure). 17917 */ 17918 switch (ascq) { 17919 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 17920 /* 17921 * Disk drives frequently refuse to spin up which 17922 * results in a very long hang in format without 17923 * warning messages. 17924 * 17925 * Note: This code preserves the legacy behavior of 17926 * comparing xb_nr_retry_count against zero for fibre 17927 * channel targets instead of comparing against the 17928 * un_reset_retry_count value. The reason for this 17929 * discrepancy has been so utterly lost beneath the 17930 * Sands of Time that even Indiana Jones could not 17931 * find it.
17932 */ 17933 if (un->un_f_is_fibre == TRUE) { 17934 if (((sd_level_mask & SD_LOGMASK_DIAG) || 17935 (xp->xb_nr_retry_count > 0)) && 17936 (un->un_startstop_timeid == NULL)) { 17937 scsi_log(SD_DEVINFO(un), sd_label, 17938 CE_WARN, "logical unit not ready, " 17939 "resetting disk\n"); 17940 sd_reset_target(un, pktp); 17941 } 17942 } else { 17943 if (((sd_level_mask & SD_LOGMASK_DIAG) || 17944 (xp->xb_nr_retry_count > 17945 un->un_reset_retry_count)) && 17946 (un->un_startstop_timeid == NULL)) { 17947 scsi_log(SD_DEVINFO(un), sd_label, 17948 CE_WARN, "logical unit not ready, " 17949 "resetting disk\n"); 17950 sd_reset_target(un, pktp); 17951 } 17952 } 17953 break; 17954 17955 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 17956 /* 17957 * If the target is in the process of becoming 17958 * ready, just proceed with the retry. This can 17959 * happen with CD-ROMs that take a long time to 17960 * read TOC after a power cycle or reset. 17961 */ 17962 goto do_retry; 17963 17964 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 17965 break; 17966 17967 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 17968 /* 17969 * Retries cannot help here so just fail right away. 17970 */ 17971 goto fail_command; 17972 17973 case 0x88: 17974 /* 17975 * Vendor-unique code for T3/T4: it indicates a 17976 * path problem in a multipathed config, but as far as 17977 * the target driver is concerned it equates to a fatal 17978 * error, so we should just fail the command right away 17979 * (without printing anything to the console). If this 17980 * is not a T3/T4, fall thru to the default recovery 17981 * action. 17982 * T3/T4 is FC only; no need to check is_fibre. 17983 */ 17984 if (SD_IS_T3(un) || SD_IS_T4(un)) { 17985 sd_return_failed_command(un, bp, EIO); 17986 return; 17987 } 17988 /* FALLTHRU */ 17989 17990 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 17991 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 17992 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 17993 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 17994 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 17995 default: /* Possible future codes in SCSI spec? */ 17996 /* 17997 * For removable-media devices, do not retry if 17998 * ASCQ > 2 as these result mostly from USCSI commands 17999 * on MMC devices issued to check status of an 18000 * operation initiated in immediate mode. Also for 18001 * ASCQ >= 4 do not print console messages as these 18002 * mainly represent a user-initiated operation 18003 * instead of a system failure. 18004 */ 18005 if (un->un_f_has_removable_media) { 18006 si.ssi_severity = SCSI_ERR_ALL; 18007 goto fail_command; 18008 } 18009 break; 18010 } 18011 18012 /* 18013 * As part of our recovery attempt for the NOT READY 18014 * condition, we issue a START STOP UNIT command. However 18015 * we want to wait for a short delay before attempting this 18016 * as there may still be more commands coming back from the 18017 * target with the check condition. To do this we use 18018 * timeout(9F) to call sd_start_stop_unit_callback() after 18019 * the delay interval expires. (sd_start_stop_unit_callback() 18020 * dispatches sd_start_stop_unit_task(), which will issue 18021 * the actual START STOP UNIT command.) The delay interval 18022 * is one-half of the delay that we will use to retry the 18023 * command that generated the NOT READY condition.
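 *
 * An illustrative timeline, assuming a hypothetical un_busy_timeout
 * of 6 seconds: the START STOP UNIT is dispatched after ~3 seconds
 * (un_busy_timeout / 2), and the queued retry of the original command
 * is only started from sd_start_stop_unit_task() once the START STOP
 * UNIT completes, keeping the two strictly serial.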
                 *
                 * Note that we could just dispatch sd_start_stop_unit_task()
                 * from here and allow it to sleep for the delay interval,
                 * but then we would be tying up the taskq thread
                 * unnecessarily for the duration of the delay.
                 *
                 * Do not issue the START STOP UNIT if the current command
                 * is already a START STOP UNIT.
                 */
                if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
                        break;
                }

                /*
                 * Do not schedule the timeout if one is already pending.
                 */
                if (un->un_startstop_timeid != NULL) {
                        SD_INFO(SD_LOG_ERROR, un,
                            "sd_sense_key_not_ready: restart already issued to"
                            " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
                            ddi_get_instance(SD_DEVINFO(un)));
                        break;
                }

                /*
                 * Schedule the START STOP UNIT command, then queue the command
                 * for a retry.
                 *
                 * Note: A timeout is not scheduled for this retry because we
                 * want the retry to be serial with the START_STOP_UNIT. The
                 * retry will be started when the START_STOP_UNIT is completed
                 * in sd_start_stop_unit_task.
                 */
                un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
                    un, un->un_busy_timeout / 2);
                xp->xb_nr_retry_count++;
                sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
                return;

        case 0x05:      /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
                if (sd_error_level < SCSI_ERR_RETRYABLE) {
                        scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                            "unit does not respond to selection\n");
                }
                break;

        case 0x3A:      /* MEDIUM NOT PRESENT */
                if (sd_error_level >= SCSI_ERR_FATAL) {
                        scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                            "Caddy not inserted in drive\n");
                }

                sr_ejected(un);
                un->un_mediastate = DKIO_EJECTED;
                /* The state has changed, inform the media watch routines */
                cv_broadcast(&un->un_state_cv);
                /* Just fail if no media is present in the drive. */
                goto fail_command;

        default:
                if (sd_error_level < SCSI_ERR_RETRYABLE) {
                        scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
                            "Unit not Ready. Additional sense code 0x%x\n",
                            asc);
                }
                break;
        }

do_retry:

        /*
         * Retry the command, as some targets may report NOT READY for
         * several seconds after being reset.
         */
        xp->xb_nr_retry_count++;
        si.ssi_severity = SCSI_ERR_RETRYABLE;
        sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
            &si, EIO, un->un_busy_timeout, NULL);

        return;

fail_command:
        sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
        sd_return_failed_command(un, bp, EIO);
}
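
/*
 * The NOT READY (02/04/00) recovery above is asynchronous; in outline,
 * the sequence is:
 *
 *      interrupt context:  decode sense, bump xb_nr_retry_count, queue
 *                          the buf via sd_set_retry_bp(), and arm a
 *                          timeout(9F) for un_busy_timeout / 2
 *      timeout context:    sd_start_stop_unit_callback() dispatches
 *                          sd_start_stop_unit_task() to a taskq thread
 *      taskq context:      the START STOP UNIT command is issued and,
 *                          on its completion, the queued retry is started
 *
 * Leaving the retry unscheduled until the START STOP UNIT completes is
 * what keeps the two commands serialized.
 */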


/*
 *    Function: sd_sense_key_medium_or_hardware_error
 *
 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
 *              sense key.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
        struct sd_sense_info    si;
        uint8_t sense_key = scsi_sense_key(sense_datap);
        uint8_t asc = scsi_sense_asc(sense_datap);

        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        ASSERT(xp != NULL);
        ASSERT(pktp != NULL);

        si.ssi_severity = SCSI_ERR_FATAL;
        si.ssi_pfa_flag = FALSE;

        if (sense_key == KEY_MEDIUM_ERROR) {
                SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
        }

        SD_UPDATE_ERRSTATS(un, sd_harderrs);

        if ((un->un_reset_retry_count != 0) &&
            (xp->xb_retry_count == un->un_reset_retry_count)) {
                mutex_exit(SD_MUTEX(un));
                /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
                if (un->un_f_allow_bus_device_reset == TRUE) {

                        boolean_t try_resetting_target = B_TRUE;

                        /*
                         * We need to be able to handle specific ASC when we
                         * are handling a KEY_HARDWARE_ERROR.  In particular,
                         * taking the default action of resetting the target
                         * may not be the appropriate way to attempt recovery.
                         * Resetting a target because of a single LUN failure
                         * victimizes all LUNs on that target.
                         *
                         * This is true for the LSI arrays: if an LSI array
                         * controller returns an ASC of 0x84 (LUN Dead) we
                         * should trust it.
                         */
                        if (sense_key == KEY_HARDWARE_ERROR) {
                                switch (asc) {
                                case 0x84:
                                        if (SD_IS_LSI(un)) {
                                                try_resetting_target = B_FALSE;
                                        }
                                        break;
                                default:
                                        break;
                                }
                        }

                        if (try_resetting_target == B_TRUE) {
                                int reset_retval = 0;
                                if (un->un_f_lun_reset_enabled == TRUE) {
                                        SD_TRACE(SD_LOG_IO_CORE, un,
                                            "sd_sense_key_medium_or_hardware_"
                                            "error: issuing RESET_LUN\n");
                                        reset_retval =
                                            scsi_reset(SD_ADDRESS(un),
                                            RESET_LUN);
                                }
                                if (reset_retval == 0) {
                                        SD_TRACE(SD_LOG_IO_CORE, un,
                                            "sd_sense_key_medium_or_hardware_"
                                            "error: issuing RESET_TARGET\n");
                                        (void) scsi_reset(SD_ADDRESS(un),
                                            RESET_TARGET);
                                }
                        }
                }
                mutex_enter(SD_MUTEX(un));
        }

        /*
         * This really ought to be a fatal error, but we will retry anyway
         * as some drives report this as a spurious error.
         */
        sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
            &si, EIO, (clock_t)0, NULL);
}
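
/*
 * Note on the ASC 0x84 escape above: a "LUN Dead" report from an LSI
 * array controller is treated as authoritative, since resetting the
 * target in that case would disrupt the remaining healthy LUNs without
 * recovering this one; the command instead proceeds directly to the
 * standard retry path below the reset logic.
 */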


/*
 *    Function: sd_sense_key_illegal_request
 *
 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
        struct sd_sense_info    si;

        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        ASSERT(xp != NULL);
        ASSERT(pktp != NULL);

        SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err);

        si.ssi_severity = SCSI_ERR_INFO;
        si.ssi_pfa_flag = FALSE;

        /* Pointless to retry if the target thinks it's an illegal request */
        sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
        sd_return_failed_command(un, bp, EIO);
}




/*
 *    Function: sd_sense_key_unit_attention
 *
 * Description: Recovery actions for a SCSI "Unit Attention" sense key.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
        /*
         * For UNIT ATTENTION we allow retries for one minute. Devices
         * like Sonoma can return UNIT ATTENTION close to a minute
         * under certain conditions.
         */
        int             retry_check_flag = SD_RETRIES_UA;
        boolean_t       kstat_updated = B_FALSE;
        struct sd_sense_info    si;
        uint8_t asc = scsi_sense_asc(sense_datap);
        uint8_t ascq = scsi_sense_ascq(sense_datap);

        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        ASSERT(xp != NULL);
        ASSERT(pktp != NULL);

        si.ssi_severity = SCSI_ERR_INFO;
        si.ssi_pfa_flag = FALSE;


        switch (asc) {
        case 0x5D:  /* FAILURE PREDICTION THRESHOLD EXCEEDED */
                if (sd_report_pfa != 0) {
                        SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
                        si.ssi_pfa_flag = TRUE;
                        retry_check_flag = SD_RETRIES_STANDARD;
                        goto do_retry;
                }

                break;

        case 0x29:  /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
                if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
                        un->un_resvd_status |=
                            (SD_LOST_RESERVE | SD_WANT_RESERVE);
                }
#ifdef _LP64
                if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) {
                        if (taskq_dispatch(sd_tq, sd_reenable_dsense_task,
                            un, KM_NOSLEEP) == 0) {
                                /*
                                 * If we can't dispatch the task we'll just
                                 * live without descriptor sense.  We can
                                 * try again on the next "unit attention"
                                 */
                                SD_ERROR(SD_LOG_ERROR, un,
                                    "sd_sense_key_unit_attention: "
                                    "Could not dispatch "
                                    "sd_reenable_dsense_task\n");
                        }
                }
#endif /* _LP64 */
                /* FALLTHRU */

        case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
                if (!un->un_f_has_removable_media) {
                        break;
                }

                /*
                 * When we get a unit attention from a removable-media device,
                 * it may be in a state that will take a long time to recover
                 * (e.g., from a reset).  Since we are executing in interrupt
                 * context here, we cannot wait around for the device to come
                 * back.  So hand this command off to sd_media_change_task()
                 * for deferred processing under taskq thread context. (Note
                 * that the command still may be failed if a problem is
                 * encountered at a later time.)
                 */
                if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
                    KM_NOSLEEP) == 0) {
                        /*
                         * Cannot dispatch the request so fail the command.
                         */
                        SD_UPDATE_ERRSTATS(un, sd_harderrs);
                        SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
                        si.ssi_severity = SCSI_ERR_FATAL;
                        sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
                        sd_return_failed_command(un, bp, EIO);
                }

                /*
                 * If the dispatch of sd_media_change_task() failed, we have
                 * already updated the kstats above.  If the dispatch
                 * succeeded, sd_media_change_task() will update the kstats
                 * itself should it encounter an error.  Either way, mark the
                 * kstats as updated here.
                 */
                kstat_updated = B_TRUE;

                /*
                 * Either the command has been successfully dispatched to a
                 * task Q for retrying, or the dispatch failed. In either case
                 * do NOT retry again by calling sd_retry_command. This sets up
                 * two retries of the same command and when one completes and
                 * frees the resources the other will access freed memory,
                 * a bad thing.
                 */
                return;

        default:
                break;
        }

        /*
         * ASC  ASCQ
         *  2A   09     Capacity data has changed
         *  2A   01     Mode parameters changed
         *  3F   0E     Reported luns data has changed
         * Arrays that support logical unit expansion should report
         * capacity changes (2Ah/09).  "Mode parameters changed" and
         * "Reported luns data has changed" serve as approximations.
         */
        if (((asc == 0x2a) && (ascq == 0x09)) ||
            ((asc == 0x2a) && (ascq == 0x01)) ||
            ((asc == 0x3f) && (ascq == 0x0e))) {
                if (taskq_dispatch(sd_tq, sd_target_change_task, un,
                    KM_NOSLEEP) == 0) {
                        SD_ERROR(SD_LOG_ERROR, un,
                            "sd_sense_key_unit_attention: "
                            "Could not dispatch sd_target_change_task\n");
                }
        }

        /*
         * Update kstat if we haven't done that.
         */
        if (!kstat_updated) {
                SD_UPDATE_ERRSTATS(un, sd_harderrs);
                SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
        }

do_retry:
        sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
            EIO, SD_UA_RETRY_DELAY, NULL);
}
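
/*
 * A note on the taskq_dispatch(9F) pattern used above: this code runs in
 * interrupt context, so work that may block is handed off to a taskq
 * thread with KM_NOSLEEP.  A return value of 0 means the dispatch itself
 * failed and the caller must handle the command inline (fail it, or
 * simply wait for the next unit attention).  On a successful dispatch
 * the taskq thread owns the buf, which is why the code above returns
 * without also queueing a retry of its own.
 */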


/*
 *    Function: sd_sense_key_fail_command
 *
 * Description: Used to fail a command when we don't like the sense key that
 *              was returned.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
        struct sd_sense_info    si;

        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        ASSERT(xp != NULL);
        ASSERT(pktp != NULL);

        si.ssi_severity = SCSI_ERR_FATAL;
        si.ssi_pfa_flag = FALSE;

        sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
        sd_return_failed_command(un, bp, EIO);
}



/*
 *    Function: sd_sense_key_blank_check
 *
 * Description: Recovery actions for a SCSI "Blank Check" sense key.
 *              Has no monetary connotation.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
        struct sd_sense_info    si;

        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        ASSERT(xp != NULL);
        ASSERT(pktp != NULL);

        /*
         * Blank check is not fatal for removable devices, therefore
         * it does not require a console message.
         */
        si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL :
            SCSI_ERR_FATAL;
        si.ssi_pfa_flag = FALSE;

        sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
        sd_return_failed_command(un, bp, EIO);
}




/*
 *    Function: sd_sense_key_aborted_command
 *
 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
        struct sd_sense_info    si;

        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        ASSERT(xp != NULL);
        ASSERT(pktp != NULL);

        si.ssi_severity = SCSI_ERR_FATAL;
        si.ssi_pfa_flag = FALSE;

        SD_UPDATE_ERRSTATS(un, sd_harderrs);

        /*
         * This really ought to be a fatal error, but we will retry anyway
         * as some drives report this as a spurious error.
         */
        sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
            &si, EIO, drv_usectohz(100000), NULL);
}



/*
 *    Function: sd_sense_key_default
 *
 * Description: Default recovery action for several SCSI sense keys (basically
 *              attempts a retry).
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
        struct sd_sense_info    si;
        uint8_t sense_key = scsi_sense_key(sense_datap);

        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        ASSERT(xp != NULL);
        ASSERT(pktp != NULL);

        SD_UPDATE_ERRSTATS(un, sd_harderrs);

        /*
         * Undecoded sense key.  Attempt retries and hope that will fix
         * the problem.  Otherwise, we're dead.
         */
        if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
                scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                    "Unhandled Sense Key '%s'\n", sense_keys[sense_key]);
        }

        si.ssi_severity = SCSI_ERR_FATAL;
        si.ssi_pfa_flag = FALSE;

        sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
            &si, EIO, (clock_t)0, NULL);
}
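
/*
 * The sd_sense_key_*() routines above are leaf handlers: the sense-decode
 * logic elsewhere in this file selects one of them based on the sense key
 * in the returned sense data, and each handler decides the disposition
 * (retry vs. fail), the console messaging, and the error kstat updates
 * appropriate for its key.  sd_sense_key_default() is the catch-all for
 * sense keys without a dedicated handler.
 */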


/*
 *    Function: sd_print_retry_msg
 *
 * Description: Print a message indicating the retry action being taken.
 *
 *   Arguments: un - ptr to associated softstate
 *              bp - ptr to buf(9S) for the command
 *              arg - not used.
 *              flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *                      or SD_NO_RETRY_ISSUED
 *
 *     Context: May be called from interrupt context
 */
/* ARGSUSED */
static void
sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag)
{
        struct sd_xbuf  *xp;
        struct scsi_pkt *pktp;
        char *reasonp;
        char *msgp;

        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        pktp = SD_GET_PKTP(bp);
        ASSERT(pktp != NULL);
        xp = SD_GET_XBUF(bp);
        ASSERT(xp != NULL);

        ASSERT(!mutex_owned(&un->un_pm_mutex));
        mutex_enter(&un->un_pm_mutex);
        if ((un->un_state == SD_STATE_SUSPENDED) ||
            (SD_DEVICE_IS_IN_LOW_POWER(un)) ||
            (pktp->pkt_flags & FLAG_SILENT)) {
                mutex_exit(&un->un_pm_mutex);
                goto update_pkt_reason;
        }
        mutex_exit(&un->un_pm_mutex);

        /*
         * Suppress messages if they are all the same pkt_reason; with
         * TQ, many (up to 256) are returned with the same pkt_reason.
         * If we are in panic, then suppress the retry messages.
         */
        switch (flag) {
        case SD_NO_RETRY_ISSUED:
                msgp = "giving up";
                break;
        case SD_IMMEDIATE_RETRY_ISSUED:
        case SD_DELAYED_RETRY_ISSUED:
                if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) ||
                    ((pktp->pkt_reason == un->un_last_pkt_reason) &&
                    (sd_error_level != SCSI_ERR_ALL))) {
                        return;
                }
                msgp = "retrying command";
                break;
        default:
                goto update_pkt_reason;
        }

        reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" :
            scsi_rname(pktp->pkt_reason));

        if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
                scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                    "SCSI transport failed: reason '%s': %s\n", reasonp, msgp);
        }

update_pkt_reason:
        /*
         * Update un->un_last_pkt_reason with the value in pktp->pkt_reason.
         * This is to prevent multiple console messages for the same failure
         * condition.  Note that un->un_last_pkt_reason is NOT restored if &
         * when the command is retried successfully because there still may be
         * more commands coming back with the same value of pktp->pkt_reason.
         */
        if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) {
                un->un_last_pkt_reason = pktp->pkt_reason;
        }
}
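
/*
 * Suppression behavior, summarized: un_last_pkt_reason remembers the most
 * recently seen transport failure reason, so a burst of commands (e.g.,
 * a full tagged queue) all failing for the same pkt_reason produces one
 * console line rather than one per command.  Retries whose reason matches
 * the saved value are logged only when sd_error_level is SCSI_ERR_ALL.
 */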

/*
 *    Function: sd_print_cmd_incomplete_msg
 *
 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason.
 *
 *   Arguments: un - ptr to associated softstate
 *              bp - ptr to buf(9S) for the command
 *              arg - passed to sd_print_retry_msg()
 *              code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *                      or SD_NO_RETRY_ISSUED
 *
 *     Context: May be called from interrupt context
 */

static void
sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg,
    int code)
{
        dev_info_t      *dip;

        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);

        switch (code) {
        case SD_NO_RETRY_ISSUED:
                /* Command was failed. Someone turned off this target? */
                if (un->un_state != SD_STATE_OFFLINE) {
                        /*
                         * Suppress the message if we are detaching and the
                         * device has been disconnected.  Note that
                         * DEVI_IS_DEVICE_REMOVED is a consolidation-private
                         * interface and not part of the DDI.
                         */
                        dip = un->un_sd->sd_dev;
                        if (!(DEVI_IS_DETACHING(dip) &&
                            DEVI_IS_DEVICE_REMOVED(dip))) {
                                scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                                    "disk not responding to selection\n");
                        }
                        New_state(un, SD_STATE_OFFLINE);
                }
                break;

        case SD_DELAYED_RETRY_ISSUED:
        case SD_IMMEDIATE_RETRY_ISSUED:
        default:
                /* Command was successfully queued for retry */
                sd_print_retry_msg(un, bp, arg, code);
                break;
        }
}


/*
 *    Function: sd_pkt_reason_cmd_incomplete
 *
 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
        int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE;

        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        ASSERT(xp != NULL);
        ASSERT(pktp != NULL);

        /* Do not do a reset if selection did not complete */
        /* Note: Should this not just check the bit? */
        if (pktp->pkt_state != STATE_GOT_BUS) {
                SD_UPDATE_ERRSTATS(un, sd_transerrs);
                sd_reset_target(un, pktp);
        }

        /*
         * If the target was not successfully selected, then set
         * SD_RETRIES_FAILFAST to indicate that we lost communication
         * with the target, and further retries and/or commands are
         * likely to take a long time.
         */
        if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) {
                flag |= SD_RETRIES_FAILFAST;
        }

        SD_UPDATE_RESERVATION_STATUS(un, pktp);

        sd_retry_command(un, bp, flag,
            sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 *    Function: sd_pkt_reason_cmd_tran_err
 *
 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        ASSERT(xp != NULL);
        ASSERT(pktp != NULL);

        /*
         * Do not reset if we got a parity error, or if
         * selection did not complete.
         */
        SD_UPDATE_ERRSTATS(un, sd_harderrs);
        /* Note: Should this not just check the bit for pkt_state? */
        if (((pktp->pkt_statistics & STAT_PERR) == 0) &&
            (pktp->pkt_state != STATE_GOT_BUS)) {
                SD_UPDATE_ERRSTATS(un, sd_transerrs);
                sd_reset_target(un, pktp);
        }

        SD_UPDATE_RESERVATION_STATUS(un, pktp);

        sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
            sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}
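
/*
 * Regarding the "should this not just check the bit?" questions above:
 * pkt_state is a bit mask of STATE_* flags, so the equality test
 *
 *      if (pktp->pkt_state != STATE_GOT_BUS)
 *
 * triggers the reset unless STATE_GOT_BUS is the *only* state bit set,
 * whereas a bit test such as ((pktp->pkt_state & STATE_GOT_BUS) == 0)
 * would skip the reset whenever the bus was never obtained, regardless
 * of any other bits.  The equality form is preserved as legacy behavior.
 */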



/*
 *    Function: sd_pkt_reason_cmd_reset
 *
 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        ASSERT(xp != NULL);
        ASSERT(pktp != NULL);

        /* The target may still be running the command, so try to reset. */
        SD_UPDATE_ERRSTATS(un, sd_transerrs);
        sd_reset_target(un, pktp);

        SD_UPDATE_RESERVATION_STATUS(un, pktp);

        /*
         * If pkt_reason is CMD_RESET chances are that this pkt got
         * reset because another target on this bus caused it. The target
         * that caused it should get CMD_TIMEOUT with pkt_statistics
         * of STAT_TIMEOUT/STAT_DEV_RESET.
         */
        sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
            sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}




/*
 *    Function: sd_pkt_reason_cmd_aborted
 *
 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        ASSERT(xp != NULL);
        ASSERT(pktp != NULL);

        /* The target may still be running the command, so try to reset. */
        SD_UPDATE_ERRSTATS(un, sd_transerrs);
        sd_reset_target(un, pktp);

        SD_UPDATE_RESERVATION_STATUS(un, pktp);

        /*
         * If pkt_reason is CMD_ABORTED chances are that this pkt got
         * aborted because another target on this bus caused it. The target
         * that caused it should get CMD_TIMEOUT with pkt_statistics
         * of STAT_TIMEOUT/STAT_DEV_RESET.
         */
        sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
            sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 *    Function: sd_pkt_reason_cmd_timeout
 *
 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        ASSERT(xp != NULL);
        ASSERT(pktp != NULL);


        SD_UPDATE_ERRSTATS(un, sd_transerrs);
        sd_reset_target(un, pktp);

        SD_UPDATE_RESERVATION_STATUS(un, pktp);

        /*
         * A command timeout indicates that we could not establish
         * communication with the target, so set SD_RETRIES_FAILFAST
         * as further retries/commands are likely to take a long time.
         */
        sd_retry_command(un, bp,
            (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST),
            sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}
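
/*
 * SD_RETRIES_FAILFAST note: both CMD_INCOMPLETE (when the target was
 * never successfully selected) and CMD_TIMEOUT mark their retries as
 * failfast candidates, since loss of communication with the target
 * means that queued and subsequent commands are likely to stall for
 * long periods; the failfast machinery can then fail pending I/O
 * quickly instead of letting each command ride out its full retry
 * sequence.
 */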



/*
 *    Function: sd_pkt_reason_cmd_unx_bus_free
 *
 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
        void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code);

        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        ASSERT(xp != NULL);
        ASSERT(pktp != NULL);

        SD_UPDATE_ERRSTATS(un, sd_harderrs);
        SD_UPDATE_RESERVATION_STATUS(un, pktp);

        funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ?
            sd_print_retry_msg : NULL;

        sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
            funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}


/*
 *    Function: sd_pkt_reason_cmd_tag_reject
 *
 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        ASSERT(xp != NULL);
        ASSERT(pktp != NULL);

        SD_UPDATE_ERRSTATS(un, sd_harderrs);
        pktp->pkt_flags = 0;
        un->un_tagflags = 0;
        if (un->un_f_opt_queueing == TRUE) {
                un->un_throttle = min(un->un_throttle, 3);
        } else {
                un->un_throttle = 1;
        }
        mutex_exit(SD_MUTEX(un));
        (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
        mutex_enter(SD_MUTEX(un));

        SD_UPDATE_RESERVATION_STATUS(un, pktp);

        /* Legacy behavior not to check retry counts here. */
        sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE),
            sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}


/*
 *    Function: sd_pkt_reason_default
 *
 * Description: Default recovery actions for SCSA pkt_reason values that
 *              do not have more explicit recovery actions.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        ASSERT(xp != NULL);
        ASSERT(pktp != NULL);

        SD_UPDATE_ERRSTATS(un, sd_transerrs);
        sd_reset_target(un, pktp);

        SD_UPDATE_RESERVATION_STATUS(un, pktp);

        sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
            sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 *    Function: sd_pkt_status_check_condition
 *
 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        ASSERT(xp != NULL);
        ASSERT(pktp != NULL);

        SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
            "entry: buf:0x%p xp:0x%p\n", bp, xp);

        /*
         * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
         * command will be retried after the request sense).  Otherwise, retry
         * the command.  Note: we are issuing the request sense even though the
         * retry limit may have been reached for the failed command.
         */
        if (un->un_f_arq_enabled == FALSE) {
                SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
                    "no ARQ, sending request sense command\n");
                sd_send_request_sense_command(un, bp, pktp);
        } else {
                SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
                    "ARQ, retrying request sense command\n");
#if defined(__i386) || defined(__amd64)
                /*
                 * The SD_RETRY_DELAY value needs to be adjusted here
                 * when SD_RETRY_DELAY changes in sddef.h
                 */
                sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
                    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
                    NULL);
#else
                sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
                    EIO, SD_RETRY_DELAY, NULL);
#endif
        }

        SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
}
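
/*
 * ARQ note: with auto request sense enabled, the HBA has already fetched
 * the sense data by the time this routine runs, so the command is simply
 * requeued and the sense is decoded on the ARQ path.  Without ARQ the
 * driver must issue its own REQUEST SENSE first -- even when the failed
 * command is out of retries -- because the sense data is needed to
 * decide how to dispose of the command.
 */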


/*
 *    Function: sd_pkt_status_busy
 *
 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        ASSERT(xp != NULL);
        ASSERT(pktp != NULL);

        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_pkt_status_busy: entry\n");

        /* If retries are exhausted, just fail the command. */
        if (xp->xb_retry_count >= un->un_busy_retry_count) {
                scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
                    "device busy too long\n");
                sd_return_failed_command(un, bp, EIO);
                SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                    "sd_pkt_status_busy: exit\n");
                return;
        }
        xp->xb_retry_count++;

        /*
         * Try to reset the target. However, we do not want to perform
         * more than one reset if the device continues to fail. The reset
         * will be performed when the retry count reaches the reset
         * threshold.  This threshold should be set such that at least
         * one retry is issued before the reset is performed.
         */
        if (xp->xb_retry_count ==
            ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
                int rval = 0;
                mutex_exit(SD_MUTEX(un));
                if (un->un_f_allow_bus_device_reset == TRUE) {
                        /*
                         * First try to reset the LUN; if we cannot then
                         * try to reset the target.
                         */
                        if (un->un_f_lun_reset_enabled == TRUE) {
                                SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                                    "sd_pkt_status_busy: RESET_LUN\n");
                                rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
                        }
                        if (rval == 0) {
                                SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                                    "sd_pkt_status_busy: RESET_TARGET\n");
                                rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
                        }
                }
                if (rval == 0) {
                        /*
                         * If the RESET_LUN and/or RESET_TARGET failed,
                         * try RESET_ALL
                         */
                        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                            "sd_pkt_status_busy: RESET_ALL\n");
                        rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);
                }
                mutex_enter(SD_MUTEX(un));
                if (rval == 0) {
                        /*
                         * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed.
                         * At this point we give up & fail the command.
                         */
                        sd_return_failed_command(un, bp, EIO);
                        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                            "sd_pkt_status_busy: exit (failed cmd)\n");
                        return;
                }
        }

        /*
         * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as
         * we have already checked the retry counts above.
         */
        sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL,
            EIO, un->un_busy_timeout, NULL);

        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_pkt_status_busy: exit\n");
}


/*
 *    Function: sd_pkt_status_reservation_conflict
 *
 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI
 *              command status.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        ASSERT(xp != NULL);
        ASSERT(pktp != NULL);

        /*
         * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation
         * conflict could be due to various reasons like incorrect keys, not
         * registered or not reserved etc. So, we return EACCES to the caller.
         */
        if (un->un_reservation_type == SD_SCSI3_RESERVATION) {
                int cmd = SD_GET_PKT_OPCODE(pktp);
                if ((cmd == SCMD_PERSISTENT_RESERVE_IN) ||
                    (cmd == SCMD_PERSISTENT_RESERVE_OUT)) {
                        sd_return_failed_command(un, bp, EACCES);
                        return;
                }
        }

        un->un_resvd_status |= SD_RESERVATION_CONFLICT;

        if ((un->un_resvd_status & SD_FAILFAST) != 0) {
                if (sd_failfast_enable != 0) {
                        /* By definition, we must panic here.... */
                        sd_panic_for_res_conflict(un);
                        /*NOTREACHED*/
                }
                SD_ERROR(SD_LOG_IO, un,
                    "sd_handle_resv_conflict: Disk Reserved\n");
                sd_return_failed_command(un, bp, EACCES);
                return;
        }

        /*
         * 1147670: retry only if sd_retry_on_reservation_conflict
         * property is set (default is 1). Retries will not succeed
         * on a disk reserved by another initiator. HA systems
         * may reset this via sd.conf to avoid these retries.
         *
         * Note: The legacy return code for this failure is EIO, however EACCES
         * seems more appropriate for a reservation conflict.
         */
        if (sd_retry_on_reservation_conflict == 0) {
                SD_ERROR(SD_LOG_IO, un,
                    "sd_handle_resv_conflict: Device Reserved\n");
                sd_return_failed_command(un, bp, EIO);
                return;
        }

        /*
         * Retry the command if we can.
         *
         * Note: The legacy return code for this failure is EIO, however EACCES
         * seems more appropriate for a reservation conflict.
         */
        sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
            (clock_t)2, NULL);
}



/*
 *    Function: sd_pkt_status_qfull
 *
 * Description: Handle a QUEUE FULL condition from the target.  This can
 *              occur if the HBA does not handle the queue full condition
 *              itself.  (Basically this means third-party HBAs, as Sun HBAs
 *              will handle the queue full condition.)  Note that if there
 *              are some commands already in the transport, then the queue
 *              full has occurred because the queue for this nexus is
 *              actually full.  If there are no commands in the transport,
 *              then the queue full is resulting from some other initiator
 *              or lun consuming all the resources at the target.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(bp != NULL);
        ASSERT(xp != NULL);
        ASSERT(pktp != NULL);

        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_pkt_status_qfull: entry\n");

        /*
         * Just lower the QFULL throttle and retry the command.  Note that
         * we do not limit the number of retries here.
         */
        sd_reduce_throttle(un, SD_THROTTLE_QFULL);
        sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0,
            SD_RESTART_TIMEOUT, NULL);

        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
            "sd_pkt_status_qfull: exit\n");
}
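
/*
 * QFULL handling, summarized: instead of counting retries, the driver
 * lowers its own throttle (the number of commands it keeps outstanding
 * on this nexus) via sd_reduce_throttle() and requeues the command after
 * SD_RESTART_TIMEOUT.  The throttle is raised back toward its saved
 * value later by the throttle-restoring timeout logic elsewhere in this
 * file, once the target has had time to drain.
 */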


/*
 *    Function: sd_reset_target
 *
 * Description: Issue a scsi_reset(9F), with either RESET_LUN,
 *              RESET_TARGET, or RESET_ALL.
 *
 *     Context: May be called under interrupt context.
 */

static void
sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp)
{
        int rval = 0;

        ASSERT(un != NULL);
        ASSERT(mutex_owned(SD_MUTEX(un)));
        ASSERT(pktp != NULL);

        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n");

        /*
         * No need to reset if the transport layer has already done so.
         */
        if ((pktp->pkt_statistics &
            (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) {
                SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                    "sd_reset_target: no reset\n");
                return;
        }

        mutex_exit(SD_MUTEX(un));

        if (un->un_f_allow_bus_device_reset == TRUE) {
                if (un->un_f_lun_reset_enabled == TRUE) {
                        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                            "sd_reset_target: RESET_LUN\n");
                        rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
                }
                if (rval == 0) {
                        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                            "sd_reset_target: RESET_TARGET\n");
                        rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
                }
        }

        if (rval == 0) {
                SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
                    "sd_reset_target: RESET_ALL\n");
                (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
        }

        mutex_enter(SD_MUTEX(un));

        SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n");
}
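
/*
 * The scsi_reset(9F) levels used above escalate in scope: RESET_LUN
 * affects only this logical unit, RESET_TARGET affects every LUN behind
 * the target, and RESET_ALL resets the entire bus or transport instance.
 * Each wider reset is attempted only when the narrower one failed
 * (scsi_reset() returned 0) or was not permitted by the
 * un_f_allow_bus_device_reset / un_f_lun_reset_enabled settings.
 */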

/*
 *    Function: sd_target_change_task
 *
 * Description: Handle dynamic target change
 *
 *     Context: Executes in a taskq() thread context
 */
static void
sd_target_change_task(void *arg)
{
        struct sd_lun           *un = arg;
        uint64_t                capacity;
        diskaddr_t              label_cap;
        uint_t                  lbasize;
        sd_ssc_t                *ssc;

        ASSERT(un != NULL);
        ASSERT(!mutex_owned(SD_MUTEX(un)));

        if ((un->un_f_blockcount_is_valid == FALSE) ||
            (un->un_f_tgt_blocksize_is_valid == FALSE)) {
                return;
        }

        ssc = sd_ssc_init(un);

        if (sd_send_scsi_READ_CAPACITY(ssc, &capacity,
            &lbasize, SD_PATH_DIRECT) != 0) {
                SD_ERROR(SD_LOG_ERROR, un,
                    "sd_target_change_task: fail to read capacity\n");
                sd_ssc_assessment(ssc, SD_FMT_IGNORE);
                goto task_exit;
        }

        mutex_enter(SD_MUTEX(un));
        if (capacity <= un->un_blockcount) {
                mutex_exit(SD_MUTEX(un));
                goto task_exit;
        }

        sd_update_block_info(un, lbasize, capacity);
        mutex_exit(SD_MUTEX(un));

        /*
         * If the lun is EFI labeled and the lun capacity is greater than
         * the capacity contained in the label, log a sys event.
         */
        if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
            (void*)SD_PATH_DIRECT) == 0) {
                mutex_enter(SD_MUTEX(un));
                if (un->un_f_blockcount_is_valid &&
                    un->un_blockcount > label_cap) {
                        mutex_exit(SD_MUTEX(un));
                        sd_log_lun_expansion_event(un, KM_SLEEP);
                } else {
                        mutex_exit(SD_MUTEX(un));
                }
        }

task_exit:
        sd_ssc_fini(ssc);
}

/*
 *    Function: sd_log_lun_expansion_event
 *
 * Description: Log lun expansion sys event
 *
 *     Context: Never called from interrupt context
 */
static void
sd_log_lun_expansion_event(struct sd_lun *un, int km_flag)
{
        int err;
        char                    *path;
        nvlist_t                *dle_attr_list;

        /* Allocate and build sysevent attribute list */
        err = nvlist_alloc(&dle_attr_list, NV_UNIQUE_NAME_TYPE, km_flag);
        if (err != 0) {
                SD_ERROR(SD_LOG_ERROR, un,
                    "sd_log_lun_expansion_event: fail to allocate space\n");
                return;
        }

        path = kmem_alloc(MAXPATHLEN, km_flag);
        if (path == NULL) {
                nvlist_free(dle_attr_list);
                SD_ERROR(SD_LOG_ERROR, un,
                    "sd_log_lun_expansion_event: fail to allocate space\n");
                return;
        }
        /*
         * Add path attribute to identify the lun.
         * We are using minor node 'a' as the sysevent attribute.
         */
        (void) snprintf(path, MAXPATHLEN, "/devices");
        (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path));
        (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path),
            ":a");

        err = nvlist_add_string(dle_attr_list, DEV_PHYS_PATH, path);
        if (err != 0) {
                nvlist_free(dle_attr_list);
                kmem_free(path, MAXPATHLEN);
                SD_ERROR(SD_LOG_ERROR, un,
                    "sd_log_lun_expansion_event: fail to add attribute\n");
                return;
        }

        /* Log dynamic lun expansion sysevent */
        err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS,
            ESC_DEV_DLE, dle_attr_list, NULL, km_flag);
        if (err != DDI_SUCCESS) {
                SD_ERROR(SD_LOG_ERROR, un,
                    "sd_log_lun_expansion_event: fail to log sysevent\n");
        }

        nvlist_free(dle_attr_list);
        kmem_free(path, MAXPATHLEN);
}

/*
 *    Function: sd_media_change_task
 *
 * Description: Recovery action for CDROM to become available.
 *
 *     Context: Executes in a taskq() thread context
 */

static void
sd_media_change_task(void *arg)
{
        struct  scsi_pkt        *pktp = arg;
        struct  sd_lun          *un;
        struct  buf             *bp;
        struct  sd_xbuf         *xp;
        int     err             = 0;
        int     retry_count     = 0;
        int     retry_limit     = SD_UNIT_ATTENTION_RETRY/10;
        struct  sd_sense_info   si;

        ASSERT(pktp != NULL);
        bp = (struct buf *)pktp->pkt_private;
        ASSERT(bp != NULL);
        xp = SD_GET_XBUF(bp);
        ASSERT(xp != NULL);
        un = SD_GET_UN(bp);
        ASSERT(un != NULL);
        ASSERT(!mutex_owned(SD_MUTEX(un)));
        ASSERT(un->un_f_monitor_media_state);

        si.ssi_severity = SCSI_ERR_INFO;
        si.ssi_pfa_flag = FALSE;

        /*
         * When a reset is issued on a CDROM, it takes a long time to
         * recover.  The first few attempts to read capacity and other
         * things related to handling unit attention fail (with an ASC
         * 0x4 and ASCQ 0x1).  In that case we want to do enough retries,
         * while limiting the retries in other cases of genuine failures
         * like no media in the drive.
         */
        while (retry_count++ < retry_limit) {
                if ((err = sd_handle_mchange(un)) == 0) {
                        break;
                }
                if (err == EAGAIN) {
                        retry_limit = SD_UNIT_ATTENTION_RETRY;
                }
                /* Sleep for 0.5 sec. & try again */
                delay(drv_usectohz(500000));
        }

        /*
         * Dispatch (retry or fail) the original command here,
         * along with appropriate console messages....
         *
         * Must grab the mutex before calling sd_retry_command,
         * sd_print_sense_msg and sd_return_failed_command.
         */
        mutex_enter(SD_MUTEX(un));
        if (err != SD_CMD_SUCCESS) {
                SD_UPDATE_ERRSTATS(un, sd_harderrs);
                SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
                si.ssi_severity = SCSI_ERR_FATAL;
                sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
                sd_return_failed_command(un, bp, EIO);
        } else {
                sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
                    &si, EIO, (clock_t)0, NULL);
        }
        mutex_exit(SD_MUTEX(un));
}



/*
 *    Function: sd_handle_mchange
 *
 * Description: Perform geometry validation & other recovery when the CDROM
 *              has been removed from the drive.
 *
 * Return Code: 0 for success
 *              errno-type return code of either sd_send_scsi_DOORLOCK() or
 *              sd_send_scsi_READ_CAPACITY()
 *
 *     Context: Executes in a taskq() thread context
 */

static int
sd_handle_mchange(struct sd_lun *un)
{
        uint64_t        capacity;
        uint32_t        lbasize;
        int             rval;
        sd_ssc_t        *ssc;

        ASSERT(!mutex_owned(SD_MUTEX(un)));
        ASSERT(un->un_f_monitor_media_state);

        ssc = sd_ssc_init(un);
        rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
            SD_PATH_DIRECT_PRIORITY);

        if (rval != 0)
                goto failed;

        mutex_enter(SD_MUTEX(un));
        sd_update_block_info(un, lbasize, capacity);

        if (un->un_errstats != NULL) {
                struct  sd_errstats *stp =
                    (struct sd_errstats *)un->un_errstats->ks_data;
                stp->sd_capacity.value.ui64 = (uint64_t)
                    ((uint64_t)un->un_blockcount *
                    (uint64_t)un->un_tgt_blocksize);
        }

        /*
         * Check if the media in the device is writable or not
         */
        if (ISCD(un)) {
                sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY);
        }

        /*
         * Note: Maybe let the strategy/partitioning chain worry about getting
         * valid geometry.
         */
        mutex_exit(SD_MUTEX(un));
        cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);


        if (cmlb_validate(un->un_cmlbhandle, 0,
            (void *)SD_PATH_DIRECT_PRIORITY) != 0) {
                sd_ssc_fini(ssc);
                return (EIO);
        } else {
                if (un->un_f_pkstats_enabled) {
                        sd_set_pstats(un);
                        SD_TRACE(SD_LOG_IO_PARTITION, un,
                            "sd_handle_mchange: un:0x%p pstats created and "
                            "set\n", un);
                }
        }

        /*
         * Try to lock the door
         */
        rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
            SD_PATH_DIRECT_PRIORITY);
failed:
        if (rval != 0)
                sd_ssc_assessment(ssc, SD_FMT_IGNORE);
        sd_ssc_fini(ssc);
        return (rval);
}
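
/*
 * Retry budget note for the media-change path above: sd_media_change_task()
 * starts with a small budget (SD_UNIT_ATTENTION_RETRY / 10) so that genuine
 * failures such as an empty drive give up quickly, but widens it to the
 * full SD_UNIT_ATTENTION_RETRY once sd_handle_mchange() returns EAGAIN,
 * i.e. once READ CAPACITY reports 04/01 ("becoming ready") and the device
 * merely needs more time to recover from a reset.
 */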


/*
 *    Function: sd_send_scsi_DOORLOCK
 *
 * Description: Issue the scsi DOOR LOCK command
 *
 *   Arguments: ssc   - ssc contains pointer to driver soft state (unit)
 *                      structure for this target.
 *              flag  - SD_REMOVAL_ALLOW
 *                      SD_REMOVAL_PREVENT
 *              path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *                      the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *                      to use the USCSI "direct" chain and bypass the normal
 *                      command waitq. SD_PATH_DIRECT_PRIORITY is used when this
 *                      command is issued as part of an error recovery action.
 *
 * Return Code: 0 - Success
 *              errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.
 */

static int
sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag)
{
        struct scsi_extended_sense      sense_buf;
        union scsi_cdb          cdb;
        struct uscsi_cmd        ucmd_buf;
        int                     status;
        struct sd_lun           *un;

        ASSERT(ssc != NULL);
        un = ssc->ssc_un;
        ASSERT(un != NULL);
        ASSERT(!mutex_owned(SD_MUTEX(un)));

        SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un);

        /* already determined doorlock is not supported, fake success */
        if (un->un_f_doorlock_supported == FALSE) {
                return (0);
        }

        /*
         * If we are ejecting and see an SD_REMOVAL_PREVENT
         * ignore the command so we can complete the eject
         * operation.
         */
        if (flag == SD_REMOVAL_PREVENT) {
                mutex_enter(SD_MUTEX(un));
                if (un->un_f_ejecting == TRUE) {
                        mutex_exit(SD_MUTEX(un));
                        return (EAGAIN);
                }
                mutex_exit(SD_MUTEX(un));
        }

        bzero(&cdb, sizeof (cdb));
        bzero(&ucmd_buf, sizeof (ucmd_buf));

        cdb.scc_cmd = SCMD_DOORLOCK;
        cdb.cdb_opaque[4] = (uchar_t)flag;

        ucmd_buf.uscsi_cdb      = (char *)&cdb;
        ucmd_buf.uscsi_cdblen   = CDB_GROUP0;
        ucmd_buf.uscsi_bufaddr  = NULL;
        ucmd_buf.uscsi_buflen   = 0;
        ucmd_buf.uscsi_rqbuf    = (caddr_t)&sense_buf;
        ucmd_buf.uscsi_rqlen    = sizeof (sense_buf);
        ucmd_buf.uscsi_flags    = USCSI_RQENABLE | USCSI_SILENT;
        ucmd_buf.uscsi_timeout  = 15;

        SD_TRACE(SD_LOG_IO, un,
            "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n");

        status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
            UIO_SYSSPACE, path_flag);

        if (status == 0)
                sd_ssc_assessment(ssc, SD_FMT_STANDARD);

        if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
            (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
            (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) {
                sd_ssc_assessment(ssc, SD_FMT_IGNORE);

                /* fake success and skip subsequent doorlock commands */
                un->un_f_doorlock_supported = FALSE;
                return (0);
        }

        return (status);
}
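
/*
 * CDB layout for the command above (PREVENT ALLOW MEDIUM REMOVAL,
 * opcode 0x1E): a 6-byte group 0 CDB in which byte 4 carries the
 * Prevent field, which is why SD_REMOVAL_PREVENT or SD_REMOVAL_ALLOW
 * is written into cdb.cdb_opaque[4].  A target that fails the command
 * with an ILLEGAL REQUEST sense key is taken to lack door-lock support,
 * after which un_f_doorlock_supported makes all future attempts
 * immediately "succeed".
 */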

/*
 *    Function: sd_send_scsi_READ_CAPACITY
 *
 * Description: This routine uses the scsi READ CAPACITY command to determine
 *              the device capacity in number of blocks and the device native
 *              block size.  If this function returns a failure, then the
 *              values in *capp and *lbap are undefined.  If the capacity
 *              returned is 0xffffffff then the lun is too large for a
 *              normal READ CAPACITY command and the results of a
 *              READ CAPACITY 16 will be used instead.
 *
 *   Arguments: ssc  - ssc contains ptr to soft state struct for the target
 *              capp - ptr to unsigned 64-bit variable to receive the
 *                      capacity value from the command.
 *              lbap - ptr to unsigned 32-bit variable to receive the
 *                      block size value from the command
 *              path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *                      the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *                      to use the USCSI "direct" chain and bypass the normal
 *                      command waitq. SD_PATH_DIRECT_PRIORITY is used when this
 *                      command is issued as part of an error recovery action.
 *
 * Return Code: 0   - Success
 *              EIO - IO error
 *              EACCES - Reservation conflict detected
 *              EAGAIN - Device is becoming ready
 *              errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.  Blocks until command completes.
 */

#define SD_CAPACITY_SIZE        sizeof (struct scsi_capacity)

static int
sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap,
    int path_flag)
{
        struct  scsi_extended_sense     sense_buf;
        struct  uscsi_cmd       ucmd_buf;
        union   scsi_cdb        cdb;
        uint32_t                *capacity_buf;
        uint64_t                capacity;
        uint32_t                lbasize;
        uint32_t                pbsize;
        int                     status;
        struct sd_lun           *un;

        ASSERT(ssc != NULL);

        un = ssc->ssc_un;
        ASSERT(un != NULL);
        ASSERT(!mutex_owned(SD_MUTEX(un)));
        ASSERT(capp != NULL);
        ASSERT(lbap != NULL);

        SD_TRACE(SD_LOG_IO, un,
            "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);

        /*
         * First send a READ_CAPACITY command to the target.
         * (This command is mandatory under SCSI-2.)
         *
         * Set up the CDB for the READ_CAPACITY command.  The Partial
         * Medium Indicator bit is cleared.  The address field must be
         * zero if the PMI bit is zero.
         */
        bzero(&cdb, sizeof (cdb));
        bzero(&ucmd_buf, sizeof (ucmd_buf));

        capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP);

        cdb.scc_cmd = SCMD_READ_CAPACITY;

        ucmd_buf.uscsi_cdb      = (char *)&cdb;
        ucmd_buf.uscsi_cdblen   = CDB_GROUP1;
        ucmd_buf.uscsi_bufaddr  = (caddr_t)capacity_buf;
        ucmd_buf.uscsi_buflen   = SD_CAPACITY_SIZE;
        ucmd_buf.uscsi_rqbuf    = (caddr_t)&sense_buf;
        ucmd_buf.uscsi_rqlen    = sizeof (sense_buf);
        ucmd_buf.uscsi_flags    = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
        ucmd_buf.uscsi_timeout  = 60;

        status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
            UIO_SYSSPACE, path_flag);

        switch (status) {
        case 0:
                /* Return failure if we did not get valid capacity data. */
                if (ucmd_buf.uscsi_resid != 0) {
                        sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
                            "sd_send_scsi_READ_CAPACITY received invalid "
                            "capacity data");
                        kmem_free(capacity_buf, SD_CAPACITY_SIZE);
                        return (EIO);
                }
                /*
                 * Read capacity and block size from the READ CAPACITY 10 data.
                 * This data may be adjusted later due to device specific
                 * issues.
                 *
                 * According to the SCSI spec, the READ CAPACITY 10
                 * command returns the following:
                 *
                 *  bytes 0-3: Maximum logical block address available.
                 *              (MSB in byte:0 & LSB in byte:3)
                 *
                 *  bytes 4-7: Block length in bytes
                 *              (MSB in byte:4 & LSB in byte:7)
                 *
                 */
                capacity = BE_32(capacity_buf[0]);
                lbasize = BE_32(capacity_buf[1]);

                /*
                 * Done with capacity_buf
                 */
                kmem_free(capacity_buf, SD_CAPACITY_SIZE);

                /*
                 * If the reported capacity is set to all 0xf's, then
                 * this disk is too large and requires SBC-2 commands.
                 * Reissue the request using READ CAPACITY 16.
                 */
                if (capacity == 0xffffffff) {
                        sd_ssc_assessment(ssc, SD_FMT_IGNORE);
                        status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity,
                            &lbasize, &pbsize, path_flag);
                        if (status != 0) {
                                return (status);
                        }
                }
                break;  /* Success! */
        case EIO:
                switch (ucmd_buf.uscsi_status) {
                case STATUS_RESERVATION_CONFLICT:
                        status = EACCES;
                        break;
                case STATUS_CHECK:
                        /*
                         * Check condition; look for ASC/ASCQ of 0x04/0x01
                         * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
                         */
                        if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
                            (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
                            (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
                                kmem_free(capacity_buf, SD_CAPACITY_SIZE);
                                return (EAGAIN);
                        }
                        break;
                default:
                        break;
                }
                /* FALLTHRU */
        default:
                kmem_free(capacity_buf, SD_CAPACITY_SIZE);
                return (status);
        }

        /*
         * Some ATAPI CD-ROM drives report inaccurate LBA size values
         * (2352 and 0 are common) so for these devices always force the value
         * to 2048 as required by the ATAPI specs.
         */
        if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
                lbasize = 2048;
        }

        /*
         * Get the maximum LBA value from the READ CAPACITY data.
         * Here we assume that the Partial Medium Indicator (PMI) bit
         * was cleared when issuing the command. This means that the LBA
         * returned from the device is the LBA of the last logical block
         * on the logical unit.  The actual logical block count will be
         * this value plus one.
         *
         * Currently, for removable media, the capacity is saved in terms
         * of un->un_sys_blocksize, so scale the capacity value to reflect this.
         */
        if (un->un_f_has_removable_media)
                capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);

        /*
         * Copy the values from the READ CAPACITY command into the space
         * provided by the caller.
         */
        *capp = capacity;
        *lbap = lbasize;

        SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
            "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);

        /*
         * Both the lbasize and capacity from the device must be nonzero,
         * otherwise we assume that the values are not valid and return
         * failure to the caller. (4203735)
         */
        if ((capacity == 0) || (lbasize == 0)) {
                sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
                    "sd_send_scsi_READ_CAPACITY received invalid value "
                    "capacity %llu lbasize %d", capacity, lbasize);
                return (EIO);
        }
        sd_ssc_assessment(ssc, SD_FMT_STANDARD);
        return (0);
}
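
/*
 * Worked example of the READ CAPACITY(10) decode above, with illustrative
 * numbers: if the target returns a maximum LBA of 0x003FFFFF and a block
 * length of 512, then capacity = 0x003FFFFF and lbasize = 512, giving
 * 0x00400000 addressable blocks once the "last LBA plus one" adjustment
 * is applied.  A returned value of 0xFFFFFFFF is the sentinel meaning the
 * capacity does not fit in the 10-byte CDB's 32-bit field, which triggers
 * the READ CAPACITY(16) reissue above.
 */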
static int
sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
	uint32_t *lbap, uint32_t *psp, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	struct uscsi_cmd	ucmd_buf;
	union scsi_cdb		cdb;
	uint64_t		*capacity16_buf;
	uint64_t		capacity;
	uint32_t		lbasize;
	uint32_t		pbsize;
	uint32_t		lbpb_exp;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);

	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(capp != NULL);
	ASSERT(lbap != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);

	/*
	 * First send a READ_CAPACITY_16 command to the target.
	 *
	 * Set up the CDB for the READ_CAPACITY_16 command.  The Partial
	 * Medium Indicator bit is cleared.  The address field must be
	 * zero if the PMI bit is zero.
	 */
	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));

	capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP4;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)capacity16_buf;
	ucmd_buf.uscsi_buflen	= SD_CAPACITY_16_SIZE;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (sense_buf);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	/*
	 * Read Capacity (16) is a Service Action In command.  One
	 * command byte (0x9E) is overloaded for multiple operations,
	 * with the second CDB byte specifying the desired operation.
	 */
	cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
	cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;

	/*
	 * Fill in allocation length field
	 */
	FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		/* Return failure if we did not get valid capacity data. */
		if (ucmd_buf.uscsi_resid > 20) {
			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
			    "sd_send_scsi_READ_CAPACITY_16 received invalid "
			    "capacity data");
			kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
			return (EIO);
		}

		/*
		 * Read capacity and block size from the READ CAPACITY 16 data.
		 * This data may be adjusted later due to device specific
		 * issues.
		 *
		 * According to the SCSI spec, the READ CAPACITY 16
		 * command returns the following:
		 *
		 *  bytes 0-7: Maximum logical block address available.
		 *		(MSB in byte:0 & LSB in byte:7)
		 *
		 *  bytes 8-11: Block length in bytes
		 *		(MSB in byte:8 & LSB in byte:11)
		 *
		 *  byte 13: LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT
		 */
		capacity = BE_64(capacity16_buf[0]);
		lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
		lbpb_exp = (BE_64(capacity16_buf[1]) >> 16) & 0x0f;

		pbsize = lbasize << lbpb_exp;

		/*
		 * Done with capacity16_buf
		 */
		kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);

		/*
		 * If the reported capacity is set to all 0xf's, then
		 * this disk is too large.  This could only happen with
		 * a device that supports LBAs larger than 64 bits which
		 * are not defined by any current T10 standards.
		 */
		if (capacity == 0xffffffffffffffff) {
			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
			    "disk is too large");
			return (EIO);
		}
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			/*
			 * Check condition; look for ASC/ASCQ of 0x04/0x01
			 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
			 */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
			    (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
				kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
				return (EAGAIN);
			}
			break;
		default:
			break;
		}
		/* FALLTHRU */
	default:
		kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
		return (status);
	}

	*capp = capacity;
	*lbap = lbasize;
	*psp = pbsize;

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
	    "capacity:0x%llx lbasize:0x%x, pbsize: 0x%x\n",
	    capacity, lbasize, pbsize);

	return (0);
}
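/*
 * Illustrative decode of READ CAPACITY (16) data (added commentary, not in
 * the original source; the values are hypothetical).  After BE_64() the
 * second 8-byte word holds bytes 8-15 in big-endian order, so byte 13
 * (LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT, low nibble) sits 16 bits
 * above the least significant byte, hence the shift and mask above:
 *
 *	lbasize = 512, low nibble of byte 13 = 3
 *	pbsize  = lbasize << lbpb_exp = 512 << 3 = 4096
 *
 * i.e. a "512e" drive exposing 512-byte logical blocks on 4 KiB physical
 * sectors.
 */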
/*
 *    Function: sd_send_scsi_START_STOP_UNIT
 *
 * Description: Issue a scsi START STOP UNIT command to the target.
 *
 *   Arguments: ssc   - ssc contains pointer to driver soft state (unit)
 *			structure for this target.
 *		flag  - SD_TARGET_START
 *			SD_TARGET_STOP
 *			SD_TARGET_EJECT
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq. SD_PATH_DIRECT_PRIORITY is used when this
 *			command is issued as part of an error recovery action.
 *
 * Return Code: 0   - Success
 *		EIO - IO error
 *		EACCES - Reservation conflict detected
 *		ENXIO  - Not Ready, medium not present
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.
 */
static int
sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un);

	if (un->un_f_check_start_stop &&
	    ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) &&
	    (un->un_f_start_stop_supported != TRUE)) {
		return (0);
	}

	/*
	 * If we are performing an eject operation and
	 * we receive any command other than SD_TARGET_EJECT
	 * we should immediately return.
	 */
	if (flag != SD_TARGET_EJECT) {
		mutex_enter(SD_MUTEX(un));
		if (un->un_f_ejecting == TRUE) {
			mutex_exit(SD_MUTEX(un));
			return (EAGAIN);
		}
		mutex_exit(SD_MUTEX(un));
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	cdb.scc_cmd = SCMD_START_STOP;
	cdb.cdb_opaque[4] = (uchar_t)flag;

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP0;
	ucmd_buf.uscsi_bufaddr	= NULL;
	ucmd_buf.uscsi_buflen	= 0;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 200;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) {
				switch (scsi_sense_key(
				    (uint8_t *)&sense_buf)) {
				case KEY_ILLEGAL_REQUEST:
					status = ENOTSUP;
					break;
				case KEY_NOT_READY:
					if (scsi_sense_asc(
					    (uint8_t *)&sense_buf)
					    == 0x3A) {
						status = ENXIO;
					}
					break;
				default:
					break;
				}
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n");

	return (status);
}
/*
 *    Function: sd_start_stop_unit_callback
 *
 * Description: timeout(9F) callback to begin recovery process for a
 *		device that has spun down.
 *
 *   Arguments: arg - pointer to associated softstate struct.
 *
 *     Context: Executes in a timeout(9F) thread context
 */

static void
sd_start_stop_unit_callback(void *arg)
{
	struct sd_lun	*un = arg;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n");

	(void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP);
}
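/*
 * Note (added commentary, not in the original source): a timeout(9F)
 * callback runs in a context that must not block, while the recovery work
 * below issues a SCSI command that can sleep.  Handing the softstate
 * pointer off to a taskq thread with a non-blocking dispatch bridges the
 * two contexts; if the dispatch fails under memory pressure, the recovery
 * is simply not started from this timeout.
 */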
/*
 *    Function: sd_start_stop_unit_task
 *
 * Description: Recovery procedure when a drive is spun down.
 *
 *   Arguments: arg - pointer to associated softstate struct.
 *
 *     Context: Executes in a taskq() thread context
 */

static void
sd_start_stop_unit_task(void *arg)
{
	struct sd_lun	*un = arg;
	sd_ssc_t	*ssc;
	int		rval;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n");

	/*
	 * Some unformatted drives report not ready error, no need to
	 * restart if format has been initiated.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_format_in_progress == TRUE) {
		mutex_exit(SD_MUTEX(un));
		return;
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * When a START STOP command is issued from here, it is part of a
	 * failure recovery operation and must be issued before any other
	 * commands, including any pending retries.  Thus it must be sent
	 * using SD_PATH_DIRECT_PRIORITY.  It doesn't matter if the spin up
	 * succeeds or not, we will start I/O after the attempt.
	 */
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START,
	    SD_PATH_DIRECT_PRIORITY);
	if (rval != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	sd_ssc_fini(ssc);

	/*
	 * The above call blocks until the START_STOP_UNIT command completes.
	 * Now that it has completed, we must re-try the original IO that
	 * received the NOT READY condition in the first place.  There are
	 * three possible conditions here:
	 *
	 *  (1) The original IO is on un_retry_bp.
	 *  (2) The original IO is on the regular wait queue, and un_retry_bp
	 *	is NULL.
	 *  (3) The original IO is on the regular wait queue, and un_retry_bp
	 *	points to some other, unrelated bp.
	 *
	 * For each case, we must call sd_start_cmds() with un_retry_bp
	 * as the argument.  If un_retry_bp is NULL, this will initiate
	 * processing of the regular wait queue.  If un_retry_bp is not NULL,
	 * then this will process the bp on un_retry_bp.  That may or may not
	 * be the original IO, but that does not matter: the important thing
	 * is to keep the IO processing going at this point.
	 *
	 * Note: This is a very specific error recovery sequence associated
	 * with a drive that is not spun up.  We attempt a START_STOP_UNIT and
	 * serialize the I/O with completion of the spin-up.
	 */
	mutex_enter(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n",
	    un, un->un_retry_bp);
	un->un_startstop_timeid = NULL;	/* Timeout is no longer pending */
	sd_start_cmds(un, un->un_retry_bp);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n");
}
/*
 *    Function: sd_send_scsi_INQUIRY
 *
 * Description: Issue the scsi INQUIRY command.
 *
 *   Arguments: ssc   - ssc contains pointer to driver soft state (unit)
 *			structure for this target.
 *		bufaddr   - buffer to receive the inquiry data from the target
 *		buflen    - size of the buffer (and of the data requested)
 *		evpd      - EVPD bit for CDB byte 1 (vital product data)
 *		page_code - page code of data to be retrieved from the target
 *		residp    - ptr to receive the command residual; may be NULL
 *			if the caller does not need the residual
 *
 * Return Code: 0   - Success
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.  Does not return until command is completed.
 */

static int
sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen,
	uchar_t evpd, uchar_t page_code, size_t *residp)
{
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(bufaddr, buflen);

	cdb.scc_cmd = SCMD_INQUIRY;
	cdb.cdb_opaque[1] = evpd;
	cdb.cdb_opaque[2] = page_code;
	FORMG0COUNT(&cdb, buflen);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP0;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen	= buflen;
	ucmd_buf.uscsi_rqbuf	= NULL;
	ucmd_buf.uscsi_rqlen	= 0;
	ucmd_buf.uscsi_flags	= USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 200;	/* Excessive legacy value */

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_DIRECT);

	/*
	 * Only handle status == 0, the upper-level caller
	 * will put different assessment based on the context.
	 */
	if (status == 0)
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);

	if ((status == 0) && (residp != NULL)) {
		*residp = ucmd_buf.uscsi_resid;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");

	return (status);
}


/*
 *    Function: sd_send_scsi_TEST_UNIT_READY
 *
 * Description: Issue the scsi TEST UNIT READY command.
 *		This routine can be told to set the flag USCSI_DIAGNOSE to
 *		prevent retrying failed commands.  Use this when the intent
 *		is either to check for device readiness, to clear a Unit
 *		Attention, or to clear any outstanding sense data.
 *		However under specific conditions the expected behavior
 *		is for retries to bring a device ready, so use the flag
 *		with caution.
 *
 *   Arguments: ssc   - ssc contains pointer to driver soft state (unit)
 *			structure for this target.
 *		flag:   SD_CHECK_FOR_MEDIA: return ENXIO if no media present
 *			SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
 *			0: don't check for media present, do retries on cmd.
 *
 * Return Code: 0   - Success
 *		EIO - IO error
 *		EACCES - Reservation conflict detected
 *		ENXIO  - Not Ready, medium not present
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.  Does not return until command is completed.
 */

static int
sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);

	/*
	 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
	 * timeouts when they receive a TUR and the queue is not empty.  Check
	 * the configuration flag set during attach (indicating the drive has
	 * this firmware bug) and un_ncmds_in_transport before issuing the
	 * TUR.  If there are pending commands return success; this is a bit
	 * arbitrary but is ok for non-removables (i.e. the eliteI disks)
	 * and non-clustering configurations.
	 */
	if (un->un_f_cfg_tur_check == TRUE) {
		mutex_enter(SD_MUTEX(un));
		if (un->un_ncmds_in_transport != 0) {
			mutex_exit(SD_MUTEX(un));
			return (0);
		}
		mutex_exit(SD_MUTEX(un));
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	cdb.scc_cmd = SCMD_TEST_UNIT_READY;

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP0;
	ucmd_buf.uscsi_bufaddr	= NULL;
	ucmd_buf.uscsi_buflen	= 0;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_SILENT;

	/* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */
	if ((flag & SD_DONT_RETRY_TUR) != 0) {
		ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE;
	}
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT :
	    SD_PATH_STANDARD));

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((flag & SD_CHECK_FOR_MEDIA) == 0) {
				break;
			}
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_NOT_READY) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) {
				status = ENXIO;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n");

	return (status);
}
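/*
 * Illustrative call patterns (added commentary, not in the original source):
 *
 *	status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA);
 *		probe for removable media; ENXIO means no medium present.
 *
 *	status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
 *		single-shot TUR, e.g. to clear a pending Unit Attention
 *		without letting the retry machinery mask the result.
 *
 *	status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
 *		plain readiness check with normal retries.
 */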
/*
 *    Function: sd_send_scsi_PERSISTENT_RESERVE_IN
 *
 * Description: Issue the scsi PERSISTENT RESERVE IN command.
 *
 *   Arguments: ssc   - ssc contains pointer to driver soft state (unit)
 *			structure for this target.
 *		usr_cmd   - SD_READ_KEYS or SD_READ_RESV
 *		data_len  - length of the buffer pointed to by data_bufp
 *		data_bufp - buffer to receive the parameter data; if NULL,
 *			a MHIOC_RESV_KEY_SIZE buffer is allocated internally
 *
 * Return Code: 0   - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.  Does not return until command is completed.
 */

static int
sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd,
	uint16_t data_len, uchar_t *data_bufp)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	int			no_caller_buf = FALSE;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	if (data_bufp == NULL) {
		/* Allocate a default buf if the caller did not give one */
		ASSERT(data_len == 0);
		data_len  = MHIOC_RESV_KEY_SIZE;
		data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
		no_caller_buf = TRUE;
	}

	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
	cdb.cdb_opaque[1] = usr_cmd;
	FORMG1COUNT(&cdb, data_len);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)data_bufp;
	ucmd_buf.uscsi_buflen	= data_len;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);

		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				status = ENOTSUP;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");

	if (no_caller_buf == TRUE) {
		kmem_free(data_bufp, data_len);
	}

	return (status);
}
/*
 *    Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		multi-host persistent reservation requests (MHIOCGRP_REGISTER,
 *		MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT,
 *		MHIOCGRP_REGISTERANDIGNOREKEY) by sending the SCSI-3 PROUT
 *		commands to the device.
 *
 *   Arguments: ssc  -  ssc contains un - pointer to soft state struct
 *			for the target.
 *		usr_cmd SCSI-3 reservation facility command (one of
 *			SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
 *			SD_SCSI3_PREEMPTANDABORT,
 *			SD_SCSI3_REGISTERANDIGNOREKEY)
 *		usr_bufp - user provided pointer to register, reserve
 *			descriptor or preempt and abort structure
 *			(mhioc_register_t, mhioc_resv_desc_t,
 *			mhioc_preemptandabort_t)
 *
 * Return Code: 0   - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.  Does not return until command is completed.
 */

static int
sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd,
	uchar_t	*usr_bufp)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	uchar_t			data_len = sizeof (sd_prout_t);
	sd_prout_t		*prp;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(data_len == 24);	/* required by scsi spec */

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un);

	if (usr_bufp == NULL) {
		return (EINVAL);
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	prp = kmem_zalloc(data_len, KM_SLEEP);

	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT;
	cdb.cdb_opaque[1] = usr_cmd;
	FORMG1COUNT(&cdb, data_len);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)prp;
	ucmd_buf.uscsi_buflen	= data_len;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	switch (usr_cmd) {
	case SD_SCSI3_REGISTER: {
		mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp;

		bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		bcopy(ptr->newkey.key, prp->service_key,
		    MHIOC_RESV_KEY_SIZE);
		prp->aptpl = ptr->aptpl;
		break;
	}
	case SD_SCSI3_RESERVE:
	case SD_SCSI3_RELEASE: {
		mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp;

		bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		prp->scope_address = BE_32(ptr->scope_specific_addr);
		cdb.cdb_opaque[2] = ptr->type;
		break;
	}
	case SD_SCSI3_PREEMPTANDABORT: {
		mhioc_preemptandabort_t *ptr =
		    (mhioc_preemptandabort_t *)usr_bufp;

		bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		bcopy(ptr->victim_key.key, prp->service_key,
		    MHIOC_RESV_KEY_SIZE);
		prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr);
		cdb.cdb_opaque[2] = ptr->resvdesc.type;
		ucmd_buf.uscsi_flags |= USCSI_HEAD;
		break;
	}
	case SD_SCSI3_REGISTERANDIGNOREKEY:
	{
		mhioc_registerandignorekey_t *ptr;
		ptr = (mhioc_registerandignorekey_t *)usr_bufp;
		bcopy(ptr->newkey.key,
		    prp->service_key, MHIOC_RESV_KEY_SIZE);
		prp->aptpl = ptr->aptpl;
		break;
	}
	default:
		ASSERT(FALSE);
		break;
	}

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				status = ENOTSUP;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	kmem_free(prp, data_len);
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n");
	return (status);
}
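/*
 * For reference (added commentary, not in the original source): the 24-byte
 * PROUT parameter list built in sd_prout_t above corresponds roughly to the
 * SPC-2 basic parameter data layout:
 *
 *	bytes  0-7	reservation key		(prp->res_key)
 *	bytes  8-15	service action key	(prp->service_key)
 *	bytes 16-19	scope-specific address	(prp->scope_address)
 *	byte  20	bit 0 = APTPL		(prp->aptpl)
 *	bytes 21-23	reserved/obsolete
 *
 * The reservation scope and type go in CDB byte 2, not in the parameter
 * data.
 */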
/*
 *    Function: sd_send_scsi_SYNCHRONIZE_CACHE
 *
 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target
 *
 *   Arguments: un - pointer to the target's soft state struct
 *		dkc - pointer to the callback structure
 *
 * Return Code: 0 - success
 *		errno-type error code
 *
 *     Context: kernel thread context only.
 *
 *  _______________________________________________________________
 * | dkc_flag &   | dkc_callback |   DKIOCFLUSHWRITECACHE          |
 * |FLUSH_VOLATILE|              |       operation                 |
 * |______________|______________|_________________________________|
 * | 0            | NULL         | Synchronous flush on both       |
 * |              |              | volatile and non-volatile cache |
 * |______________|______________|_________________________________|
 * | 1            | NULL         | Synchronous flush on volatile   |
 * |              |              | cache; disk drivers may suppress|
 * |              |              | flush if disk table indicates   |
 * |              |              | non-volatile cache              |
 * |______________|______________|_________________________________|
 * | 0            | !NULL        | Asynchronous flush on both      |
 * |              |              | volatile and non-volatile cache |
 * |______________|______________|_________________________________|
 * | 1            | !NULL        | Asynchronous flush on volatile  |
 * |              |              | cache; disk drivers may suppress|
 * |              |              | flush if disk table indicates   |
 * |              |              | non-volatile cache              |
 * |______________|______________|_________________________________|
 *
 */

static int
sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc)
{
	struct sd_uscsi_info	*uip;
	struct uscsi_cmd	*uscmd;
	union scsi_cdb		*cdb;
	struct buf		*bp;
	int			rval = 0;
	int			is_async;

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un);

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (dkc == NULL || dkc->dkc_callback == NULL) {
		is_async = FALSE;
	} else {
		is_async = TRUE;
	}

	mutex_enter(SD_MUTEX(un));
	/* check whether cache flush should be suppressed */
	if (un->un_f_suppress_cache_flush == TRUE) {
		mutex_exit(SD_MUTEX(un));
		/*
		 * suppress the cache flush if the device is told to do
		 * so by sd.conf or disk table
		 */
		SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \
		    skip the cache flush since suppress_cache_flush is %d!\n",
		    un->un_f_suppress_cache_flush);

		if (is_async == TRUE) {
			/* invoke callback for asynchronous flush */
			(*dkc->dkc_callback)(dkc->dkc_cookie, 0);
		}
		return (rval);
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be
	 * set properly
	 */
	cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP);
	cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE;

	mutex_enter(SD_MUTEX(un));
	if (dkc != NULL && un->un_f_sync_nv_supported &&
	    (dkc->dkc_flag & FLUSH_VOLATILE)) {
		/*
		 * if the device supports SYNC_NV bit, turn on
		 * the SYNC_NV bit to only flush volatile cache
		 */
		cdb->cdb_un.tag |= SD_SYNC_NV_BIT;
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * First get some memory for the uscsi_cmd struct and cdb
	 * and initialize for SYNCHRONIZE_CACHE cmd.
	 */
	uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP);
	uscmd->uscsi_cdblen = CDB_GROUP1;
	uscmd->uscsi_cdb = (caddr_t)cdb;
	uscmd->uscsi_bufaddr = NULL;
	uscmd->uscsi_buflen = 0;
	uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
	uscmd->uscsi_rqlen = SENSE_LENGTH;
	uscmd->uscsi_rqresid = SENSE_LENGTH;
	uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
	uscmd->uscsi_timeout = sd_io_time;

	/*
	 * Allocate an sd_uscsi_info struct and fill it with the info
	 * needed by sd_initpkt_for_uscsi().  Then put the pointer into
	 * b_private in the buf for sd_initpkt_for_uscsi().  Note that
	 * since we allocate the buf here in this function, we do not
	 * need to preserve the prior contents of b_private.
	 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
	 */
	uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
	uip->ui_flags = SD_PATH_DIRECT;
	uip->ui_cmdp  = uscmd;

	bp = getrbuf(KM_SLEEP);
	bp->b_private = uip;

	/*
	 * Setup buffer to carry uscsi request.
	 */
	bp->b_flags  = B_BUSY;
	bp->b_bcount = 0;
	bp->b_blkno  = 0;

	if (is_async == TRUE) {
		bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone;
		uip->ui_dkc = *dkc;
	}

	bp->b_edev = SD_GET_DEV(un);
	bp->b_dev = cmpdev(bp->b_edev);	/* maybe unnecessary? */

	/*
	 * Unset un_f_sync_cache_required flag
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_f_sync_cache_required = FALSE;
	mutex_exit(SD_MUTEX(un));

	(void) sd_uscsi_strategy(bp);

	/*
	 * If synchronous request, wait for completion
	 * If async just return and let b_iodone callback
	 * cleanup.
	 * NOTE: On return, u_ncmds_in_driver will be decremented,
	 * but it was also incremented in sd_uscsi_strategy(), so
	 * we should be ok.
	 */
	if (is_async == FALSE) {
		(void) biowait(bp);
		rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp);
	}

	return (rval);
}
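/*
 * Illustrative usage (added commentary, not in the original source;
 * "my_flush_done" and "my_cookie" are hypothetical names):
 *
 *	Synchronous flush of all caches:
 *		rval = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
 *
 *	Asynchronous flush of the volatile cache only:
 *		struct dk_callback dkc;
 *		dkc.dkc_callback = my_flush_done;
 *		dkc.dkc_cookie   = my_cookie;
 *		dkc.dkc_flag     = FLUSH_VOLATILE;
 *		rval = sd_send_scsi_SYNCHRONIZE_CACHE(un, &dkc);
 *
 * In the asynchronous case my_flush_done() is invoked from the biodone
 * handler below with the final errno-style status.
 */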
static int
sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp)
{
	struct sd_uscsi_info *uip;
	struct uscsi_cmd *uscmd;
	uint8_t *sense_buf;
	struct sd_lun *un;
	int status;
	union scsi_cdb *cdb;

	uip = (struct sd_uscsi_info *)(bp->b_private);
	ASSERT(uip != NULL);

	uscmd = uip->ui_cmdp;
	ASSERT(uscmd != NULL);

	sense_buf = (uint8_t *)uscmd->uscsi_rqbuf;
	ASSERT(sense_buf != NULL);

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	ASSERT(un != NULL);

	cdb = (union scsi_cdb *)uscmd->uscsi_cdb;

	status = geterror(bp);
	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (uscmd->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			/* Ignore reservation conflict */
			status = 0;
			goto done;

		case STATUS_CHECK:
			if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key(sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				/* Ignore Illegal Request error */
				if (cdb->cdb_un.tag & SD_SYNC_NV_BIT) {
					mutex_enter(SD_MUTEX(un));
					un->un_f_sync_nv_supported = FALSE;
					mutex_exit(SD_MUTEX(un));
					status = 0;
					SD_TRACE(SD_LOG_IO, un,
					    "un_f_sync_nv_supported \
					    is set to false.\n");
					goto done;
				}

				mutex_enter(SD_MUTEX(un));
				un->un_f_sync_cache_supported = FALSE;
				mutex_exit(SD_MUTEX(un));
				SD_TRACE(SD_LOG_IO, un,
				    "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \
				    un_f_sync_cache_supported set to false \
				    with asc = %x, ascq = %x\n",
				    scsi_sense_asc(sense_buf),
				    scsi_sense_ascq(sense_buf));
				status = ENOTSUP;
				goto done;
			}
			break;
		default:
			break;
		}
		/* FALLTHRU */
	default:
		/*
		 * Turn on the un_f_sync_cache_required flag
		 * since the SYNC CACHE command failed
		 */
		mutex_enter(SD_MUTEX(un));
		un->un_f_sync_cache_required = TRUE;
		mutex_exit(SD_MUTEX(un));

		/*
		 * Don't log an error message if this device
		 * has removable media.
		 */
		if (!un->un_f_has_removable_media) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "SYNCHRONIZE CACHE command failed (%d)\n", status);
		}
		break;
	}

done:
	if (uip->ui_dkc.dkc_callback != NULL) {
		(*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
	}

	ASSERT((bp->b_flags & B_REMAPPED) == 0);
	freerbuf(bp);
	kmem_free(uip, sizeof (struct sd_uscsi_info));
	kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
	kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
	kmem_free(uscmd, sizeof (struct uscsi_cmd));

	return (status);
}
/*
 *    Function: sd_send_scsi_GET_CONFIGURATION
 *
 * Description: Issues the get configuration command to the device.
 *		Called from sd_check_for_writable_cd & sd_get_media_info;
 *		the caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN.
 *
 *   Arguments: ssc
 *		ucmdbuf
 *		rqbuf
 *		rqbuflen
 *		bufaddr
 *		buflen
 *		path_flag
 *
 * Return Code: 0   - Success
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.  Does not return until command is completed.
 *
 */

static int
sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf,
	uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
	int path_flag)
{
	char	cdb[CDB_GROUP1];
	int	status;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT(ucmdbuf != NULL);
	ASSERT(rqbuf != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);

	bzero(cdb, sizeof (cdb));
	bzero(ucmdbuf, sizeof (struct uscsi_cmd));
	bzero(rqbuf, rqbuflen);
	bzero(bufaddr, buflen);

	/*
	 * Set up cdb field for the get configuration command.
	 */
	cdb[0] = SCMD_GET_CONFIGURATION;
	cdb[1] = 0x02;  /* Requested Type */
	cdb[8] = SD_PROFILE_HEADER_LEN;
	ucmdbuf->uscsi_cdb = cdb;
	ucmdbuf->uscsi_cdblen = CDB_GROUP1;
	ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
	ucmdbuf->uscsi_buflen = buflen;
	ucmdbuf->uscsi_timeout = sd_io_time;
	ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
	ucmdbuf->uscsi_rqlen = rqbuflen;
	ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;

	status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;  /* Success! */
	case EIO:
		switch (ucmdbuf->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO,
		    "sd_send_scsi_GET_CONFIGURATION: data",
		    (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_CONFIGURATION: exit\n");

	return (status);
}
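/*
 * CDB sketch for the command above (added commentary, not in the original
 * source).  GET CONFIGURATION (0x46) is a 10-byte MMC command:
 *
 *	cdb[0]   = 0x46	opcode
 *	cdb[1]   = 0x02	RT field: return the feature header plus only the
 *			descriptor identified by the starting feature number
 *	cdb[2-3] =	starting feature number (0 here; the requested
 *			feature in the _feature_ variant below)
 *	cdb[7-8] =	allocation length, big-endian
 *
 * With cdb[8] = SD_PROFILE_HEADER_LEN only the feature header is
 * transferred, which is enough to read the device's current profile.
 */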
/*
 *    Function: sd_send_scsi_feature_GET_CONFIGURATION
 *
 * Description: Issues the get configuration command to the device to
 *		retrieve a specific feature.  Called from
 *		sd_check_for_writable_cd & sd_set_mmc_caps.
 *
 *   Arguments: ssc
 *		ucmdbuf
 *		rqbuf
 *		rqbuflen
 *		bufaddr
 *		buflen
 *		feature
 *
 * Return Code: 0   - Success
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.  Does not return until command is completed.
 *
 */
static int
sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc,
	struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
	uchar_t *bufaddr, uint_t buflen, char feature, int path_flag)
{
	char	cdb[CDB_GROUP1];
	int	status;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT(ucmdbuf != NULL);
	ASSERT(rqbuf != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un);

	bzero(cdb, sizeof (cdb));
	bzero(ucmdbuf, sizeof (struct uscsi_cmd));
	bzero(rqbuf, rqbuflen);
	bzero(bufaddr, buflen);

	/*
	 * Set up cdb field for the get configuration command.
	 */
	cdb[0] = SCMD_GET_CONFIGURATION;
	cdb[1] = 0x02;  /* Requested Type */
	cdb[3] = feature;
	cdb[8] = buflen;
	ucmdbuf->uscsi_cdb = cdb;
	ucmdbuf->uscsi_cdblen = CDB_GROUP1;
	ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
	ucmdbuf->uscsi_buflen = buflen;
	ucmdbuf->uscsi_timeout = sd_io_time;
	ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
	ucmdbuf->uscsi_rqlen = rqbuflen;
	ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;

	status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:

		break;  /* Success! */
	case EIO:
		switch (ucmdbuf->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO,
		    "sd_send_scsi_feature_GET_CONFIGURATION: data",
		    (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");

	return (status);
}
/*
 *    Function: sd_send_scsi_MODE_SENSE
 *
 * Description: Utility function for issuing a scsi MODE SENSE command.
 *		Note: This routine uses a consistent implementation for Group0,
 *		Group1, and Group2 commands across all platforms.  ATAPI devices
 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select
 *
 *   Arguments: ssc   - ssc contains pointer to driver soft state (unit)
 *			structure for this target.
 *		cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or
 *			CDB_GROUP[1|2] (10 byte).
 *		bufaddr - buffer for page data retrieved from the target.
 *		buflen - size of page to be retrieved.
 *		page_code - page code of data to be retrieved from the target.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.  Does not return until command is completed.
 */

static int
sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
	size_t buflen, uchar_t page_code, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	int			headlen;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	bzero(bufaddr, buflen);

	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SENSE;
		cdb.cdb_opaque[2] = page_code;
		FORMG0COUNT(&cdb, buflen);
		headlen = MODE_HEADER_LENGTH;
	} else {
		cdb.scc_cmd = SCMD_MODE_SENSE_G1;
		cdb.cdb_opaque[2] = page_code;
		FORMG1COUNT(&cdb, buflen);
		headlen = MODE_HEADER_LENGTH_GRP2;
	}

	ASSERT(headlen <= buflen);
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen	= buflen;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		/*
		 * sr_check_wp() uses the 0x3f page code and checks the header
		 * of the mode page to determine if the target device is
		 * write-protected.  But some USB devices return 0 bytes for
		 * the 0x3f page code.  For this case, make sure that at least
		 * the mode page header is returned.
		 */
		if (buflen - ucmd_buf.uscsi_resid < headlen) {
			status = EIO;
			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
			    "mode page header is not returned");
		}
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");

	return (status);
}
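/*
 * Worked example for the header-length check above (added commentary, not
 * in the original source).  A 6-byte MODE SENSE returns a 4-byte mode
 * parameter header (MODE_HEADER_LENGTH) while the 10-byte form returns an
 * 8-byte header (MODE_HEADER_LENGTH_GRP2).  If a device transfers nothing,
 * uscsi_resid == buflen, so:
 *
 *	buflen - uscsi_resid = 0 < headlen	-> treated as EIO
 *
 * whereas a device that returns at least the header passes the check even
 * if it truncates the page data itself.
 */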
/*
 *    Function: sd_send_scsi_MODE_SELECT
 *
 * Description: Utility function for issuing a scsi MODE SELECT command.
 *		Note: This routine uses a consistent implementation for Group0,
 *		Group1, and Group2 commands across all platforms.  ATAPI devices
 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select
 *
 *   Arguments: ssc   - ssc contains pointer to driver soft state (unit)
 *			structure for this target.
 *		cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or
 *			CDB_GROUP[1|2] (10 byte).
 *		bufaddr - buffer for page data retrieved from the target.
 *		buflen - size of page to be retrieved.
 *		save_page - boolean to determine if SP bit should be set.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.  Does not return until command is completed.
 */
static int
sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
	size_t buflen, uchar_t save_page, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/* Set the PF bit for many third party drives */
	cdb.cdb_opaque[1] = 0x10;

	/* Set the savepage(SP) bit if given */
	if (save_page == SD_SAVE_PAGE) {
		cdb.cdb_opaque[1] |= 0x01;
	}

	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SELECT;
		FORMG0COUNT(&cdb, buflen);
	} else {
		cdb.scc_cmd = SCMD_MODE_SELECT_G1;
		FORMG1COUNT(&cdb, buflen);
	}

	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen	= buflen;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");

	return (status);
}


/*
 *    Function: sd_send_scsi_RDWR
 *
 * Description: Issue a scsi READ or WRITE command with the given parameters.
 *
 *   Arguments: ssc   - ssc contains pointer to driver soft state (unit)
 *			structure for this target.
 *		cmd:	 SCMD_READ or SCMD_WRITE
 *		bufaddr: Address of caller's buffer to receive the RDWR data
 *		buflen:  Length of caller's buffer to receive the RDWR data.
 *		start_block: Block number for the start of the RDWR operation.
 *			 (Assumes target-native block size.)
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.  Does not return until command is completed.
 */

static int
sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
	size_t buflen, daddr_t start_block, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	uint32_t		block_count;
	int			status;
	int			cdbsize;
	uchar_t			flag;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un);

	if (un->un_f_tgt_blocksize_is_valid != TRUE) {
		return (EINVAL);
	}

	mutex_enter(SD_MUTEX(un));
	block_count = SD_BYTES2TGTBLOCKS(un, buflen);
	mutex_exit(SD_MUTEX(un));

	flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE;

	SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: "
	    "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n",
	    bufaddr, buflen, start_block, block_count);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/* Compute CDB size to use */
	if (start_block > 0xffffffff)
		cdbsize = CDB_GROUP4;
	else if ((start_block & 0xFFE00000) ||
	    (un->un_f_cfg_is_atapi == TRUE))
		cdbsize = CDB_GROUP1;
	else
		cdbsize = CDB_GROUP0;

	switch (cdbsize) {
	case CDB_GROUP0:	/* 6-byte CDBs */
		cdb.scc_cmd = cmd;
		FORMG0ADDR(&cdb, start_block);
		FORMG0COUNT(&cdb, block_count);
		break;
	case CDB_GROUP1:	/* 10-byte CDBs */
		cdb.scc_cmd = cmd | SCMD_GROUP1;
		FORMG1ADDR(&cdb, start_block);
		FORMG1COUNT(&cdb, block_count);
		break;
	case CDB_GROUP4:	/* 16-byte CDBs */
		cdb.scc_cmd = cmd | SCMD_GROUP4;
		FORMG4LONGADDR(&cdb, (uint64_t)start_block);
		FORMG4COUNT(&cdb, block_count);
		break;
	case CDB_GROUP5:	/* 12-byte CDBs (currently unsupported) */
	default:
		/* All others reserved */
		return (EINVAL);
	}

	/* Set LUN bit(s) in CDB if this is a SCSI-1 device */
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr	= bufaddr;
	ucmd_buf.uscsi_buflen	= buflen;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= flag | USCSI_RQENABLE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;
	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n");

	return (status);
}
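/*
 * Worked example for the CDB size selection above (added commentary, not in
 * the original source).  A Group 0 (6-byte) CDB carries only a 21-bit LBA,
 * so any start_block with bits set in 0xFFE00000 needs at least a 10-byte
 * CDB, and an LBA above 32 bits needs the 16-byte form:
 *
 *	start_block = 0x00100000    -> fits in 21 bits  -> CDB_GROUP0
 *	start_block = 0x00200000    -> bit 21 set       -> CDB_GROUP1
 *	start_block = 0x100000000   -> above 32 bits    -> CDB_GROUP4
 *
 * ATAPI devices are always given the 10-byte form since they do not
 * implement the 6-byte read/write opcodes.
 */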
/*
 *    Function: sd_send_scsi_LOG_SENSE
 *
 * Description: Issue a scsi LOG_SENSE command with the given parameters.
 *
 *   Arguments: ssc   - ssc contains pointer to driver soft state (unit)
 *			structure for this target.
 *
 * Return Code: 0   - Success
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.  Does not return until command is completed.
 */

static int
sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen,
	uchar_t page_code, uchar_t page_control, uint16_t param_ptr,
	int path_flag)

{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	cdb.scc_cmd = SCMD_LOG_SENSE_G1;
	cdb.cdb_opaque[2] = (page_control << 6) | page_code;
	cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8);
	cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF);
	FORMG1COUNT(&cdb, buflen);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen	= buflen;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) {
				/*
				 * ASC 0x24: INVALID FIELD IN CDB
				 */
				switch (page_code) {
				case START_STOP_CYCLE_PAGE:
					/*
					 * The start stop cycle counter is
					 * implemented as page 0x31 in earlier
					 * generation disks. In new generation
					 * disks the start stop cycle counter is
					 * implemented as page 0xE. To properly
					 * handle this case if an attempt for
					 * log page 0xE is made and fails we
					 * will try again using page 0x31.
					 *
					 * Network storage BU committed to
					 * maintain the page 0x31 for this
					 * purpose and will not have any other
					 * page implemented with page code 0x31
					 * until all disks transition to the
					 * standard page.
					 */
					mutex_enter(SD_MUTEX(un));
					un->un_start_stop_cycle_page =
					    START_STOP_CYCLE_VU_PAGE;
					cdb.cdb_opaque[2] =
					    (char)(page_control << 6) |
					    un->un_start_stop_cycle_page;
					mutex_exit(SD_MUTEX(un));
					sd_ssc_assessment(ssc, SD_FMT_IGNORE);
					status = sd_ssc_send(
					    ssc, &ucmd_buf, FKIOCTL,
					    UIO_SYSSPACE, path_flag);

					break;
				case TEMPERATURE_PAGE:
					status = ENOTTY;
					break;
				default:
					break;
				}
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");

	return (status);
}
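/*
 * CDB byte 2 packing example for LOG SENSE (added commentary, not in the
 * original source).  The two PC (page control) bits share a byte with the
 * six page-code bits:
 *
 *	page_control = 1 (cumulative values), page_code = 0x0E
 *	cdb[2] = (1 << 6) | 0x0E = 0x4E
 *
 * The parameter pointer goes big-endian into bytes 5 and 6, which is what
 * the two shifts above implement.
 */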
/*
 *    Function: sdioctl
 *
 * Description: Driver's ioctl(9e) entry point function.
 *
 *   Arguments: dev     - device number
 *		cmd     - ioctl operation to be performed
 *		arg     - user argument, contains data to be set or reference
 *			  parameter for get
 *		flag    - bit flag, indicating open settings, 32/64 bit type
 *		cred_p  - user credential pointer
 *		rval_p  - calling process return value (OPT)
 *
 * Return Code: EINVAL
 *		ENOTTY
 *		ENXIO
 *		EIO
 *		EFAULT
 *		ENOTSUP
 *		EPERM
 *
 *     Context: Called from the device switch at normal priority.
 */
static int
sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p)
{
	struct sd_lun	*un = NULL;
	int		err = 0;
	int		i = 0;
	cred_t		*cr;
	int		tmprval = EINVAL;
	boolean_t	is_valid;
	sd_ssc_t	*ssc;

	/*
	 * All device accesses go through sdstrategy where we check on suspend
	 * status
	 */
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/* Initialize sd_ssc_t for internal uscsi commands */
	ssc = sd_ssc_init(un);

	is_valid = SD_IS_VALID_LABEL(un);

	/*
	 * Moved this wait from sd_uscsi_strategy to here for
	 * reasons of deadlock prevention.  Internal driver commands,
	 * specifically those to change a device's power level, result
	 * in a call to sd_uscsi_strategy.
	 */
	mutex_enter(SD_MUTEX(un));
	while ((un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}
	/*
	 * Twiddling the counter here protects commands from now
	 * through to the top of sd_uscsi_strategy.  Without the
	 * counter inc. a power down, for example, could get in
	 * after the above check for state is made and before
	 * execution gets to the top of sd_uscsi_strategy.
	 * That would cause problems.
	 */
	un->un_ncmds_in_driver++;
21743 */ 21744 un->un_ncmds_in_driver++; 21745 21746 if (!is_valid && 21747 (flag & (FNDELAY | FNONBLOCK))) { 21748 switch (cmd) { 21749 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 21750 case DKIOCGVTOC: 21751 case DKIOCGEXTVTOC: 21752 case DKIOCGAPART: 21753 case DKIOCPARTINFO: 21754 case DKIOCEXTPARTINFO: 21755 case DKIOCSGEOM: 21756 case DKIOCSAPART: 21757 case DKIOCGETEFI: 21758 case DKIOCPARTITION: 21759 case DKIOCSVTOC: 21760 case DKIOCSEXTVTOC: 21761 case DKIOCSETEFI: 21762 case DKIOCGMBOOT: 21763 case DKIOCSMBOOT: 21764 case DKIOCG_PHYGEOM: 21765 case DKIOCG_VIRTGEOM: 21766 #if defined(__i386) || defined(__amd64) 21767 case DKIOCSETEXTPART: 21768 #endif 21769 /* let cmlb handle it */ 21770 goto skip_ready_valid; 21771 21772 case CDROMPAUSE: 21773 case CDROMRESUME: 21774 case CDROMPLAYMSF: 21775 case CDROMPLAYTRKIND: 21776 case CDROMREADTOCHDR: 21777 case CDROMREADTOCENTRY: 21778 case CDROMSTOP: 21779 case CDROMSTART: 21780 case CDROMVOLCTRL: 21781 case CDROMSUBCHNL: 21782 case CDROMREADMODE2: 21783 case CDROMREADMODE1: 21784 case CDROMREADOFFSET: 21785 case CDROMSBLKMODE: 21786 case CDROMGBLKMODE: 21787 case CDROMGDRVSPEED: 21788 case CDROMSDRVSPEED: 21789 case CDROMCDDA: 21790 case CDROMCDXA: 21791 case CDROMSUBCODE: 21792 if (!ISCD(un)) { 21793 un->un_ncmds_in_driver--; 21794 ASSERT(un->un_ncmds_in_driver >= 0); 21795 mutex_exit(SD_MUTEX(un)); 21796 err = ENOTTY; 21797 goto done_without_assess; 21798 } 21799 break; 21800 case FDEJECT: 21801 case DKIOCEJECT: 21802 case CDROMEJECT: 21803 if (!un->un_f_eject_media_supported) { 21804 un->un_ncmds_in_driver--; 21805 ASSERT(un->un_ncmds_in_driver >= 0); 21806 mutex_exit(SD_MUTEX(un)); 21807 err = ENOTTY; 21808 goto done_without_assess; 21809 } 21810 break; 21811 case DKIOCFLUSHWRITECACHE: 21812 mutex_exit(SD_MUTEX(un)); 21813 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 21814 if (err != 0) { 21815 mutex_enter(SD_MUTEX(un)); 21816 un->un_ncmds_in_driver--; 21817 ASSERT(un->un_ncmds_in_driver >= 0); 21818 mutex_exit(SD_MUTEX(un)); 21819 err = EIO; 21820 goto done_quick_assess; 21821 } 21822 mutex_enter(SD_MUTEX(un)); 21823 /* FALLTHROUGH */ 21824 case DKIOCREMOVABLE: 21825 case DKIOCHOTPLUGGABLE: 21826 case DKIOCINFO: 21827 case DKIOCGMEDIAINFO: 21828 case DKIOCGMEDIAINFOEXT: 21829 case MHIOCENFAILFAST: 21830 case MHIOCSTATUS: 21831 case MHIOCTKOWN: 21832 case MHIOCRELEASE: 21833 case MHIOCGRP_INKEYS: 21834 case MHIOCGRP_INRESV: 21835 case MHIOCGRP_REGISTER: 21836 case MHIOCGRP_RESERVE: 21837 case MHIOCGRP_PREEMPTANDABORT: 21838 case MHIOCGRP_REGISTERANDIGNOREKEY: 21839 case CDROMCLOSETRAY: 21840 case USCSICMD: 21841 goto skip_ready_valid; 21842 default: 21843 break; 21844 } 21845 21846 mutex_exit(SD_MUTEX(un)); 21847 err = sd_ready_and_valid(ssc, SDPART(dev)); 21848 mutex_enter(SD_MUTEX(un)); 21849 21850 if (err != SD_READY_VALID) { 21851 switch (cmd) { 21852 case DKIOCSTATE: 21853 case CDROMGDRVSPEED: 21854 case CDROMSDRVSPEED: 21855 case FDEJECT: /* for eject command */ 21856 case DKIOCEJECT: 21857 case CDROMEJECT: 21858 case DKIOCREMOVABLE: 21859 case DKIOCHOTPLUGGABLE: 21860 break; 21861 default: 21862 if (un->un_f_has_removable_media) { 21863 err = ENXIO; 21864 } else { 21865 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 21866 if (err == SD_RESERVED_BY_OTHERS) { 21867 err = EACCES; 21868 } else { 21869 err = EIO; 21870 } 21871 } 21872 un->un_ncmds_in_driver--; 21873 ASSERT(un->un_ncmds_in_driver >= 0); 21874 mutex_exit(SD_MUTEX(un)); 21875 21876 goto done_without_assess; 21877 } 21878 } 21879 } 21880 21881 skip_ready_valid: 21882 
mutex_exit(SD_MUTEX(un)); 21883 21884 switch (cmd) { 21885 case DKIOCINFO: 21886 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 21887 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 21888 break; 21889 21890 case DKIOCGMEDIAINFO: 21891 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 21892 err = sd_get_media_info(dev, (caddr_t)arg, flag); 21893 break; 21894 21895 case DKIOCGMEDIAINFOEXT: 21896 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFOEXT\n"); 21897 err = sd_get_media_info_ext(dev, (caddr_t)arg, flag); 21898 break; 21899 21900 case DKIOCGGEOM: 21901 case DKIOCGVTOC: 21902 case DKIOCGEXTVTOC: 21903 case DKIOCGAPART: 21904 case DKIOCPARTINFO: 21905 case DKIOCEXTPARTINFO: 21906 case DKIOCSGEOM: 21907 case DKIOCSAPART: 21908 case DKIOCGETEFI: 21909 case DKIOCPARTITION: 21910 case DKIOCSVTOC: 21911 case DKIOCSEXTVTOC: 21912 case DKIOCSETEFI: 21913 case DKIOCGMBOOT: 21914 case DKIOCSMBOOT: 21915 case DKIOCG_PHYGEOM: 21916 case DKIOCG_VIRTGEOM: 21917 #if defined(__i386) || defined(__amd64) 21918 case DKIOCSETEXTPART: 21919 #endif 21920 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 21921 21922 /* TUR should spin up */ 21923 21924 if (un->un_f_has_removable_media) 21925 err = sd_send_scsi_TEST_UNIT_READY(ssc, 21926 SD_CHECK_FOR_MEDIA); 21927 21928 else 21929 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 21930 21931 if (err != 0) 21932 goto done_with_assess; 21933 21934 err = cmlb_ioctl(un->un_cmlbhandle, dev, 21935 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 21936 21937 if ((err == 0) && 21938 ((cmd == DKIOCSETEFI) || 21939 (un->un_f_pkstats_enabled) && 21940 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC || 21941 cmd == DKIOCSEXTVTOC))) { 21942 21943 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 21944 (void *)SD_PATH_DIRECT); 21945 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 21946 sd_set_pstats(un); 21947 SD_TRACE(SD_LOG_IO_PARTITION, un, 21948 "sd_ioctl: un:0x%p pstats created and " 21949 "set\n", un); 21950 } 21951 } 21952 21953 if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) || 21954 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 21955 21956 mutex_enter(SD_MUTEX(un)); 21957 if (un->un_f_devid_supported && 21958 (un->un_f_opt_fab_devid == TRUE)) { 21959 if (un->un_devid == NULL) { 21960 sd_register_devid(ssc, SD_DEVINFO(un), 21961 SD_TARGET_IS_UNRESERVED); 21962 } else { 21963 /* 21964 * The device id for this disk 21965 * has been fabricated. The 21966 * device id must be preserved 21967 * by writing it back out to 21968 * disk. 
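 *
 * In other words, rewriting the label clobbers
 * the on-disk copy of a fabricated device id, so
 * it is rewritten here; if that write fails, the
 * stale in-core devid is freed below (presumably
 * so that a fresh one can be fabricated on a
 * later registration).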
21969 */ 21970 if (sd_write_deviceid(ssc) != 0) { 21971 ddi_devid_free(un->un_devid); 21972 un->un_devid = NULL; 21973 } 21974 } 21975 } 21976 mutex_exit(SD_MUTEX(un)); 21977 } 21978 21979 break; 21980 21981 case DKIOCLOCK: 21982 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 21983 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 21984 SD_PATH_STANDARD); 21985 goto done_with_assess; 21986 21987 case DKIOCUNLOCK: 21988 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 21989 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 21990 SD_PATH_STANDARD); 21991 goto done_with_assess; 21992 21993 case DKIOCSTATE: { 21994 enum dkio_state state; 21995 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 21996 21997 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 21998 err = EFAULT; 21999 } else { 22000 err = sd_check_media(dev, state); 22001 if (err == 0) { 22002 if (ddi_copyout(&un->un_mediastate, (void *)arg, 22003 sizeof (int), flag) != 0) 22004 err = EFAULT; 22005 } 22006 } 22007 break; 22008 } 22009 22010 case DKIOCREMOVABLE: 22011 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 22012 i = un->un_f_has_removable_media ? 1 : 0; 22013 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22014 err = EFAULT; 22015 } else { 22016 err = 0; 22017 } 22018 break; 22019 22020 case DKIOCHOTPLUGGABLE: 22021 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 22022 i = un->un_f_is_hotpluggable ? 1 : 0; 22023 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22024 err = EFAULT; 22025 } else { 22026 err = 0; 22027 } 22028 break; 22029 22030 case DKIOCGTEMPERATURE: 22031 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 22032 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 22033 break; 22034 22035 case MHIOCENFAILFAST: 22036 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 22037 if ((err = drv_priv(cred_p)) == 0) { 22038 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 22039 } 22040 break; 22041 22042 case MHIOCTKOWN: 22043 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 22044 if ((err = drv_priv(cred_p)) == 0) { 22045 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 22046 } 22047 break; 22048 22049 case MHIOCRELEASE: 22050 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 22051 if ((err = drv_priv(cred_p)) == 0) { 22052 err = sd_mhdioc_release(dev); 22053 } 22054 break; 22055 22056 case MHIOCSTATUS: 22057 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 22058 if ((err = drv_priv(cred_p)) == 0) { 22059 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) { 22060 case 0: 22061 err = 0; 22062 break; 22063 case EACCES: 22064 *rval_p = 1; 22065 err = 0; 22066 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22067 break; 22068 default: 22069 err = EIO; 22070 goto done_with_assess; 22071 } 22072 } 22073 break; 22074 22075 case MHIOCQRESERVE: 22076 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 22077 if ((err = drv_priv(cred_p)) == 0) { 22078 err = sd_reserve_release(dev, SD_RESERVE); 22079 } 22080 break; 22081 22082 case MHIOCREREGISTERDEVID: 22083 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 22084 if (drv_priv(cred_p) == EPERM) { 22085 err = EPERM; 22086 } else if (!un->un_f_devid_supported) { 22087 err = ENOTTY; 22088 } else { 22089 err = sd_mhdioc_register_devid(dev); 22090 } 22091 break; 22092 22093 case MHIOCGRP_INKEYS: 22094 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 22095 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 22096 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22097 err = ENOTSUP; 22098 } else { 22099 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 22100 flag); 22101 } 22102 } 
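/*
 * Note: like the other SCSI-3 PGR ioctls handled here,
 * MHIOCGRP_INKEYS fails with ENOTSUP while the target is
 * operating with SCSI-2 RESERVE/RELEASE semantics
 * (un_reservation_type == SD_SCSI2_RESERVATION).
 */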
22103 break; 22104 22105 case MHIOCGRP_INRESV: 22106 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 22107 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 22108 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22109 err = ENOTSUP; 22110 } else { 22111 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 22112 } 22113 } 22114 break; 22115 22116 case MHIOCGRP_REGISTER: 22117 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 22118 if ((err = drv_priv(cred_p)) != EPERM) { 22119 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22120 err = ENOTSUP; 22121 } else if (arg != NULL) { 22122 mhioc_register_t reg; 22123 if (ddi_copyin((void *)arg, ®, 22124 sizeof (mhioc_register_t), flag) != 0) { 22125 err = EFAULT; 22126 } else { 22127 err = 22128 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22129 ssc, SD_SCSI3_REGISTER, 22130 (uchar_t *)®); 22131 if (err != 0) 22132 goto done_with_assess; 22133 } 22134 } 22135 } 22136 break; 22137 22138 case MHIOCGRP_RESERVE: 22139 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 22140 if ((err = drv_priv(cred_p)) != EPERM) { 22141 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22142 err = ENOTSUP; 22143 } else if (arg != NULL) { 22144 mhioc_resv_desc_t resv_desc; 22145 if (ddi_copyin((void *)arg, &resv_desc, 22146 sizeof (mhioc_resv_desc_t), flag) != 0) { 22147 err = EFAULT; 22148 } else { 22149 err = 22150 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22151 ssc, SD_SCSI3_RESERVE, 22152 (uchar_t *)&resv_desc); 22153 if (err != 0) 22154 goto done_with_assess; 22155 } 22156 } 22157 } 22158 break; 22159 22160 case MHIOCGRP_PREEMPTANDABORT: 22161 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 22162 if ((err = drv_priv(cred_p)) != EPERM) { 22163 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22164 err = ENOTSUP; 22165 } else if (arg != NULL) { 22166 mhioc_preemptandabort_t preempt_abort; 22167 if (ddi_copyin((void *)arg, &preempt_abort, 22168 sizeof (mhioc_preemptandabort_t), 22169 flag) != 0) { 22170 err = EFAULT; 22171 } else { 22172 err = 22173 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22174 ssc, SD_SCSI3_PREEMPTANDABORT, 22175 (uchar_t *)&preempt_abort); 22176 if (err != 0) 22177 goto done_with_assess; 22178 } 22179 } 22180 } 22181 break; 22182 22183 case MHIOCGRP_REGISTERANDIGNOREKEY: 22184 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 22185 if ((err = drv_priv(cred_p)) != EPERM) { 22186 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22187 err = ENOTSUP; 22188 } else if (arg != NULL) { 22189 mhioc_registerandignorekey_t r_and_i; 22190 if (ddi_copyin((void *)arg, (void *)&r_and_i, 22191 sizeof (mhioc_registerandignorekey_t), 22192 flag) != 0) { 22193 err = EFAULT; 22194 } else { 22195 err = 22196 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22197 ssc, SD_SCSI3_REGISTERANDIGNOREKEY, 22198 (uchar_t *)&r_and_i); 22199 if (err != 0) 22200 goto done_with_assess; 22201 } 22202 } 22203 } 22204 break; 22205 22206 case USCSICMD: 22207 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 22208 cr = ddi_get_cred(); 22209 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 22210 err = EPERM; 22211 } else { 22212 enum uio_seg uioseg; 22213 22214 uioseg = (flag & FKIOCTL) ? 
UIO_SYSSPACE : 22215 UIO_USERSPACE; 22216 if (un->un_f_format_in_progress == TRUE) { 22217 err = EAGAIN; 22218 break; 22219 } 22220 22221 err = sd_ssc_send(ssc, 22222 (struct uscsi_cmd *)arg, 22223 flag, uioseg, SD_PATH_STANDARD); 22224 if (err != 0) 22225 goto done_with_assess; 22226 else 22227 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 22228 } 22229 break; 22230 22231 case CDROMPAUSE: 22232 case CDROMRESUME: 22233 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 22234 if (!ISCD(un)) { 22235 err = ENOTTY; 22236 } else { 22237 err = sr_pause_resume(dev, cmd); 22238 } 22239 break; 22240 22241 case CDROMPLAYMSF: 22242 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 22243 if (!ISCD(un)) { 22244 err = ENOTTY; 22245 } else { 22246 err = sr_play_msf(dev, (caddr_t)arg, flag); 22247 } 22248 break; 22249 22250 case CDROMPLAYTRKIND: 22251 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 22252 #if defined(__i386) || defined(__amd64) 22253 /* 22254 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 22255 */ 22256 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 22257 #else 22258 if (!ISCD(un)) { 22259 #endif 22260 err = ENOTTY; 22261 } else { 22262 err = sr_play_trkind(dev, (caddr_t)arg, flag); 22263 } 22264 break; 22265 22266 case CDROMREADTOCHDR: 22267 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 22268 if (!ISCD(un)) { 22269 err = ENOTTY; 22270 } else { 22271 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 22272 } 22273 break; 22274 22275 case CDROMREADTOCENTRY: 22276 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 22277 if (!ISCD(un)) { 22278 err = ENOTTY; 22279 } else { 22280 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 22281 } 22282 break; 22283 22284 case CDROMSTOP: 22285 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 22286 if (!ISCD(un)) { 22287 err = ENOTTY; 22288 } else { 22289 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_STOP, 22290 SD_PATH_STANDARD); 22291 goto done_with_assess; 22292 } 22293 break; 22294 22295 case CDROMSTART: 22296 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 22297 if (!ISCD(un)) { 22298 err = ENOTTY; 22299 } else { 22300 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 22301 SD_PATH_STANDARD); 22302 goto done_with_assess; 22303 } 22304 break; 22305 22306 case CDROMCLOSETRAY: 22307 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 22308 if (!ISCD(un)) { 22309 err = ENOTTY; 22310 } else { 22311 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_CLOSE, 22312 SD_PATH_STANDARD); 22313 goto done_with_assess; 22314 } 22315 break; 22316 22317 case FDEJECT: /* for eject command */ 22318 case DKIOCEJECT: 22319 case CDROMEJECT: 22320 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 22321 if (!un->un_f_eject_media_supported) { 22322 err = ENOTTY; 22323 } else { 22324 err = sr_eject(dev); 22325 } 22326 break; 22327 22328 case CDROMVOLCTRL: 22329 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 22330 if (!ISCD(un)) { 22331 err = ENOTTY; 22332 } else { 22333 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 22334 } 22335 break; 22336 22337 case CDROMSUBCHNL: 22338 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 22339 if (!ISCD(un)) { 22340 err = ENOTTY; 22341 } else { 22342 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 22343 } 22344 break; 22345 22346 case CDROMREADMODE2: 22347 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 22348 if (!ISCD(un)) { 22349 err = ENOTTY; 22350 } else if (un->un_f_cfg_is_atapi == TRUE) { 22351 /* 22352 * If the drive supports READ CD, use that instead of 22353 * switching the LBA size via a MODE SELECT 22354 * Block Descriptor 22355 */ 22356 err = 
sr_read_cd_mode2(dev, (caddr_t)arg, flag);
22357 } else {
22358 err = sr_read_mode2(dev, (caddr_t)arg, flag);
22359 }
22360 break;
22361 
22362 case CDROMREADMODE1:
22363 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
22364 if (!ISCD(un)) {
22365 err = ENOTTY;
22366 } else {
22367 err = sr_read_mode1(dev, (caddr_t)arg, flag);
22368 }
22369 break;
22370 
22371 case CDROMREADOFFSET:
22372 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
22373 if (!ISCD(un)) {
22374 err = ENOTTY;
22375 } else {
22376 err = sr_read_sony_session_offset(dev, (caddr_t)arg,
22377 flag);
22378 }
22379 break;
22380 
22381 case CDROMSBLKMODE:
22382 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
22383 /*
22384 * There is no means of changing the block size on ATAPI
22385 * drives, thus return ENOTTY if the drive type is ATAPI
22386 */
22387 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
22388 err = ENOTTY;
22389 } else if (un->un_f_mmc_cap == TRUE) {
22390 
22391 /*
22392 * MMC Devices do not support changing the
22393 * logical block size
22394 *
22395 * Note: EINVAL is being returned instead of ENOTTY to
22396 * maintain consistency with the original mmc
22397 * driver update.
22398 */
22399 err = EINVAL;
22400 } else {
22401 mutex_enter(SD_MUTEX(un));
22402 if ((!(un->un_exclopen & (1<<SDPART(dev)))) ||
22403 (un->un_ncmds_in_transport > 0)) {
22404 mutex_exit(SD_MUTEX(un));
22405 err = EINVAL;
22406 } else {
22407 mutex_exit(SD_MUTEX(un));
22408 err = sr_change_blkmode(dev, cmd, arg, flag);
22409 }
22410 }
22411 break;
22412 
22413 case CDROMGBLKMODE:
22414 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
22415 if (!ISCD(un)) {
22416 err = ENOTTY;
22417 } else if ((un->un_f_cfg_is_atapi != FALSE) &&
22418 (un->un_f_blockcount_is_valid != FALSE)) {
22419 /*
22420 * Drive is an ATAPI drive so return target block
22421 * size for ATAPI drives since we cannot change the
22422 * blocksize on ATAPI drives. Used primarily to detect
22423 * if an ATAPI cdrom is present.
22424 */
22425 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
22426 sizeof (int), flag) != 0) {
22427 err = EFAULT;
22428 } else {
22429 err = 0;
22430 }
22431 
22432 } else {
22433 /*
22434 * Drive supports changing block sizes via a Mode
22435 * Select.
22436 */
22437 err = sr_change_blkmode(dev, cmd, arg, flag);
22438 }
22439 break;
22440 
22441 case CDROMGDRVSPEED:
22442 case CDROMSDRVSPEED:
22443 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
22444 if (!ISCD(un)) {
22445 err = ENOTTY;
22446 } else if (un->un_f_mmc_cap == TRUE) {
22447 /*
22448 * Note: In the future the driver implementation
22449 * for getting and
22450 * setting cd speed should entail:
22451 * 1) If non-mmc try the Toshiba mode page
22452 * (sr_change_speed)
22453 * 2) If mmc but no support for Real Time Streaming try
22454 * the SET CD SPEED (0xBB) command
22455 * (sr_atapi_change_speed)
22456 * 3) If mmc and support for Real Time Streaming
22457 * try the GET PERFORMANCE and SET STREAMING
22458 * commands (not yet implemented, 4380808)
22459 */
22460 /*
22461 * As per recent MMC spec, CD-ROM speed is variable
22462 * and changes with LBA. Since there is no such
22463 * thing as drive speed now, fail this ioctl.
22464 *
22465 * Note: EINVAL is returned for consistency with the
22466 * original implementation, which included support for
22467 * getting the drive speed of mmc devices but not
22468 * setting the drive speed. Thus EINVAL would be
22469 * returned if a set request was made for an mmc device.
22470 * We no longer support get or set speed for
22471 * mmc but need to remain consistent with regard
22472 * to the error code returned.
22473 */
22474 err = EINVAL;
22475 } else if (un->un_f_cfg_is_atapi == TRUE) {
22476 err = sr_atapi_change_speed(dev, cmd, arg, flag);
22477 } else {
22478 err = sr_change_speed(dev, cmd, arg, flag);
22479 }
22480 break;
22481 
22482 case CDROMCDDA:
22483 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n");
22484 if (!ISCD(un)) {
22485 err = ENOTTY;
22486 } else {
22487 err = sr_read_cdda(dev, (void *)arg, flag);
22488 }
22489 break;
22490 
22491 case CDROMCDXA:
22492 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n");
22493 if (!ISCD(un)) {
22494 err = ENOTTY;
22495 } else {
22496 err = sr_read_cdxa(dev, (caddr_t)arg, flag);
22497 }
22498 break;
22499 
22500 case CDROMSUBCODE:
22501 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n");
22502 if (!ISCD(un)) {
22503 err = ENOTTY;
22504 } else {
22505 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag);
22506 }
22507 break;
22508 
22509 
22510 #ifdef SDDEBUG
22511 /* RESET/ABORTS testing ioctls */
22512 case DKIOCRESET: {
22513 int reset_level;
22514 
22515 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) {
22516 err = EFAULT;
22517 } else {
22518 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: "
22519 "reset_level = 0x%x\n", reset_level);
22520 if (scsi_reset(SD_ADDRESS(un), reset_level)) {
22521 err = 0;
22522 } else {
22523 err = EIO;
22524 }
22525 }
22526 break;
22527 }
22528 
22529 case DKIOCABORT:
22530 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n");
22531 if (scsi_abort(SD_ADDRESS(un), NULL)) {
22532 err = 0;
22533 } else {
22534 err = EIO;
22535 }
22536 break;
22537 #endif
22538 
22539 #ifdef SD_FAULT_INJECTION
22540 /* SDIOC FaultInjection testing ioctls */
22541 case SDIOCSTART:
22542 case SDIOCSTOP:
22543 case SDIOCINSERTPKT:
22544 case SDIOCINSERTXB:
22545 case SDIOCINSERTUN:
22546 case SDIOCINSERTARQ:
22547 case SDIOCPUSH:
22548 case SDIOCRETRIEVE:
22549 case SDIOCRUN:
22550 SD_INFO(SD_LOG_SDTEST, un, "sdioctl: "
22551 "SDIOC detected cmd:0x%X:\n", cmd);
22552 /* call error generator */
22553 sd_faultinjection_ioctl(cmd, arg, un);
22554 err = 0;
22555 break;
22556 
22557 #endif /* SD_FAULT_INJECTION */
22558 
22559 case DKIOCFLUSHWRITECACHE:
22560 {
22561 struct dk_callback *dkc = (struct dk_callback *)arg;
22562 
22563 mutex_enter(SD_MUTEX(un));
22564 if (!un->un_f_sync_cache_supported ||
22565 !un->un_f_write_cache_enabled) {
22566 err = un->un_f_sync_cache_supported ?
22567 0 : ENOTSUP;
22568 mutex_exit(SD_MUTEX(un));
22569 if ((flag & FKIOCTL) && dkc != NULL &&
22570 dkc->dkc_callback != NULL) {
22571 (*dkc->dkc_callback)(dkc->dkc_cookie,
22572 err);
22573 /*
22574 * Did callback and reported error.
22575 * Since we did a callback, ioctl
22576 * should return 0.
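 *
 * A hypothetical in-kernel caller of the asynchronous
 * form might look like this (illustrative sketch only;
 * my_flush_done, my_state and lh are invented names):
 *
 *	struct dk_callback dkc;
 *
 *	dkc.dkc_callback = my_flush_done;
 *	dkc.dkc_cookie = my_state;
 *	(void) ldi_ioctl(lh, DKIOCFLUSHWRITECACHE,
 *	    (intptr_t)&dkc, FKIOCTL, kcred, NULL);
 *
 * With FKIOCTL set and a non-NULL dkc_callback, the
 * ioctl returns as soon as the SYNCHRONIZE CACHE
 * command has been issued, and the completion status
 * is delivered through the callback instead.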
22577 */ 22578 err = 0; 22579 } 22580 break; 22581 } 22582 mutex_exit(SD_MUTEX(un)); 22583 22584 if ((flag & FKIOCTL) && dkc != NULL && 22585 dkc->dkc_callback != NULL) { 22586 /* async SYNC CACHE request */ 22587 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 22588 } else { 22589 /* synchronous SYNC CACHE request */ 22590 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 22591 } 22592 } 22593 break; 22594 22595 case DKIOCGETWCE: { 22596 22597 int wce; 22598 22599 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) { 22600 break; 22601 } 22602 22603 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 22604 err = EFAULT; 22605 } 22606 break; 22607 } 22608 22609 case DKIOCSETWCE: { 22610 22611 int wce, sync_supported; 22612 22613 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 22614 err = EFAULT; 22615 break; 22616 } 22617 22618 /* 22619 * Synchronize multiple threads trying to enable 22620 * or disable the cache via the un_f_wcc_cv 22621 * condition variable. 22622 */ 22623 mutex_enter(SD_MUTEX(un)); 22624 22625 /* 22626 * Don't allow the cache to be enabled if the 22627 * config file has it disabled. 22628 */ 22629 if (un->un_f_opt_disable_cache && wce) { 22630 mutex_exit(SD_MUTEX(un)); 22631 err = EINVAL; 22632 break; 22633 } 22634 22635 /* 22636 * Wait for write cache change in progress 22637 * bit to be clear before proceeding. 22638 */ 22639 while (un->un_f_wcc_inprog) 22640 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 22641 22642 un->un_f_wcc_inprog = 1; 22643 22644 if (un->un_f_write_cache_enabled && wce == 0) { 22645 /* 22646 * Disable the write cache. Don't clear 22647 * un_f_write_cache_enabled until after 22648 * the mode select and flush are complete. 22649 */ 22650 sync_supported = un->un_f_sync_cache_supported; 22651 22652 /* 22653 * If cache flush is suppressed, we assume that the 22654 * controller firmware will take care of managing the 22655 * write cache for us: no need to explicitly 22656 * disable it. 22657 */ 22658 if (!un->un_f_suppress_cache_flush) { 22659 mutex_exit(SD_MUTEX(un)); 22660 if ((err = sd_cache_control(ssc, 22661 SD_CACHE_NOCHANGE, 22662 SD_CACHE_DISABLE)) == 0 && 22663 sync_supported) { 22664 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 22665 NULL); 22666 } 22667 } else { 22668 mutex_exit(SD_MUTEX(un)); 22669 } 22670 22671 mutex_enter(SD_MUTEX(un)); 22672 if (err == 0) { 22673 un->un_f_write_cache_enabled = 0; 22674 } 22675 22676 } else if (!un->un_f_write_cache_enabled && wce != 0) { 22677 /* 22678 * Set un_f_write_cache_enabled first, so there is 22679 * no window where the cache is enabled, but the 22680 * bit says it isn't. 22681 */ 22682 un->un_f_write_cache_enabled = 1; 22683 22684 /* 22685 * If cache flush is suppressed, we assume that the 22686 * controller firmware will take care of managing the 22687 * write cache for us: no need to explicitly 22688 * enable it. 
22689 */ 22690 if (!un->un_f_suppress_cache_flush) { 22691 mutex_exit(SD_MUTEX(un)); 22692 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE, 22693 SD_CACHE_ENABLE); 22694 } else { 22695 mutex_exit(SD_MUTEX(un)); 22696 } 22697 22698 mutex_enter(SD_MUTEX(un)); 22699 22700 if (err) { 22701 un->un_f_write_cache_enabled = 0; 22702 } 22703 } 22704 22705 un->un_f_wcc_inprog = 0; 22706 cv_broadcast(&un->un_wcc_cv); 22707 mutex_exit(SD_MUTEX(un)); 22708 break; 22709 } 22710 22711 default: 22712 err = ENOTTY; 22713 break; 22714 } 22715 mutex_enter(SD_MUTEX(un)); 22716 un->un_ncmds_in_driver--; 22717 ASSERT(un->un_ncmds_in_driver >= 0); 22718 mutex_exit(SD_MUTEX(un)); 22719 22720 22721 done_without_assess: 22722 sd_ssc_fini(ssc); 22723 22724 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22725 return (err); 22726 22727 done_with_assess: 22728 mutex_enter(SD_MUTEX(un)); 22729 un->un_ncmds_in_driver--; 22730 ASSERT(un->un_ncmds_in_driver >= 0); 22731 mutex_exit(SD_MUTEX(un)); 22732 22733 done_quick_assess: 22734 if (err != 0) 22735 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22736 /* Uninitialize sd_ssc_t pointer */ 22737 sd_ssc_fini(ssc); 22738 22739 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22740 return (err); 22741 } 22742 22743 22744 /* 22745 * Function: sd_dkio_ctrl_info 22746 * 22747 * Description: This routine is the driver entry point for handling controller 22748 * information ioctl requests (DKIOCINFO). 22749 * 22750 * Arguments: dev - the device number 22751 * arg - pointer to user provided dk_cinfo structure 22752 * specifying the controller type and attributes. 22753 * flag - this argument is a pass through to ddi_copyxxx() 22754 * directly from the mode argument of ioctl(). 22755 * 22756 * Return Code: 0 22757 * EFAULT 22758 * ENXIO 22759 */ 22760 22761 static int 22762 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 22763 { 22764 struct sd_lun *un = NULL; 22765 struct dk_cinfo *info; 22766 dev_info_t *pdip; 22767 int lun, tgt; 22768 22769 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22770 return (ENXIO); 22771 } 22772 22773 info = (struct dk_cinfo *) 22774 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 22775 22776 switch (un->un_ctype) { 22777 case CTYPE_CDROM: 22778 info->dki_ctype = DKC_CDROM; 22779 break; 22780 default: 22781 info->dki_ctype = DKC_SCSI_CCS; 22782 break; 22783 } 22784 pdip = ddi_get_parent(SD_DEVINFO(un)); 22785 info->dki_cnum = ddi_get_instance(pdip); 22786 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 22787 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 22788 } else { 22789 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 22790 DK_DEVLEN - 1); 22791 } 22792 22793 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22794 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 22795 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22796 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 22797 22798 /* Unit Information */ 22799 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 22800 info->dki_slave = ((tgt << 3) | lun); 22801 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 22802 DK_DEVLEN - 1); 22803 info->dki_flags = DKI_FMTVOL; 22804 info->dki_partition = SDPART(dev); 22805 22806 /* Max Transfer size of this device in blocks */ 22807 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 22808 info->dki_addr = 0; 22809 info->dki_space = 0; 22810 info->dki_prio = 0; 22811 info->dki_vec = 0; 22812 22813 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 22814 kmem_free(info, sizeof 
(struct dk_cinfo));
22815 return (EFAULT);
22816 } else {
22817 kmem_free(info, sizeof (struct dk_cinfo));
22818 return (0);
22819 }
22820 }
22821 
22822 
22823 /*
22824 * Function: sd_get_media_info
22825 *
22826 * Description: This routine is the driver entry point for handling ioctl
22827 * requests for the media type or command set profile used by the
22828 * drive to operate on the media (DKIOCGMEDIAINFO).
22829 *
22830 * Arguments: dev - the device number
22831 * arg - pointer to user provided dk_minfo structure
22832 * specifying the media type, logical block size and
22833 * drive capacity.
22834 * flag - this argument is a pass through to ddi_copyxxx()
22835 * directly from the mode argument of ioctl().
22836 *
22837 * Return Code: 0
22838 * EACCES
22839 * EFAULT
22840 * ENXIO
22841 * EIO
22842 */
22843 
22844 static int
22845 sd_get_media_info(dev_t dev, caddr_t arg, int flag)
22846 {
22847 struct sd_lun *un = NULL;
22848 struct uscsi_cmd com;
22849 struct scsi_inquiry *sinq;
22850 struct dk_minfo media_info;
22851 u_longlong_t media_capacity;
22852 uint64_t capacity;
22853 uint_t lbasize;
22854 uchar_t *out_data;
22855 uchar_t *rqbuf;
22856 int rval = 0;
22857 int rtn;
22858 sd_ssc_t *ssc;
22859 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
22860 (un->un_state == SD_STATE_OFFLINE)) {
22861 return (ENXIO);
22862 }
22863 
22864 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n");
22865 
22866 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
22867 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
22868 
22869 /* Issue a TUR to determine if the drive is ready with media present */
22870 ssc = sd_ssc_init(un);
22871 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA);
22872 if (rval == ENXIO) {
22873 goto done;
22874 } else if (rval != 0) {
22875 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
22876 }
22877 
22878 /* Now get configuration data */
22879 if (ISCD(un)) {
22880 media_info.dki_media_type = DK_CDROM;
22881 
22882 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */
22883 if (un->un_f_mmc_cap == TRUE) {
22884 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf,
22885 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
22886 SD_PATH_STANDARD);
22887 
22888 if (rtn) {
22889 /*
22890 * We ignore all failures for CD and need to
22891 * put the assessment before processing code
22892 * to avoid missing assessment for FMA.
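 *
 * (For reference: with fixed-format sense data,
 * rqbuf[2] carries the sense key and rqbuf[12] the
 * additional sense code, so the test below tolerates
 * only ILLEGAL REQUEST with ASC 0x20, INVALID
 * COMMAND OPERATION CODE.)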
22893 */ 22894 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22895 /* 22896 * Failed for other than an illegal request 22897 * or command not supported 22898 */ 22899 if ((com.uscsi_status == STATUS_CHECK) && 22900 (com.uscsi_rqstatus == STATUS_GOOD)) { 22901 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 22902 (rqbuf[12] != 0x20)) { 22903 rval = EIO; 22904 goto no_assessment; 22905 } 22906 } 22907 } else { 22908 /* 22909 * The GET CONFIGURATION command succeeded 22910 * so set the media type according to the 22911 * returned data 22912 */ 22913 media_info.dki_media_type = out_data[6]; 22914 media_info.dki_media_type <<= 8; 22915 media_info.dki_media_type |= out_data[7]; 22916 } 22917 } 22918 } else { 22919 /* 22920 * The profile list is not available, so we attempt to identify 22921 * the media type based on the inquiry data 22922 */ 22923 sinq = un->un_sd->sd_inq; 22924 if ((sinq->inq_dtype == DTYPE_DIRECT) || 22925 (sinq->inq_dtype == DTYPE_OPTICAL)) { 22926 /* This is a direct access device or optical disk */ 22927 media_info.dki_media_type = DK_FIXED_DISK; 22928 22929 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 22930 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 22931 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 22932 media_info.dki_media_type = DK_ZIP; 22933 } else if ( 22934 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 22935 media_info.dki_media_type = DK_JAZ; 22936 } 22937 } 22938 } else { 22939 /* 22940 * Not a CD, direct access or optical disk so return 22941 * unknown media 22942 */ 22943 media_info.dki_media_type = DK_UNKNOWN; 22944 } 22945 } 22946 22947 /* Now read the capacity so we can provide the lbasize and capacity */ 22948 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 22949 SD_PATH_DIRECT); 22950 switch (rval) { 22951 case 0: 22952 break; 22953 case EACCES: 22954 rval = EACCES; 22955 goto done; 22956 default: 22957 rval = EIO; 22958 goto done; 22959 } 22960 22961 /* 22962 * If lun is expanded dynamically, update the un structure. 22963 */ 22964 mutex_enter(SD_MUTEX(un)); 22965 if ((un->un_f_blockcount_is_valid == TRUE) && 22966 (un->un_f_tgt_blocksize_is_valid == TRUE) && 22967 (capacity > un->un_blockcount)) { 22968 sd_update_block_info(un, lbasize, capacity); 22969 } 22970 mutex_exit(SD_MUTEX(un)); 22971 22972 media_info.dki_lbsize = lbasize; 22973 media_capacity = capacity; 22974 22975 /* 22976 * sd_send_scsi_READ_CAPACITY() reports capacity in 22977 * un->un_sys_blocksize chunks. So we need to convert it into 22978 * cap.lbasize chunks. 22979 */ 22980 media_capacity *= un->un_sys_blocksize; 22981 media_capacity /= lbasize; 22982 media_info.dki_capacity = media_capacity; 22983 22984 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 22985 rval = EFAULT; 22986 /* Put goto. Anybody might add some code below in future */ 22987 goto no_assessment; 22988 } 22989 done: 22990 if (rval != 0) { 22991 if (rval == EIO) 22992 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 22993 else 22994 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22995 } 22996 no_assessment: 22997 sd_ssc_fini(ssc); 22998 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 22999 kmem_free(rqbuf, SENSE_LENGTH); 23000 return (rval); 23001 } 23002 23003 /* 23004 * Function: sd_get_media_info_ext 23005 * 23006 * Description: This routine is the driver entry point for handling ioctl 23007 * requests for the media type or command set profile used by the 23008 * drive to operate on the media (DKIOCGMEDIAINFOEXT). 
The
23009 * difference between this ioctl and DKIOCGMEDIAINFO is that the return
23010 * value of this ioctl contains both the logical block size and the
23011 * physical block size.
23012 *
23013 *
23014 * Arguments: dev - the device number
23015 * arg - pointer to user provided dk_minfo_ext structure
23016 * specifying the media type, logical block size,
23017 * physical block size and disk capacity.
23018 * flag - this argument is a pass through to ddi_copyxxx()
23019 * directly from the mode argument of ioctl().
23020 *
23021 * Return Code: 0
23022 * EACCES
23023 * EFAULT
23024 * ENXIO
23025 * EIO
23026 */
23027 
23028 static int
23029 sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag)
23030 {
23031 struct sd_lun *un = NULL;
23032 struct uscsi_cmd com;
23033 struct scsi_inquiry *sinq;
23034 struct dk_minfo_ext media_info_ext;
23035 u_longlong_t media_capacity;
23036 uint64_t capacity;
23037 uint_t lbasize;
23038 uint_t pbsize;
23039 uchar_t *out_data;
23040 uchar_t *rqbuf;
23041 int rval = 0;
23042 int rtn;
23043 sd_ssc_t *ssc;
23044 
23045 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
23046 (un->un_state == SD_STATE_OFFLINE)) {
23047 return (ENXIO);
23048 }
23049 
23050 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info_ext: entry\n");
23051 
23052 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
23053 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
23054 ssc = sd_ssc_init(un);
23055 
23056 /* Issue a TUR to determine if the drive is ready with media present */
23057 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA);
23058 if (rval == ENXIO) {
23059 goto done;
23060 } else if (rval != 0) {
23061 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23062 }
23063 
23064 /* Now get configuration data */
23065 if (ISCD(un)) {
23066 media_info_ext.dki_media_type = DK_CDROM;
23067 
23068 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */
23069 if (un->un_f_mmc_cap == TRUE) {
23070 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf,
23071 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
23072 SD_PATH_STANDARD);
23073 
23074 if (rtn) {
23075 /*
23076 * We ignore all failures for CD and need to
23077 * put the assessment before processing code
23078 * to avoid missing assessment for FMA.
23079 */ 23080 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23081 /* 23082 * Failed for other than an illegal request 23083 * or command not supported 23084 */ 23085 if ((com.uscsi_status == STATUS_CHECK) && 23086 (com.uscsi_rqstatus == STATUS_GOOD)) { 23087 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 23088 (rqbuf[12] != 0x20)) { 23089 rval = EIO; 23090 goto no_assessment; 23091 } 23092 } 23093 } else { 23094 /* 23095 * The GET CONFIGURATION command succeeded 23096 * so set the media type according to the 23097 * returned data 23098 */ 23099 media_info_ext.dki_media_type = out_data[6]; 23100 media_info_ext.dki_media_type <<= 8; 23101 media_info_ext.dki_media_type |= out_data[7]; 23102 } 23103 } 23104 } else { 23105 /* 23106 * The profile list is not available, so we attempt to identify 23107 * the media type based on the inquiry data 23108 */ 23109 sinq = un->un_sd->sd_inq; 23110 if ((sinq->inq_dtype == DTYPE_DIRECT) || 23111 (sinq->inq_dtype == DTYPE_OPTICAL)) { 23112 /* This is a direct access device or optical disk */ 23113 media_info_ext.dki_media_type = DK_FIXED_DISK; 23114 23115 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 23116 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 23117 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 23118 media_info_ext.dki_media_type = DK_ZIP; 23119 } else if ( 23120 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 23121 media_info_ext.dki_media_type = DK_JAZ; 23122 } 23123 } 23124 } else { 23125 /* 23126 * Not a CD, direct access or optical disk so return 23127 * unknown media 23128 */ 23129 media_info_ext.dki_media_type = DK_UNKNOWN; 23130 } 23131 } 23132 23133 /* 23134 * Now read the capacity so we can provide the lbasize, 23135 * pbsize and capacity. 23136 */ 23137 rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize, &pbsize, 23138 SD_PATH_DIRECT); 23139 23140 if (rval != 0) { 23141 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 23142 SD_PATH_DIRECT); 23143 23144 switch (rval) { 23145 case 0: 23146 pbsize = lbasize; 23147 media_capacity = capacity; 23148 /* 23149 * sd_send_scsi_READ_CAPACITY() reports capacity in 23150 * un->un_sys_blocksize chunks. So we need to convert 23151 * it into cap.lbsize chunks. 23152 */ 23153 if (un->un_f_has_removable_media) { 23154 media_capacity *= un->un_sys_blocksize; 23155 media_capacity /= lbasize; 23156 } 23157 break; 23158 case EACCES: 23159 rval = EACCES; 23160 goto done; 23161 default: 23162 rval = EIO; 23163 goto done; 23164 } 23165 } else { 23166 media_capacity = capacity; 23167 } 23168 23169 /* 23170 * If lun is expanded dynamically, update the un structure. 
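 *
 * (The un_sys_blocksize-to-lbasize conversion above
 * mirrors the one in sd_get_media_info(); as a purely
 * illustrative example, 1000000 512-byte system
 * blocks on 2048-byte media convert to
 * 1000000 * 512 / 2048 = 250000 logical blocks.)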
23171 */ 23172 mutex_enter(SD_MUTEX(un)); 23173 if ((un->un_f_blockcount_is_valid == TRUE) && 23174 (un->un_f_tgt_blocksize_is_valid == TRUE) && 23175 (capacity > un->un_blockcount)) { 23176 sd_update_block_info(un, lbasize, capacity); 23177 } 23178 mutex_exit(SD_MUTEX(un)); 23179 23180 media_info_ext.dki_lbsize = lbasize; 23181 media_info_ext.dki_capacity = media_capacity; 23182 media_info_ext.dki_pbsize = pbsize; 23183 23184 if (ddi_copyout(&media_info_ext, arg, sizeof (struct dk_minfo_ext), 23185 flag)) { 23186 rval = EFAULT; 23187 goto no_assessment; 23188 } 23189 done: 23190 if (rval != 0) { 23191 if (rval == EIO) 23192 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23193 else 23194 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23195 } 23196 no_assessment: 23197 sd_ssc_fini(ssc); 23198 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 23199 kmem_free(rqbuf, SENSE_LENGTH); 23200 return (rval); 23201 } 23202 23203 /* 23204 * Function: sd_check_media 23205 * 23206 * Description: This utility routine implements the functionality for the 23207 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 23208 * driver state changes from that specified by the user 23209 * (inserted or ejected). For example, if the user specifies 23210 * DKIO_EJECTED and the current media state is inserted this 23211 * routine will immediately return DKIO_INSERTED. However, if the 23212 * current media state is not inserted the user thread will be 23213 * blocked until the drive state changes. If DKIO_NONE is specified 23214 * the user thread will block until a drive state change occurs. 23215 * 23216 * Arguments: dev - the device number 23217 * state - user pointer to a dkio_state, updated with the current 23218 * drive state at return. 23219 * 23220 * Return Code: ENXIO 23221 * EIO 23222 * EAGAIN 23223 * EINTR 23224 */ 23225 23226 static int 23227 sd_check_media(dev_t dev, enum dkio_state state) 23228 { 23229 struct sd_lun *un = NULL; 23230 enum dkio_state prev_state; 23231 opaque_t token = NULL; 23232 int rval = 0; 23233 sd_ssc_t *ssc; 23234 dev_t sub_dev; 23235 23236 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23237 return (ENXIO); 23238 } 23239 23240 /* 23241 * sub_dev is used when submitting request to scsi watch. 23242 * All submissions are unified to use same device number. 23243 */ 23244 sub_dev = sd_make_device(SD_DEVINFO(un)); 23245 23246 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 23247 23248 ssc = sd_ssc_init(un); 23249 23250 mutex_enter(SD_MUTEX(un)); 23251 23252 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 23253 "state=%x, mediastate=%x\n", state, un->un_mediastate); 23254 23255 prev_state = un->un_mediastate; 23256 23257 /* is there anything to do? */ 23258 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 23259 /* 23260 * submit the request to the scsi_watch service; 23261 * scsi_media_watch_cb() does the real work 23262 */ 23263 mutex_exit(SD_MUTEX(un)); 23264 23265 /* 23266 * This change handles the case where a scsi watch request is 23267 * added to a device that is powered down. To accomplish this 23268 * we power up the device before adding the scsi watch request, 23269 * since the scsi watch sends a TUR directly to the device 23270 * which the device cannot handle if it is powered down. 
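 *
 * (Typical application usage of DKIOCSTATE, for
 * illustration: initialize an enum dkio_state
 * variable to DKIO_NONE, then repeatedly issue the
 * ioctl passing in the last returned state; each
 * call blocks here until the media state differs
 * from the value passed in.)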
23271 */ 23272 if (sd_pm_entry(un) != DDI_SUCCESS) { 23273 mutex_enter(SD_MUTEX(un)); 23274 goto done; 23275 } 23276 23277 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 23278 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 23279 (caddr_t)sub_dev); 23280 23281 sd_pm_exit(un); 23282 23283 mutex_enter(SD_MUTEX(un)); 23284 if (token == NULL) { 23285 rval = EAGAIN; 23286 goto done; 23287 } 23288 23289 /* 23290 * This is a special case IOCTL that doesn't return 23291 * until the media state changes. Routine sdpower 23292 * knows about and handles this so don't count it 23293 * as an active cmd in the driver, which would 23294 * keep the device busy to the pm framework. 23295 * If the count isn't decremented the device can't 23296 * be powered down. 23297 */ 23298 un->un_ncmds_in_driver--; 23299 ASSERT(un->un_ncmds_in_driver >= 0); 23300 23301 /* 23302 * if a prior request had been made, this will be the same 23303 * token, as scsi_watch was designed that way. 23304 */ 23305 un->un_swr_token = token; 23306 un->un_specified_mediastate = state; 23307 23308 /* 23309 * now wait for media change 23310 * we will not be signalled unless mediastate == state but it is 23311 * still better to test for this condition, since there is a 23312 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 23313 */ 23314 SD_TRACE(SD_LOG_COMMON, un, 23315 "sd_check_media: waiting for media state change\n"); 23316 while (un->un_mediastate == state) { 23317 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 23318 SD_TRACE(SD_LOG_COMMON, un, 23319 "sd_check_media: waiting for media state " 23320 "was interrupted\n"); 23321 un->un_ncmds_in_driver++; 23322 rval = EINTR; 23323 goto done; 23324 } 23325 SD_TRACE(SD_LOG_COMMON, un, 23326 "sd_check_media: received signal, state=%x\n", 23327 un->un_mediastate); 23328 } 23329 /* 23330 * Inc the counter to indicate the device once again 23331 * has an active outstanding cmd. 23332 */ 23333 un->un_ncmds_in_driver++; 23334 } 23335 23336 /* invalidate geometry */ 23337 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 23338 sr_ejected(un); 23339 } 23340 23341 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 23342 uint64_t capacity; 23343 uint_t lbasize; 23344 23345 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 23346 mutex_exit(SD_MUTEX(un)); 23347 /* 23348 * Since the following routines use SD_PATH_DIRECT, we must 23349 * call PM directly before the upcoming disk accesses. This 23350 * may cause the disk to be power/spin up. 
23351 */ 23352 23353 if (sd_pm_entry(un) == DDI_SUCCESS) { 23354 rval = sd_send_scsi_READ_CAPACITY(ssc, 23355 &capacity, &lbasize, SD_PATH_DIRECT); 23356 if (rval != 0) { 23357 sd_pm_exit(un); 23358 if (rval == EIO) 23359 sd_ssc_assessment(ssc, 23360 SD_FMT_STATUS_CHECK); 23361 else 23362 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23363 mutex_enter(SD_MUTEX(un)); 23364 goto done; 23365 } 23366 } else { 23367 rval = EIO; 23368 mutex_enter(SD_MUTEX(un)); 23369 goto done; 23370 } 23371 mutex_enter(SD_MUTEX(un)); 23372 23373 sd_update_block_info(un, lbasize, capacity); 23374 23375 /* 23376 * Check if the media in the device is writable or not 23377 */ 23378 if (ISCD(un)) { 23379 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 23380 } 23381 23382 mutex_exit(SD_MUTEX(un)); 23383 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 23384 if ((cmlb_validate(un->un_cmlbhandle, 0, 23385 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 23386 sd_set_pstats(un); 23387 SD_TRACE(SD_LOG_IO_PARTITION, un, 23388 "sd_check_media: un:0x%p pstats created and " 23389 "set\n", un); 23390 } 23391 23392 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 23393 SD_PATH_DIRECT); 23394 23395 sd_pm_exit(un); 23396 23397 if (rval != 0) { 23398 if (rval == EIO) 23399 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23400 else 23401 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23402 } 23403 23404 mutex_enter(SD_MUTEX(un)); 23405 } 23406 done: 23407 sd_ssc_fini(ssc); 23408 un->un_f_watcht_stopped = FALSE; 23409 if (token != NULL && un->un_swr_token != NULL) { 23410 /* 23411 * Use of this local token and the mutex ensures that we avoid 23412 * some race conditions associated with terminating the 23413 * scsi watch. 23414 */ 23415 token = un->un_swr_token; 23416 mutex_exit(SD_MUTEX(un)); 23417 (void) scsi_watch_request_terminate(token, 23418 SCSI_WATCH_TERMINATE_WAIT); 23419 if (scsi_watch_get_ref_count(token) == 0) { 23420 mutex_enter(SD_MUTEX(un)); 23421 un->un_swr_token = (opaque_t)NULL; 23422 } else { 23423 mutex_enter(SD_MUTEX(un)); 23424 } 23425 } 23426 23427 /* 23428 * Update the capacity kstat value, if no media previously 23429 * (capacity kstat is 0) and a media has been inserted 23430 * (un_f_blockcount_is_valid == TRUE) 23431 */ 23432 if (un->un_errstats) { 23433 struct sd_errstats *stp = NULL; 23434 23435 stp = (struct sd_errstats *)un->un_errstats->ks_data; 23436 if ((stp->sd_capacity.value.ui64 == 0) && 23437 (un->un_f_blockcount_is_valid == TRUE)) { 23438 stp->sd_capacity.value.ui64 = 23439 (uint64_t)((uint64_t)un->un_blockcount * 23440 un->un_sys_blocksize); 23441 } 23442 } 23443 mutex_exit(SD_MUTEX(un)); 23444 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 23445 return (rval); 23446 } 23447 23448 23449 /* 23450 * Function: sd_delayed_cv_broadcast 23451 * 23452 * Description: Delayed cv_broadcast to allow for target to recover from media 23453 * insertion. 23454 * 23455 * Arguments: arg - driver soft state (unit) structure 23456 */ 23457 23458 static void 23459 sd_delayed_cv_broadcast(void *arg) 23460 { 23461 struct sd_lun *un = arg; 23462 23463 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 23464 23465 mutex_enter(SD_MUTEX(un)); 23466 un->un_dcvb_timeid = NULL; 23467 cv_broadcast(&un->un_state_cv); 23468 mutex_exit(SD_MUTEX(un)); 23469 } 23470 23471 23472 /* 23473 * Function: sd_media_watch_cb 23474 * 23475 * Description: Callback routine used for support of the DKIOCSTATE ioctl. 
This
23476 * routine processes the TUR sense data and updates the driver
23477 * state if a transition has occurred. The user thread
23478 * (sd_check_media) is then signalled.
23479 *
23480 * Arguments: arg - the device 'dev_t' is used for context to discriminate
23481 * among multiple watches that share this callback function
23482 * resultp - scsi watch facility result packet containing scsi
23483 * packet, status byte and sense data
23484 *
23485 * Return Code: 0 for success, -1 for failure
23486 */
23487 
23488 static int
23489 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
23490 {
23491 struct sd_lun *un;
23492 struct scsi_status *statusp = resultp->statusp;
23493 uint8_t *sensep = (uint8_t *)resultp->sensep;
23494 enum dkio_state state = DKIO_NONE;
23495 dev_t dev = (dev_t)arg;
23496 uchar_t actual_sense_length;
23497 uint8_t skey, asc, ascq;
23498 
23499 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23500 return (-1);
23501 }
23502 actual_sense_length = resultp->actual_sense_length;
23503 
23504 mutex_enter(SD_MUTEX(un));
23505 SD_TRACE(SD_LOG_COMMON, un,
23506 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
23507 *((char *)statusp), (void *)sensep, actual_sense_length);
23508 
23509 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
23510 un->un_mediastate = DKIO_DEV_GONE;
23511 cv_broadcast(&un->un_state_cv);
23512 mutex_exit(SD_MUTEX(un));
23513 
23514 return (0);
23515 }
23516 
23517 /*
23518 * If there was a check condition, then sensep points to valid sense
23519 * data. If status was not a check condition but a reservation or busy
23520 * status, then the new state is DKIO_NONE.
23521 */
23522 if (sensep != NULL) {
23523 skey = scsi_sense_key(sensep);
23524 asc = scsi_sense_asc(sensep);
23525 ascq = scsi_sense_ascq(sensep);
23526 
23527 SD_INFO(SD_LOG_COMMON, un,
23528 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
23529 skey, asc, ascq);
23530 /* This routine only uses up to 13 bytes of sense data. */
23531 if (actual_sense_length >= 13) {
23532 if (skey == KEY_UNIT_ATTENTION) {
23533 if (asc == 0x28) {
23534 state = DKIO_INSERTED;
23535 }
23536 } else if (skey == KEY_NOT_READY) {
23537 /*
23538 * Sense data of 02/06/00 means that the
23539 * drive could not read the media (No
23540 * reference position found). In this case
23541 * to prevent a hang on the DKIOCSTATE IOCTL
23542 * we set the media state to DKIO_INSERTED.
23543 */
23544 if (asc == 0x06 && ascq == 0x00)
23545 state = DKIO_INSERTED;
23546 
23547 /*
23548 * Sense data of 02/04/02 means that the host
23549 * should send a start command. Explicitly
23550 * leave the media state as is
23551 * (inserted), as the media is inserted
23552 * and the host has stopped the device for PM
23553 * reasons. The next true read/write
23554 * to this media will bring the
23555 * device to the right state for
23556 * media access.
23557 */
23558 if (asc == 0x3a) {
23559 state = DKIO_EJECTED;
23560 } else {
23561 /*
23562 * If the drive is busy with an
23563 * operation or long write, keep the
23564 * media in an inserted state.
23565 */
23566 
23567 if ((asc == 0x04) &&
23568 ((ascq == 0x02) ||
23569 (ascq == 0x07) ||
23570 (ascq == 0x08))) {
23571 state = DKIO_INSERTED;
23572 }
23573 }
23574 } else if (skey == KEY_NO_SENSE) {
23575 if ((asc == 0x00) && (ascq == 0x00)) {
23576 /*
23577 * Sense Data 00/00/00 does not provide
23578 * any information about the state of
23579 * the media. Ignore it.
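 *
 * To summarize the mapping implemented above:
 *
 *	UNIT ATTENTION	28h/xx		-> DKIO_INSERTED
 *	NOT READY	06h/00		-> DKIO_INSERTED
 *	NOT READY	3Ah/xx		-> DKIO_EJECTED
 *	NOT READY	04h/02,07,08	-> DKIO_INSERTED
 *	NO SENSE	00h/00		-> ignored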
23580 */
23581 mutex_exit(SD_MUTEX(un));
23582 return (0);
23583 }
23584 }
23585 }
23586 } else if ((*((char *)statusp) == STATUS_GOOD) &&
23587 (resultp->pkt->pkt_reason == CMD_CMPLT)) {
23588 state = DKIO_INSERTED;
23589 }
23590 
23591 SD_TRACE(SD_LOG_COMMON, un,
23592 "sd_media_watch_cb: state=%x, specified=%x\n",
23593 state, un->un_specified_mediastate);
23594 
23595 /*
23596 * now signal the waiting thread if this is *not* the specified state;
23597 * delay the signal if the state is DKIO_INSERTED to allow the target
23598 * to recover
23599 */
23600 if (state != un->un_specified_mediastate) {
23601 un->un_mediastate = state;
23602 if (state == DKIO_INSERTED) {
23603 /*
23604 * delay the signal to give the drive a chance
23605 * to do what it apparently needs to do
23606 */
23607 SD_TRACE(SD_LOG_COMMON, un,
23608 "sd_media_watch_cb: delayed cv_broadcast\n");
23609 if (un->un_dcvb_timeid == NULL) {
23610 un->un_dcvb_timeid =
23611 timeout(sd_delayed_cv_broadcast, un,
23612 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
23613 }
23614 } else {
23615 SD_TRACE(SD_LOG_COMMON, un,
23616 "sd_media_watch_cb: immediate cv_broadcast\n");
23617 cv_broadcast(&un->un_state_cv);
23618 }
23619 }
23620 mutex_exit(SD_MUTEX(un));
23621 return (0);
23622 }
23623 
23624 
23625 /*
23626 * Function: sd_dkio_get_temp
23627 *
23628 * Description: This routine is the driver entry point for handling ioctl
23629 * requests to get the disk temperature.
23630 *
23631 * Arguments: dev - the device number
23632 * arg - pointer to user provided dk_temperature structure.
23633 * flag - this argument is a pass through to ddi_copyxxx()
23634 * directly from the mode argument of ioctl().
23635 *
23636 * Return Code: 0
23637 * EFAULT
23638 * ENXIO
23639 * EAGAIN
23640 */
23641 
23642 static int
23643 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag)
23644 {
23645 struct sd_lun *un = NULL;
23646 struct dk_temperature *dktemp = NULL;
23647 uchar_t *temperature_page;
23648 int rval = 0;
23649 int path_flag = SD_PATH_STANDARD;
23650 sd_ssc_t *ssc;
23651 
23652 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23653 return (ENXIO);
23654 }
23655 
23656 ssc = sd_ssc_init(un);
23657 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP);
23658 
23659 /* copyin the disk temp argument to get the user flags */
23660 if (ddi_copyin((void *)arg, dktemp,
23661 sizeof (struct dk_temperature), flag) != 0) {
23662 rval = EFAULT;
23663 goto done;
23664 }
23665 
23666 /* Initialize the temperature to invalid. */
23667 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
23668 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
23669 
23670 /*
23671 * Note: Investigate removing the "bypass pm" semantic.
23672 * Can we just bypass PM always?
23673 */
23674 if (dktemp->dkt_flags & DKT_BYPASS_PM) {
23675 path_flag = SD_PATH_DIRECT;
23676 ASSERT(!mutex_owned(&un->un_pm_mutex));
23677 mutex_enter(&un->un_pm_mutex);
23678 if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
23679 /*
23680 * If DKT_BYPASS_PM is set, and the drive happens to be
23681 * in low power mode, we cannot wake it up; we need to
23682 * return EAGAIN.
23683 */
23684 mutex_exit(&un->un_pm_mutex);
23685 rval = EAGAIN;
23686 goto done;
23687 } else {
23688 /*
23689 * Indicate to PM the device is busy. This is required
23690 * to avoid a race - i.e. the ioctl is issuing a
23691 * command and the pm framework brings down the device
23692 * to low power mode (possible power cut-off on some
23693 * platforms).
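 *
 * Every successful sd_pm_entry() taken on this
 * path is balanced by the sd_pm_exit() call at the
 * done1 label below, keyed off
 * path_flag == SD_PATH_DIRECT.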
23694 */
23695 mutex_exit(&un->un_pm_mutex);
23696 if (sd_pm_entry(un) != DDI_SUCCESS) {
23697 rval = EAGAIN;
23698 goto done;
23699 }
23700 }
23701 }
23702 
23703 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);
23704 
23705 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page,
23706 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag);
23707 if (rval != 0)
23708 goto done2;
23709 
23710 /*
23711 * For the current temperature, verify that the parameter length is
23712 * 0x02 and the parameter code is 0x00
23713 */
23714 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) &&
23715 (temperature_page[5] == 0x00)) {
23716 if (temperature_page[9] == 0xFF) {
23717 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
23718 } else {
23719 dktemp->dkt_cur_temp = (short)(temperature_page[9]);
23720 }
23721 }
23722 
23723 /*
23724 * For the reference temperature, verify that the parameter
23725 * length is 0x02 and the parameter code is 0x01
23726 */
23727 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) &&
23728 (temperature_page[11] == 0x01)) {
23729 if (temperature_page[15] == 0xFF) {
23730 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
23731 } else {
23732 dktemp->dkt_ref_temp = (short)(temperature_page[15]);
23733 }
23734 }
23735 
23736 /* Do the copyout regardless of the temperature command's status. */
23737 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature),
23738 flag) != 0) {
23739 rval = EFAULT;
23740 goto done1;
23741 }
23742 
23743 done2:
23744 if (rval != 0) {
23745 if (rval == EIO)
23746 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
23747 else
23748 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23749 }
23750 done1:
23751 if (path_flag == SD_PATH_DIRECT) {
23752 sd_pm_exit(un);
23753 }
23754 
23755 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE);
23756 done:
23757 sd_ssc_fini(ssc);
23758 if (dktemp != NULL) {
23759 kmem_free(dktemp, sizeof (struct dk_temperature));
23760 }
23761 
23762 return (rval);
23763 }
23764 
23765 
23766 /*
23767 * Function: sd_log_page_supported
23768 *
23769 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of
23770 * supported log pages.
23771 *
23772 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
23773 * structure for this target.
23774 * log_page - the log page to look for in the list of
23775 * supported log pages.
23776 * Return Code: -1 - on error (log sense is optional and may not be supported).
23777 * 0 - log page not found.
23778 * 1 - log page found.
23779 */
23780 
23781 static int
23782 sd_log_page_supported(sd_ssc_t *ssc, int log_page)
23783 {
23784 uchar_t *log_page_data;
23785 int i;
23786 int match = 0;
23787 int log_size;
23788 int status = 0;
23789 struct sd_lun *un;
23790 
23791 ASSERT(ssc != NULL);
23792 un = ssc->ssc_un;
23793 ASSERT(un != NULL);
23794 
23795 log_page_data = kmem_zalloc(0xFF, KM_SLEEP);
23796 
23797 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0,
23798 SD_PATH_DIRECT);
23799 
23800 if (status != 0) {
23801 if (status == EIO) {
23802 /*
23803 * Some disks do not support LOG SENSE; we should
23804 * ignore this kind of error (sense key 0x5,
23805 * ILLEGAL REQUEST).
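 *
 * ILLEGAL REQUEST is therefore downgraded to
 * SD_FMT_IGNORE_COMPROMISE below so that optional
 * LOG SENSE support does not generate spurious FMA
 * telemetry; any other failure is still assessed
 * as SD_FMT_STATUS_CHECK.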
23806 */ 23807 uint8_t *sensep; 23808 int senlen; 23809 23810 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 23811 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 23812 ssc->ssc_uscsi_cmd->uscsi_rqresid); 23813 23814 if (senlen > 0 && 23815 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 23816 sd_ssc_assessment(ssc, 23817 SD_FMT_IGNORE_COMPROMISE); 23818 } else { 23819 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23820 } 23821 } else { 23822 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23823 } 23824 23825 SD_ERROR(SD_LOG_COMMON, un, 23826 "sd_log_page_supported: failed log page retrieval\n"); 23827 kmem_free(log_page_data, 0xFF); 23828 return (-1); 23829 } 23830 23831 log_size = log_page_data[3]; 23832 23833 /* 23834 * The list of supported log pages start from the fourth byte. Check 23835 * until we run out of log pages or a match is found. 23836 */ 23837 for (i = 4; (i < (log_size + 4)) && !match; i++) { 23838 if (log_page_data[i] == log_page) { 23839 match++; 23840 } 23841 } 23842 kmem_free(log_page_data, 0xFF); 23843 return (match); 23844 } 23845 23846 23847 /* 23848 * Function: sd_mhdioc_failfast 23849 * 23850 * Description: This routine is the driver entry point for handling ioctl 23851 * requests to enable/disable the multihost failfast option. 23852 * (MHIOCENFAILFAST) 23853 * 23854 * Arguments: dev - the device number 23855 * arg - user specified probing interval. 23856 * flag - this argument is a pass through to ddi_copyxxx() 23857 * directly from the mode argument of ioctl(). 23858 * 23859 * Return Code: 0 23860 * EFAULT 23861 * ENXIO 23862 */ 23863 23864 static int 23865 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 23866 { 23867 struct sd_lun *un = NULL; 23868 int mh_time; 23869 int rval = 0; 23870 23871 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23872 return (ENXIO); 23873 } 23874 23875 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 23876 return (EFAULT); 23877 23878 if (mh_time) { 23879 mutex_enter(SD_MUTEX(un)); 23880 un->un_resvd_status |= SD_FAILFAST; 23881 mutex_exit(SD_MUTEX(un)); 23882 /* 23883 * If mh_time is INT_MAX, then this ioctl is being used for 23884 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 23885 */ 23886 if (mh_time != INT_MAX) { 23887 rval = sd_check_mhd(dev, mh_time); 23888 } 23889 } else { 23890 (void) sd_check_mhd(dev, 0); 23891 mutex_enter(SD_MUTEX(un)); 23892 un->un_resvd_status &= ~SD_FAILFAST; 23893 mutex_exit(SD_MUTEX(un)); 23894 } 23895 return (rval); 23896 } 23897 23898 23899 /* 23900 * Function: sd_mhdioc_takeown 23901 * 23902 * Description: This routine is the driver entry point for handling ioctl 23903 * requests to forcefully acquire exclusive access rights to the 23904 * multihost disk (MHIOCTKOWN). 23905 * 23906 * Arguments: dev - the device number 23907 * arg - user provided structure specifying the delay 23908 * parameters in milliseconds 23909 * flag - this argument is a pass through to ddi_copyxxx() 23910 * directly from the mode argument of ioctl(). 
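 *
 *		A hypothetical userland caller (illustrative only, not
 *		part of this file) might issue:
 *
 *			struct mhioctkown t;
 *			bzero(&t, sizeof (t));
 *			t.min_ownership_delay = 6000;	(milliseconds)
 *			t.max_ownership_delay = 30000;	(milliseconds)
 *			if (ioctl(fd, MHIOCTKOWN, &t) != 0)
 *				... take-ownership failed ...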
23911 * 23912 * Return Code: 0 23913 * EFAULT 23914 * ENXIO 23915 */ 23916 23917 static int 23918 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 23919 { 23920 struct sd_lun *un = NULL; 23921 struct mhioctkown *tkown = NULL; 23922 int rval = 0; 23923 23924 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23925 return (ENXIO); 23926 } 23927 23928 if (arg != NULL) { 23929 tkown = (struct mhioctkown *) 23930 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 23931 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 23932 if (rval != 0) { 23933 rval = EFAULT; 23934 goto error; 23935 } 23936 } 23937 23938 rval = sd_take_ownership(dev, tkown); 23939 mutex_enter(SD_MUTEX(un)); 23940 if (rval == 0) { 23941 un->un_resvd_status |= SD_RESERVE; 23942 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 23943 sd_reinstate_resv_delay = 23944 tkown->reinstate_resv_delay * 1000; 23945 } else { 23946 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 23947 } 23948 /* 23949 * Give the scsi_watch routine interval set by 23950 * the MHIOCENFAILFAST ioctl precedence here. 23951 */ 23952 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 23953 mutex_exit(SD_MUTEX(un)); 23954 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 23955 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23956 "sd_mhdioc_takeown : %d\n", 23957 sd_reinstate_resv_delay); 23958 } else { 23959 mutex_exit(SD_MUTEX(un)); 23960 } 23961 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 23962 sd_mhd_reset_notify_cb, (caddr_t)un); 23963 } else { 23964 un->un_resvd_status &= ~SD_RESERVE; 23965 mutex_exit(SD_MUTEX(un)); 23966 } 23967 23968 error: 23969 if (tkown != NULL) { 23970 kmem_free(tkown, sizeof (struct mhioctkown)); 23971 } 23972 return (rval); 23973 } 23974 23975 23976 /* 23977 * Function: sd_mhdioc_release 23978 * 23979 * Description: This routine is the driver entry point for handling ioctl 23980 * requests to release exclusive access rights to the multihost 23981 * disk (MHIOCRELEASE). 23982 * 23983 * Arguments: dev - the device number 23984 * 23985 * Return Code: 0 23986 * ENXIO 23987 */ 23988 23989 static int 23990 sd_mhdioc_release(dev_t dev) 23991 { 23992 struct sd_lun *un = NULL; 23993 timeout_id_t resvd_timeid_save; 23994 int resvd_status_save; 23995 int rval = 0; 23996 23997 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23998 return (ENXIO); 23999 } 24000 24001 mutex_enter(SD_MUTEX(un)); 24002 resvd_status_save = un->un_resvd_status; 24003 un->un_resvd_status &= 24004 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 24005 if (un->un_resvd_timeid) { 24006 resvd_timeid_save = un->un_resvd_timeid; 24007 un->un_resvd_timeid = NULL; 24008 mutex_exit(SD_MUTEX(un)); 24009 (void) untimeout(resvd_timeid_save); 24010 } else { 24011 mutex_exit(SD_MUTEX(un)); 24012 } 24013 24014 /* 24015 * destroy any pending timeout thread that may be attempting to 24016 * reinstate reservation on this device. 
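 * If the reclaim thread is currently reinstating the reservation for
 * this very device, sd_rmv_resv_reclaim_req() waits on
 * srq_inprocess_cv until that attempt completes, so the RELEASE
 * issued below cannot race with a reclaim RESERVE.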
24017 */ 24018 sd_rmv_resv_reclaim_req(dev); 24019 24020 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 24021 mutex_enter(SD_MUTEX(un)); 24022 if ((un->un_mhd_token) && 24023 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 24024 mutex_exit(SD_MUTEX(un)); 24025 (void) sd_check_mhd(dev, 0); 24026 } else { 24027 mutex_exit(SD_MUTEX(un)); 24028 } 24029 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 24030 sd_mhd_reset_notify_cb, (caddr_t)un); 24031 } else { 24032 /* 24033 * sd_mhd_watch_cb will restart the resvd recover timeout thread 24034 */ 24035 mutex_enter(SD_MUTEX(un)); 24036 un->un_resvd_status = resvd_status_save; 24037 mutex_exit(SD_MUTEX(un)); 24038 } 24039 return (rval); 24040 } 24041 24042 24043 /* 24044 * Function: sd_mhdioc_register_devid 24045 * 24046 * Description: This routine is the driver entry point for handling ioctl 24047 * requests to register the device id (MHIOCREREGISTERDEVID). 24048 * 24049 * Note: The implementation for this ioctl has been updated to 24050 * be consistent with the original PSARC case (1999/357) 24051 * (4375899, 4241671, 4220005) 24052 * 24053 * Arguments: dev - the device number 24054 * 24055 * Return Code: 0 24056 * ENXIO 24057 */ 24058 24059 static int 24060 sd_mhdioc_register_devid(dev_t dev) 24061 { 24062 struct sd_lun *un = NULL; 24063 int rval = 0; 24064 sd_ssc_t *ssc; 24065 24066 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24067 return (ENXIO); 24068 } 24069 24070 ASSERT(!mutex_owned(SD_MUTEX(un))); 24071 24072 mutex_enter(SD_MUTEX(un)); 24073 24074 /* If a devid already exists, de-register it */ 24075 if (un->un_devid != NULL) { 24076 ddi_devid_unregister(SD_DEVINFO(un)); 24077 /* 24078 * After unregistering the devid, free the devid memory. 24079 */ 24080 ddi_devid_free(un->un_devid); 24081 un->un_devid = NULL; 24082 } 24083 24084 /* Check for reservation conflict */ 24085 mutex_exit(SD_MUTEX(un)); 24086 ssc = sd_ssc_init(un); 24087 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 24088 mutex_enter(SD_MUTEX(un)); 24089 24090 switch (rval) { 24091 case 0: 24092 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 24093 break; 24094 case EACCES: 24095 break; 24096 default: 24097 rval = EIO; 24098 } 24099 24100 mutex_exit(SD_MUTEX(un)); 24101 if (rval != 0) { 24102 if (rval == EIO) 24103 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 24104 else 24105 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24106 } 24107 sd_ssc_fini(ssc); 24108 return (rval); 24109 } 24110 24111 24112 /* 24113 * Function: sd_mhdioc_inkeys 24114 * 24115 * Description: This routine is the driver entry point for handling ioctl 24116 * requests to issue the SCSI-3 Persistent Reserve In (Read Keys) 24117 * command to the device (MHIOCGRP_INKEYS). 24118 * 24119 * Arguments: dev - the device number 24120 * arg - user provided in_keys structure 24121 * flag - this argument is a pass through to ddi_copyxxx() 24122 * directly from the mode argument of ioctl().
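 *
 * Note: under _MULTI_DATAMODEL a 32-bit caller passes a struct
 * mhioc_inkeys32 whose 'li' member is a 32-bit pointer; the
 * ddi_model_convert_from() switch below widens it through uintptr_t
 * before calling sd_persistent_reservation_in_read_keys().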
24123 * 24124 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 24125 * ENXIO 24126 * EFAULT 24127 */ 24128 24129 static int 24130 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 24131 { 24132 struct sd_lun *un; 24133 mhioc_inkeys_t inkeys; 24134 int rval = 0; 24135 24136 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24137 return (ENXIO); 24138 } 24139 24140 #ifdef _MULTI_DATAMODEL 24141 switch (ddi_model_convert_from(flag & FMODELS)) { 24142 case DDI_MODEL_ILP32: { 24143 struct mhioc_inkeys32 inkeys32; 24144 24145 if (ddi_copyin(arg, &inkeys32, 24146 sizeof (struct mhioc_inkeys32), flag) != 0) { 24147 return (EFAULT); 24148 } 24149 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 24150 if ((rval = sd_persistent_reservation_in_read_keys(un, 24151 &inkeys, flag)) != 0) { 24152 return (rval); 24153 } 24154 inkeys32.generation = inkeys.generation; 24155 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 24156 flag) != 0) { 24157 return (EFAULT); 24158 } 24159 break; 24160 } 24161 case DDI_MODEL_NONE: 24162 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 24163 flag) != 0) { 24164 return (EFAULT); 24165 } 24166 if ((rval = sd_persistent_reservation_in_read_keys(un, 24167 &inkeys, flag)) != 0) { 24168 return (rval); 24169 } 24170 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 24171 flag) != 0) { 24172 return (EFAULT); 24173 } 24174 break; 24175 } 24176 24177 #else /* ! _MULTI_DATAMODEL */ 24178 24179 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 24180 return (EFAULT); 24181 } 24182 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 24183 if (rval != 0) { 24184 return (rval); 24185 } 24186 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 24187 return (EFAULT); 24188 } 24189 24190 #endif /* _MULTI_DATAMODEL */ 24191 24192 return (rval); 24193 } 24194 24195 24196 /* 24197 * Function: sd_mhdioc_inresv 24198 * 24199 * Description: This routine is the driver entry point for handling ioctl 24200 * requests to issue the SCSI-3 Persistent Reserve In (Read 24201 * Reservations) command to the device (MHIOCGRP_INRESV). 24202 * 24203 * Arguments: dev - the device number 24204 * arg - user provided in_resv structure 24205 * flag - this argument is a pass through to ddi_copyxxx() 24206 * directly from the mode argument of ioctl().
24207 * 24208 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 24209 * ENXIO 24210 * EFAULT 24211 */ 24212 24213 static int 24214 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 24215 { 24216 struct sd_lun *un; 24217 mhioc_inresvs_t inresvs; 24218 int rval = 0; 24219 24220 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24221 return (ENXIO); 24222 } 24223 24224 #ifdef _MULTI_DATAMODEL 24225 24226 switch (ddi_model_convert_from(flag & FMODELS)) { 24227 case DDI_MODEL_ILP32: { 24228 struct mhioc_inresvs32 inresvs32; 24229 24230 if (ddi_copyin(arg, &inresvs32, 24231 sizeof (struct mhioc_inresvs32), flag) != 0) { 24232 return (EFAULT); 24233 } 24234 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 24235 if ((rval = sd_persistent_reservation_in_read_resv(un, 24236 &inresvs, flag)) != 0) { 24237 return (rval); 24238 } 24239 inresvs32.generation = inresvs.generation; 24240 if (ddi_copyout(&inresvs32, arg, 24241 sizeof (struct mhioc_inresvs32), flag) != 0) { 24242 return (EFAULT); 24243 } 24244 break; 24245 } 24246 case DDI_MODEL_NONE: 24247 if (ddi_copyin(arg, &inresvs, 24248 sizeof (mhioc_inresvs_t), flag) != 0) { 24249 return (EFAULT); 24250 } 24251 if ((rval = sd_persistent_reservation_in_read_resv(un, 24252 &inresvs, flag)) != 0) { 24253 return (rval); 24254 } 24255 if (ddi_copyout(&inresvs, arg, 24256 sizeof (mhioc_inresvs_t), flag) != 0) { 24257 return (EFAULT); 24258 } 24259 break; 24260 } 24261 24262 #else /* ! _MULTI_DATAMODEL */ 24263 24264 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 24265 return (EFAULT); 24266 } 24267 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 24268 if (rval != 0) { 24269 return (rval); 24270 } 24271 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 24272 return (EFAULT); 24273 } 24274 24275 #endif /* ! _MULTI_DATAMODEL */ 24276 24277 return (rval); 24278 } 24279 24280 24281 /* 24282 * The following routines support the clustering functionality described below 24283 * and implement lost reservation reclaim functionality. 24284 * 24285 * Clustering 24286 * ---------- 24287 * The clustering code uses two different, independent forms of SCSI 24288 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 24289 * Persistent Group Reservations. For any particular disk, it will use either 24290 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 24291 * 24292 * SCSI-2 24293 * The cluster software takes ownership of a multi-hosted disk by issuing the 24294 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 24295 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 24296 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 24297 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 24298 * driver. The meaning of failfast is that if the driver (on this host) ever 24299 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 24300 * it should immediately panic the host. The motivation for this ioctl is that 24301 * if this host does encounter reservation conflict, the underlying cause is 24302 * that some other host of the cluster has decided that this host is no longer 24303 * in the cluster and has seized control of the disks for itself. Since this 24304 * host is no longer in the cluster, it ought to panic itself. 
The 24305 * MHIOCENFAILFAST ioctl does two things: 24306 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 24307 * error to panic the host 24308 * (b) it sets up a periodic timer to test whether this host still has 24309 * "access" (in that no other host has reserved the device): if the 24310 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 24311 * purpose of that periodic timer is to handle scenarios where the host is 24312 * otherwise temporarily quiescent, temporarily doing no real i/o. 24313 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 24314 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 24315 * the device itself. 24316 * 24317 * SCSI-3 PGR 24318 * A direct semantic implementation of the SCSI-3 Persistent Reservation 24319 * facility is supported through the shared multihost disk ioctls 24320 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 24321 * MHIOCGRP_PREEMPTANDABORT) 24322 * 24323 * Reservation Reclaim: 24324 * -------------------- 24325 * To support the lost reservation reclaim operations this driver creates a 24326 * single thread to handle reinstating reservations on all devices that have 24327 * lost reservations. sd_resv_reclaim_requests are logged for all devices that 24328 * have lost reservations when the scsi watch facility calls back 24329 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the 24330 * requests to regain the lost reservations. 24331 */ 24332 24333 /* 24334 * Function: sd_check_mhd() 24335 * 24336 * Description: This function sets up and submits a scsi watch request or 24337 * terminates an existing watch request. This routine is used in 24338 * support of reservation reclaim. 24339 * 24340 * Arguments: dev - the device 'dev_t' is used for context to discriminate 24341 * among multiple watches that share the callback function 24342 * interval - the number of milliseconds specifying the watch 24343 * interval for issuing TEST UNIT READY commands. If 24344 * set to 0 the watch should be terminated. If the 24345 * interval is set to 0 and if the device is required 24346 * to hold reservation while disabling failfast, the 24347 * watch is restarted with an interval of 24348 * reinstate_resv_delay. 24349 * 24350 * Return Code: 0 - Successful submit/terminate of scsi watch request 24351 * ENXIO - Indicates an invalid device was specified 24352 * EAGAIN - Unable to submit the scsi watch request 24353 */ 24354 24355 static int 24356 sd_check_mhd(dev_t dev, int interval) 24357 { 24358 struct sd_lun *un; 24359 opaque_t token; 24360 24361 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24362 return (ENXIO); 24363 } 24364 24365 /* is this a watch termination request? */ 24366 if (interval == 0) { 24367 mutex_enter(SD_MUTEX(un)); 24368 /* if there is an existing watch task then terminate it */ 24369 if (un->un_mhd_token) { 24370 token = un->un_mhd_token; 24371 un->un_mhd_token = NULL; 24372 mutex_exit(SD_MUTEX(un)); 24373 (void) scsi_watch_request_terminate(token, 24374 SCSI_WATCH_TERMINATE_ALL_WAIT); 24375 mutex_enter(SD_MUTEX(un)); 24376 } else { 24377 mutex_exit(SD_MUTEX(un)); 24378 /* 24379 * Note: If we return here we don't check for the 24380 * failfast case. This is the original legacy 24381 * implementation but perhaps we should be checking 24382 * the failfast case.
24383 */ 24384 return (0); 24385 } 24386 /* 24387 * If the device is required to hold reservation while 24388 * disabling failfast, we need to restart the scsi_watch 24389 * routine with an interval of reinstate_resv_delay. 24390 */ 24391 if (un->un_resvd_status & SD_RESERVE) { 24392 interval = sd_reinstate_resv_delay/1000; 24393 } else { 24394 /* no failfast so bail */ 24395 mutex_exit(SD_MUTEX(un)); 24396 return (0); 24397 } 24398 mutex_exit(SD_MUTEX(un)); 24399 } 24400 24401 /* 24402 * adjust minimum time interval to 1 second, 24403 * and convert from msecs to usecs 24404 */ 24405 if (interval > 0 && interval < 1000) { 24406 interval = 1000; 24407 } 24408 interval *= 1000; 24409 24410 /* 24411 * submit the request to the scsi_watch service 24412 */ 24413 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 24414 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 24415 if (token == NULL) { 24416 return (EAGAIN); 24417 } 24418 24419 /* 24420 * save token for termination later on 24421 */ 24422 mutex_enter(SD_MUTEX(un)); 24423 un->un_mhd_token = token; 24424 mutex_exit(SD_MUTEX(un)); 24425 return (0); 24426 } 24427 24428 24429 /* 24430 * Function: sd_mhd_watch_cb() 24431 * 24432 * Description: This function is the call back function used by the scsi watch 24433 * facility. The scsi watch facility sends the "Test Unit Ready" 24434 * and processes the status. If applicable (i.e. a "Unit Attention" 24435 * status and automatic "Request Sense" not used) the scsi watch 24436 * facility will send a "Request Sense" and retrieve the sense data 24437 * to be passed to this callback function. In either case the 24438 * automatic "Request Sense" or the facility submitting one, this 24439 * callback is passed the status and sense data. 24440 * 24441 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24442 * among multiple watches that share this callback function 24443 * resultp - scsi watch facility result packet containing scsi 24444 * packet, status byte and sense data 24445 * 24446 * Return Code: 0 - continue the watch task 24447 * non-zero - terminate the watch task 24448 */ 24449 24450 static int 24451 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 24452 { 24453 struct sd_lun *un; 24454 struct scsi_status *statusp; 24455 uint8_t *sensep; 24456 struct scsi_pkt *pkt; 24457 uchar_t actual_sense_length; 24458 dev_t dev = (dev_t)arg; 24459 24460 ASSERT(resultp != NULL); 24461 statusp = resultp->statusp; 24462 sensep = (uint8_t *)resultp->sensep; 24463 pkt = resultp->pkt; 24464 actual_sense_length = resultp->actual_sense_length; 24465 24466 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24467 return (ENXIO); 24468 } 24469 24470 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24471 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 24472 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 24473 24474 /* Begin processing of the status and/or sense data */ 24475 if (pkt->pkt_reason != CMD_CMPLT) { 24476 /* Handle the incomplete packet */ 24477 sd_mhd_watch_incomplete(un, pkt); 24478 return (0); 24479 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 24480 if (*((unsigned char *)statusp) 24481 == STATUS_RESERVATION_CONFLICT) { 24482 /* 24483 * Handle a reservation conflict by panicking if 24484 * configured for failfast or by logging the conflict 24485 * and updating the reservation status 24486 */ 24487 mutex_enter(SD_MUTEX(un)); 24488 if ((un->un_resvd_status & SD_FAILFAST) && 24489 (sd_failfast_enable)) { 24490 
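/* panic only if both the per-device flag and the global sd_failfast_enable tunable are set */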
sd_panic_for_res_conflict(un); 24491 /*NOTREACHED*/ 24492 } 24493 SD_INFO(SD_LOG_IOCTL_MHD, un, 24494 "sd_mhd_watch_cb: Reservation Conflict\n"); 24495 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 24496 mutex_exit(SD_MUTEX(un)); 24497 } 24498 } 24499 24500 if (sensep != NULL) { 24501 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 24502 mutex_enter(SD_MUTEX(un)); 24503 if ((scsi_sense_asc(sensep) == 24504 SD_SCSI_RESET_SENSE_CODE) && 24505 (un->un_resvd_status & SD_RESERVE)) { 24506 /* 24507 * The additional sense code indicates a power 24508 * on or bus device reset has occurred; update 24509 * the reservation status. 24510 */ 24511 un->un_resvd_status |= 24512 (SD_LOST_RESERVE | SD_WANT_RESERVE); 24513 SD_INFO(SD_LOG_IOCTL_MHD, un, 24514 "sd_mhd_watch_cb: Lost Reservation\n"); 24515 } 24516 } else { 24517 return (0); 24518 } 24519 } else { 24520 mutex_enter(SD_MUTEX(un)); 24521 } 24522 24523 if ((un->un_resvd_status & SD_RESERVE) && 24524 (un->un_resvd_status & SD_LOST_RESERVE)) { 24525 if (un->un_resvd_status & SD_WANT_RESERVE) { 24526 /* 24527 * A reset occurred in between the last probe and this 24528 * one so if a timeout is pending cancel it. 24529 */ 24530 if (un->un_resvd_timeid) { 24531 timeout_id_t temp_id = un->un_resvd_timeid; 24532 un->un_resvd_timeid = NULL; 24533 mutex_exit(SD_MUTEX(un)); 24534 (void) untimeout(temp_id); 24535 mutex_enter(SD_MUTEX(un)); 24536 } 24537 un->un_resvd_status &= ~SD_WANT_RESERVE; 24538 } 24539 if (un->un_resvd_timeid == 0) { 24540 /* Schedule a timeout to handle the lost reservation */ 24541 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 24542 (void *)dev, 24543 drv_usectohz(sd_reinstate_resv_delay)); 24544 } 24545 } 24546 mutex_exit(SD_MUTEX(un)); 24547 return (0); 24548 } 24549 24550 24551 /* 24552 * Function: sd_mhd_watch_incomplete() 24553 * 24554 * Description: This function is used to find out why a scsi pkt sent by the 24555 * scsi watch facility was not completed. Under some scenarios this 24556 * routine will return. Otherwise it will send a bus reset to see 24557 * if the drive is still online. 24558 * 24559 * Arguments: un - driver soft state (unit) structure 24560 * pkt - incomplete scsi pkt 24561 */ 24562 24563 static void 24564 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 24565 { 24566 int be_chatty; 24567 int perr; 24568 24569 ASSERT(pkt != NULL); 24570 ASSERT(un != NULL); 24571 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 24572 perr = (pkt->pkt_statistics & STAT_PERR); 24573 24574 mutex_enter(SD_MUTEX(un)); 24575 if (un->un_state == SD_STATE_DUMPING) { 24576 mutex_exit(SD_MUTEX(un)); 24577 return; 24578 } 24579 24580 switch (pkt->pkt_reason) { 24581 case CMD_UNX_BUS_FREE: 24582 /* 24583 * If we had a parity error that caused the target to drop BSY*, 24584 * don't be chatty about it. 24585 */ 24586 if (perr && be_chatty) { 24587 be_chatty = 0; 24588 } 24589 break; 24590 case CMD_TAG_REJECT: 24591 /* 24592 * The SCSI-2 spec states that a tag reject will be sent by the 24593 * target if tagged queuing is not supported. A tag reject may 24594 * also be sent during certain initialization periods or to 24595 * control internal resources. For the latter case the target 24596 * may also return Queue Full. 24597 * 24598 * If this driver receives a tag reject from a target that is 24599 * going through an init period or controlling internal 24600 * resources tagged queuing will be disabled. 
This is less 24601 * than optimal behavior, but the driver is unable to determine 24602 * the target state and assumes tagged queuing is not supported. 24603 */ 24604 pkt->pkt_flags = 0; 24605 un->un_tagflags = 0; 24606 24607 if (un->un_f_opt_queueing == TRUE) { 24608 un->un_throttle = min(un->un_throttle, 3); 24609 } else { 24610 un->un_throttle = 1; 24611 } 24612 mutex_exit(SD_MUTEX(un)); 24613 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 24614 mutex_enter(SD_MUTEX(un)); 24615 break; 24616 case CMD_INCOMPLETE: 24617 /* 24618 * The transport stopped with an abnormal state, fallthrough and 24619 * reset the target and/or bus unless selection did not complete 24620 * (indicated by STATE_GOT_BUS) in which case we don't want to 24621 * go through a target/bus reset 24622 */ 24623 if (pkt->pkt_state == STATE_GOT_BUS) { 24624 break; 24625 } 24626 /*FALLTHROUGH*/ 24627 24628 case CMD_TIMEOUT: 24629 default: 24630 /* 24631 * The lun may still be running the command, so a lun reset 24632 * should be attempted. If the lun reset fails or cannot be 24633 * issued, then try a target reset. Lastly try a bus reset. 24634 */ 24635 if ((pkt->pkt_statistics & 24636 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 24637 int reset_retval = 0; 24638 mutex_exit(SD_MUTEX(un)); 24639 if (un->un_f_allow_bus_device_reset == TRUE) { 24640 if (un->un_f_lun_reset_enabled == TRUE) { 24641 reset_retval = 24642 scsi_reset(SD_ADDRESS(un), 24643 RESET_LUN); 24644 } 24645 if (reset_retval == 0) { 24646 reset_retval = 24647 scsi_reset(SD_ADDRESS(un), 24648 RESET_TARGET); 24649 } 24650 } 24651 if (reset_retval == 0) { 24652 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 24653 } 24654 mutex_enter(SD_MUTEX(un)); 24655 } 24656 break; 24657 } 24658 24659 /* A device/bus reset has occurred; update the reservation status. */ 24660 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 24661 (STAT_BUS_RESET | STAT_DEV_RESET))) { 24662 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24663 un->un_resvd_status |= 24664 (SD_LOST_RESERVE | SD_WANT_RESERVE); 24665 SD_INFO(SD_LOG_IOCTL_MHD, un, 24666 "sd_mhd_watch_incomplete: Lost Reservation\n"); 24667 } 24668 } 24669 24670 /* 24671 * The disk has been turned off; update the device state. 24672 * 24673 * Note: Should we be offlining the disk here? 24674 */ 24675 if (pkt->pkt_state == STATE_GOT_BUS) { 24676 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 24677 "Disk not responding to selection\n"); 24678 if (un->un_state != SD_STATE_OFFLINE) { 24679 New_state(un, SD_STATE_OFFLINE); 24680 } 24681 } else if (be_chatty) { 24682 /* 24683 * suppress messages if they are all the same pkt reason; 24684 * with TQ, many (up to 256) are returned with the same 24685 * pkt_reason 24686 */ 24687 if (pkt->pkt_reason != un->un_last_pkt_reason) { 24688 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24689 "sd_mhd_watch_incomplete: " 24690 "SCSI transport failed: reason '%s'\n", 24691 scsi_rname(pkt->pkt_reason)); 24692 } 24693 } 24694 un->un_last_pkt_reason = pkt->pkt_reason; 24695 mutex_exit(SD_MUTEX(un)); 24696 } 24697 24698 24699 /* 24700 * Function: sd_sname() 24701 * 24702 * Description: This is a simple little routine to return a string containing 24703 * a printable description of the command status byte for use in 24704 * logging. 24705 * 24706 * Arguments: status - the command status byte 24707 * 24708 * Return Code: char * - string containing status description.
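 *
 * For example, sd_sname(STATUS_CHECK) returns "check condition" and
 * sd_sname(STATUS_QFULL) returns "queue full".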
24709 */ 24710 24711 static char * 24712 sd_sname(uchar_t status) 24713 { 24714 switch (status & STATUS_MASK) { 24715 case STATUS_GOOD: 24716 return ("good status"); 24717 case STATUS_CHECK: 24718 return ("check condition"); 24719 case STATUS_MET: 24720 return ("condition met"); 24721 case STATUS_BUSY: 24722 return ("busy"); 24723 case STATUS_INTERMEDIATE: 24724 return ("intermediate"); 24725 case STATUS_INTERMEDIATE_MET: 24726 return ("intermediate - condition met"); 24727 case STATUS_RESERVATION_CONFLICT: 24728 return ("reservation_conflict"); 24729 case STATUS_TERMINATED: 24730 return ("command terminated"); 24731 case STATUS_QFULL: 24732 return ("queue full"); 24733 default: 24734 return ("<unknown status>"); 24735 } 24736 } 24737 24738 24739 /* 24740 * Function: sd_mhd_resvd_recover() 24741 * 24742 * Description: This function adds a reservation entry to the 24743 * sd_resv_reclaim_request list and signals the reservation 24744 * reclaim thread that there is work pending. If the reservation 24745 * reclaim thread has not been previously created this function 24746 * will kick it off. 24747 * 24748 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24749 * among multiple watches that share this callback function 24750 * 24751 * Context: This routine is called by timeout() and is run in interrupt 24752 * context. It must not sleep or call other functions which may 24753 * sleep. 24754 */ 24755 24756 static void 24757 sd_mhd_resvd_recover(void *arg) 24758 { 24759 dev_t dev = (dev_t)arg; 24760 struct sd_lun *un; 24761 struct sd_thr_request *sd_treq = NULL; 24762 struct sd_thr_request *sd_cur = NULL; 24763 struct sd_thr_request *sd_prev = NULL; 24764 int already_there = 0; 24765 24766 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24767 return; 24768 } 24769 24770 mutex_enter(SD_MUTEX(un)); 24771 un->un_resvd_timeid = NULL; 24772 if (un->un_resvd_status & SD_WANT_RESERVE) { 24773 /* 24774 * There was a reset so don't issue the reserve, allow the 24775 * sd_mhd_watch_cb callback function to notice this and 24776 * reschedule the timeout for reservation. 24777 */ 24778 mutex_exit(SD_MUTEX(un)); 24779 return; 24780 } 24781 mutex_exit(SD_MUTEX(un)); 24782 24783 /* 24784 * Add this device to the sd_resv_reclaim_request list and the 24785 * sd_resv_reclaim_thread should take care of the rest. 24786 * 24787 * Note: We can't sleep in this context so if the memory allocation 24788 * fails allow the sd_mhd_watch_cb callback function to notice this and 24789 * reschedule the timeout for reservation. 
(4378460) 24790 */ 24791 sd_treq = (struct sd_thr_request *) 24792 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 24793 if (sd_treq == NULL) { 24794 return; 24795 } 24796 24797 sd_treq->sd_thr_req_next = NULL; 24798 sd_treq->dev = dev; 24799 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24800 if (sd_tr.srq_thr_req_head == NULL) { 24801 sd_tr.srq_thr_req_head = sd_treq; 24802 } else { 24803 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 24804 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 24805 if (sd_cur->dev == dev) { 24806 /* 24807 * already in Queue so don't log 24808 * another request for the device 24809 */ 24810 already_there = 1; 24811 break; 24812 } 24813 sd_prev = sd_cur; 24814 } 24815 if (!already_there) { 24816 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 24817 "logging request for %lx\n", dev); 24818 sd_prev->sd_thr_req_next = sd_treq; 24819 } else { 24820 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 24821 } 24822 } 24823 24824 /* 24825 * Create a kernel thread to do the reservation reclaim and free up this 24826 * thread. We cannot block this thread while we go away to do the 24827 * reservation reclaim 24828 */ 24829 if (sd_tr.srq_resv_reclaim_thread == NULL) 24830 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 24831 sd_resv_reclaim_thread, NULL, 24832 0, &p0, TS_RUN, v.v_maxsyspri - 2); 24833 24834 /* Tell the reservation reclaim thread that it has work to do */ 24835 cv_signal(&sd_tr.srq_resv_reclaim_cv); 24836 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24837 } 24838 24839 /* 24840 * Function: sd_resv_reclaim_thread() 24841 * 24842 * Description: This function implements the reservation reclaim operations 24843 * 24844 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24845 * among multiple watches that share this callback function 24846 */ 24847 24848 static void 24849 sd_resv_reclaim_thread() 24850 { 24851 struct sd_lun *un; 24852 struct sd_thr_request *sd_mhreq; 24853 24854 /* Wait for work */ 24855 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24856 if (sd_tr.srq_thr_req_head == NULL) { 24857 cv_wait(&sd_tr.srq_resv_reclaim_cv, 24858 &sd_tr.srq_resv_reclaim_mutex); 24859 } 24860 24861 /* Loop while we have work */ 24862 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 24863 un = ddi_get_soft_state(sd_state, 24864 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 24865 if (un == NULL) { 24866 /* 24867 * softstate structure is NULL so just 24868 * dequeue the request and continue 24869 */ 24870 sd_tr.srq_thr_req_head = 24871 sd_tr.srq_thr_cur_req->sd_thr_req_next; 24872 kmem_free(sd_tr.srq_thr_cur_req, 24873 sizeof (struct sd_thr_request)); 24874 continue; 24875 } 24876 24877 /* dequeue the request */ 24878 sd_mhreq = sd_tr.srq_thr_cur_req; 24879 sd_tr.srq_thr_req_head = 24880 sd_tr.srq_thr_cur_req->sd_thr_req_next; 24881 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24882 24883 /* 24884 * Reclaim reservation only if SD_RESERVE is still set. There 24885 * may have been a call to MHIOCRELEASE before we got here. 24886 */ 24887 mutex_enter(SD_MUTEX(un)); 24888 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24889 /* 24890 * Note: The SD_LOST_RESERVE flag is cleared before 24891 * reclaiming the reservation. 
If this is done after the 24892 * call to sd_reserve_release a reservation loss in the 24893 * window between pkt completion of reserve cmd and 24894 * mutex_enter below may not be recognized 24895 */ 24896 un->un_resvd_status &= ~SD_LOST_RESERVE; 24897 mutex_exit(SD_MUTEX(un)); 24898 24899 if (sd_reserve_release(sd_mhreq->dev, 24900 SD_RESERVE) == 0) { 24901 mutex_enter(SD_MUTEX(un)); 24902 un->un_resvd_status |= SD_RESERVE; 24903 mutex_exit(SD_MUTEX(un)); 24904 SD_INFO(SD_LOG_IOCTL_MHD, un, 24905 "sd_resv_reclaim_thread: " 24906 "Reservation Recovered\n"); 24907 } else { 24908 mutex_enter(SD_MUTEX(un)); 24909 un->un_resvd_status |= SD_LOST_RESERVE; 24910 mutex_exit(SD_MUTEX(un)); 24911 SD_INFO(SD_LOG_IOCTL_MHD, un, 24912 "sd_resv_reclaim_thread: Failed " 24913 "Reservation Recovery\n"); 24914 } 24915 } else { 24916 mutex_exit(SD_MUTEX(un)); 24917 } 24918 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24919 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 24920 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24921 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 24922 /* 24923 * wakeup the destroy thread if anyone is waiting on 24924 * us to complete. 24925 */ 24926 cv_signal(&sd_tr.srq_inprocess_cv); 24927 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24928 "sd_resv_reclaim_thread: cv_signalling current request \n"); 24929 } 24930 24931 /* 24932 * cleanup the sd_tr structure now that this thread will not exist 24933 */ 24934 ASSERT(sd_tr.srq_thr_req_head == NULL); 24935 ASSERT(sd_tr.srq_thr_cur_req == NULL); 24936 sd_tr.srq_resv_reclaim_thread = NULL; 24937 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24938 thread_exit(); 24939 } 24940 24941 24942 /* 24943 * Function: sd_rmv_resv_reclaim_req() 24944 * 24945 * Description: This function removes any pending reservation reclaim requests 24946 * for the specified device. 24947 * 24948 * Arguments: dev - the device 'dev_t' 24949 */ 24950 24951 static void 24952 sd_rmv_resv_reclaim_req(dev_t dev) 24953 { 24954 struct sd_thr_request *sd_mhreq; 24955 struct sd_thr_request *sd_prev; 24956 24957 /* Remove a reservation reclaim request from the list */ 24958 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24959 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 24960 /* 24961 * We are attempting to reinstate reservation for 24962 * this device. We wait for sd_reserve_release() 24963 * to return before we return. 24964 */ 24965 cv_wait(&sd_tr.srq_inprocess_cv, 24966 &sd_tr.srq_resv_reclaim_mutex); 24967 } else { 24968 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 24969 if (sd_mhreq && sd_mhreq->dev == dev) { 24970 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 24971 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24972 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24973 return; 24974 } 24975 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 24976 if (sd_mhreq && sd_mhreq->dev == dev) { 24977 break; 24978 } 24979 sd_prev = sd_mhreq; 24980 } 24981 if (sd_mhreq != NULL) { 24982 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 24983 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24984 } 24985 } 24986 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24987 } 24988 24989 24990 /* 24991 * Function: sd_mhd_reset_notify_cb() 24992 * 24993 * Description: This is a call back function for scsi_reset_notify. This 24994 * function updates the softstate reserved status and logs the 24995 * reset. The driver scsi watch facility callback function 24996 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 24997 * will reclaim the reservation. 
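 * (The callback is registered via scsi_reset_notify() with
 * SCSI_RESET_NOTIFY in sd_mhdioc_takeown() and cancelled with
 * SCSI_RESET_CANCEL in sd_mhdioc_release().)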
24998 * 24999 * Arguments: arg - driver soft state (unit) structure 25000 */ 25001 25002 static void 25003 sd_mhd_reset_notify_cb(caddr_t arg) 25004 { 25005 struct sd_lun *un = (struct sd_lun *)arg; 25006 25007 mutex_enter(SD_MUTEX(un)); 25008 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 25009 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 25010 SD_INFO(SD_LOG_IOCTL_MHD, un, 25011 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 25012 } 25013 mutex_exit(SD_MUTEX(un)); 25014 } 25015 25016 25017 /* 25018 * Function: sd_take_ownership() 25019 * 25020 * Description: This routine implements an algorithm to achieve a stable 25021 * reservation on disks which don't implement priority reserve, 25022 * and makes sure that other hosts' re-reservation attempts fail. 25023 * This algorithm consists of a loop that keeps issuing the RESERVE 25024 * for some period of time (min_ownership_delay, default 6 seconds). 25025 * During that loop, it looks to see if there has been a bus device 25026 * reset or bus reset (both of which cause an existing reservation 25027 * to be lost). If the reservation is lost, issue RESERVE until a 25028 * period of min_ownership_delay with no resets has gone by, or 25029 * until max_ownership_delay has expired. This loop ensures that 25030 * the host really did manage to reserve the device, in spite of 25031 * resets. The looping for min_ownership_delay (default six 25032 * seconds) is important to early generation clustering products, 25033 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 25034 * MHIOCENFAILFAST periodic timer of two seconds. By having 25035 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 25036 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 25037 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 25038 * have already noticed, via the MHIOCENFAILFAST polling, that it 25039 * no longer "owns" the disk and will have panicked itself. Thus, 25040 * the host issuing the MHIOCTKOWN is assured (with timing 25041 * dependencies) that by the time it actually starts to use the 25042 * disk for real work, the old owner is no longer accessing it. 25043 * 25044 * min_ownership_delay is the minimum amount of time for which the 25045 * disk must be reserved continuously devoid of resets before the 25046 * MHIOCTKOWN ioctl will return success. 25047 * 25048 * max_ownership_delay indicates the amount of time by which the 25049 * take ownership should succeed or time out with an error. 25050 * 25051 * Arguments: dev - the device 'dev_t' 25052 * *p - struct containing timing info. 25053 * 25054 * Return Code: 0 for success or error code 25055 */ 25056 25057 static int 25058 sd_take_ownership(dev_t dev, struct mhioctkown *p) 25059 { 25060 struct sd_lun *un; 25061 int rval; 25062 int err; 25063 int reservation_count = 0; 25064 int min_ownership_delay = 6000000; /* in usec */ 25065 int max_ownership_delay = 30000000; /* in usec */ 25066 clock_t start_time; /* starting time of this algorithm */ 25067 clock_t end_time; /* time limit for giving up */ 25068 clock_t ownership_time; /* time limit for stable ownership */ 25069 clock_t current_time; 25070 clock_t previous_current_time; 25071 25072 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25073 return (ENXIO); 25074 } 25075 25076 /* 25077 * Attempt a device reservation. A priority reservation is requested.
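 * SD_PRIORITY_RESERVE allows sd_reserve_release() to break a
 * reservation held by another host (with a LUN, target, or bus
 * reset) before reissuing the RESERVE; see sd_reserve_release().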
25078 */ 25079 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 25080 != SD_SUCCESS) { 25081 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25082 "sd_take_ownership: return(1)=%d\n", rval); 25083 return (rval); 25084 } 25085 25086 /* Update the softstate reserved status to indicate the reservation */ 25087 mutex_enter(SD_MUTEX(un)); 25088 un->un_resvd_status |= SD_RESERVE; 25089 un->un_resvd_status &= 25090 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 25091 mutex_exit(SD_MUTEX(un)); 25092 25093 if (p != NULL) { 25094 if (p->min_ownership_delay != 0) { 25095 min_ownership_delay = p->min_ownership_delay * 1000; 25096 } 25097 if (p->max_ownership_delay != 0) { 25098 max_ownership_delay = p->max_ownership_delay * 1000; 25099 } 25100 } 25101 SD_INFO(SD_LOG_IOCTL_MHD, un, 25102 "sd_take_ownership: min, max delays: %d, %d\n", 25103 min_ownership_delay, max_ownership_delay); 25104 25105 start_time = ddi_get_lbolt(); 25106 current_time = start_time; 25107 ownership_time = current_time + drv_usectohz(min_ownership_delay); 25108 end_time = start_time + drv_usectohz(max_ownership_delay); 25109 25110 while (current_time - end_time < 0) { 25111 delay(drv_usectohz(500000)); 25112 25113 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 25114 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 25115 mutex_enter(SD_MUTEX(un)); 25116 rval = (un->un_resvd_status & 25117 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 25118 mutex_exit(SD_MUTEX(un)); 25119 break; 25120 } 25121 } 25122 previous_current_time = current_time; 25123 current_time = ddi_get_lbolt(); 25124 mutex_enter(SD_MUTEX(un)); 25125 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 25126 ownership_time = ddi_get_lbolt() + 25127 drv_usectohz(min_ownership_delay); 25128 reservation_count = 0; 25129 } else { 25130 reservation_count++; 25131 } 25132 un->un_resvd_status |= SD_RESERVE; 25133 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 25134 mutex_exit(SD_MUTEX(un)); 25135 25136 SD_INFO(SD_LOG_IOCTL_MHD, un, 25137 "sd_take_ownership: ticks for loop iteration=%ld, " 25138 "reservation=%s\n", (current_time - previous_current_time), 25139 reservation_count ? 
"ok" : "reclaimed"); 25140 25141 if (current_time - ownership_time >= 0 && 25142 reservation_count >= 4) { 25143 rval = 0; /* Achieved a stable ownership */ 25144 break; 25145 } 25146 if (current_time - end_time >= 0) { 25147 rval = EACCES; /* No ownership in max possible time */ 25148 break; 25149 } 25150 } 25151 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25152 "sd_take_ownership: return(2)=%d\n", rval); 25153 return (rval); 25154 } 25155 25156 25157 /* 25158 * Function: sd_reserve_release() 25159 * 25160 * Description: This function builds and sends scsi RESERVE, RELEASE, and 25161 * PRIORITY RESERVE commands based on a user specified command type 25162 * 25163 * Arguments: dev - the device 'dev_t' 25164 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 25165 * SD_RESERVE, SD_RELEASE 25166 * 25167 * Return Code: 0 or Error Code 25168 */ 25169 25170 static int 25171 sd_reserve_release(dev_t dev, int cmd) 25172 { 25173 struct uscsi_cmd *com = NULL; 25174 struct sd_lun *un = NULL; 25175 char cdb[CDB_GROUP0]; 25176 int rval; 25177 25178 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 25179 (cmd == SD_PRIORITY_RESERVE)); 25180 25181 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25182 return (ENXIO); 25183 } 25184 25185 /* instantiate and initialize the command and cdb */ 25186 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25187 bzero(cdb, CDB_GROUP0); 25188 com->uscsi_flags = USCSI_SILENT; 25189 com->uscsi_timeout = un->un_reserve_release_time; 25190 com->uscsi_cdblen = CDB_GROUP0; 25191 com->uscsi_cdb = cdb; 25192 if (cmd == SD_RELEASE) { 25193 cdb[0] = SCMD_RELEASE; 25194 } else { 25195 cdb[0] = SCMD_RESERVE; 25196 } 25197 25198 /* Send the command. */ 25199 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25200 SD_PATH_STANDARD); 25201 25202 /* 25203 * "break" a reservation that is held by another host, by issuing a 25204 * reset if priority reserve is desired, and we could not get the 25205 * device. 25206 */ 25207 if ((cmd == SD_PRIORITY_RESERVE) && 25208 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25209 /* 25210 * First try to reset the LUN. If we cannot, then try a target 25211 * reset, followed by a bus reset if the target reset fails. 25212 */ 25213 int reset_retval = 0; 25214 if (un->un_f_lun_reset_enabled == TRUE) { 25215 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 25216 } 25217 if (reset_retval == 0) { 25218 /* The LUN reset either failed or was not issued */ 25219 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25220 } 25221 if ((reset_retval == 0) && 25222 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 25223 rval = EIO; 25224 kmem_free(com, sizeof (*com)); 25225 return (rval); 25226 } 25227 25228 bzero(com, sizeof (struct uscsi_cmd)); 25229 com->uscsi_flags = USCSI_SILENT; 25230 com->uscsi_cdb = cdb; 25231 com->uscsi_cdblen = CDB_GROUP0; 25232 com->uscsi_timeout = 5; 25233 25234 /* 25235 * Reissue the last reserve command, this time without request 25236 * sense. Assume that it is just a regular reserve command. 25237 */ 25238 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25239 SD_PATH_STANDARD); 25240 } 25241 25242 /* Return an error if still getting a reservation conflict. 
*/ 25243 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25244 rval = EACCES; 25245 } 25246 25247 kmem_free(com, sizeof (*com)); 25248 return (rval); 25249 } 25250 25251 25252 #define SD_NDUMP_RETRIES 12 25253 /* 25254 * System Crash Dump routine 25255 */ 25256 25257 static int 25258 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 25259 { 25260 int instance; 25261 int partition; 25262 int i; 25263 int err; 25264 struct sd_lun *un; 25265 struct scsi_pkt *wr_pktp; 25266 struct buf *wr_bp; 25267 struct buf wr_buf; 25268 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 25269 daddr_t tgt_blkno; /* rmw - blkno for target */ 25270 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 25271 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 25272 size_t io_start_offset; 25273 int doing_rmw = FALSE; 25274 int rval; 25275 ssize_t dma_resid; 25276 daddr_t oblkno; 25277 diskaddr_t nblks = 0; 25278 diskaddr_t start_block; 25279 25280 instance = SDUNIT(dev); 25281 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 25282 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 25283 return (ENXIO); 25284 } 25285 25286 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 25287 25288 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 25289 25290 partition = SDPART(dev); 25291 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 25292 25293 if (!(NOT_DEVBSIZE(un))) { 25294 int secmask = 0; 25295 int blknomask = 0; 25296 25297 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1; 25298 secmask = un->un_tgt_blocksize - 1; 25299 25300 if (blkno & blknomask) { 25301 SD_TRACE(SD_LOG_DUMP, un, 25302 "sddump: dump start block not modulo %d\n", 25303 un->un_tgt_blocksize); 25304 return (EINVAL); 25305 } 25306 25307 if ((nblk * DEV_BSIZE) & secmask) { 25308 SD_TRACE(SD_LOG_DUMP, un, 25309 "sddump: dump length not modulo %d\n", 25310 un->un_tgt_blocksize); 25311 return (EINVAL); 25312 } 25313 25314 } 25315 25316 /* Validate the blocks to dump against the partition size. */ 25317 25318 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 25319 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 25320 25321 if (NOT_DEVBSIZE(un)) { 25322 if ((blkno + nblk) > nblks) { 25323 SD_TRACE(SD_LOG_DUMP, un, 25324 "sddump: dump range larger than partition: " 25325 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 25326 blkno, nblk, nblks); 25327 return (EINVAL); 25328 } 25329 } else { 25330 if (((blkno / (un->un_tgt_blocksize / DEV_BSIZE)) + 25331 (nblk / (un->un_tgt_blocksize / DEV_BSIZE))) > nblks) { 25332 SD_TRACE(SD_LOG_DUMP, un, 25333 "sddump: dump range larger than partition: " 25334 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 25335 blkno, nblk, nblks); 25336 return (EINVAL); 25337 } 25338 } 25339 25340 mutex_enter(&un->un_pm_mutex); 25341 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 25342 struct scsi_pkt *start_pktp; 25343 25344 mutex_exit(&un->un_pm_mutex); 25345 25346 /* 25347 * use the pm framework to power on the HBA first 25348 */ 25349 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 25350 25351 /* 25352 * Dump no longer uses sdpower to power on a device; it is 25353 * done in-line here so it can be run in polled mode. 25354 */ 25355 25356 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 25357 25358 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 25359 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 25360 25361 if (start_pktp == NULL) { 25362 /* We were not given a SCSI packet, fail.
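 * scsi_init_pkt() was called with NULL_FUNC for the callback, so it
 * can neither block nor be retried later; at crash dump time a NULL
 * packet simply fails the power-up attempt.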
*/ 25363 return (EIO); 25364 } 25365 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 25366 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 25367 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 25368 start_pktp->pkt_flags = FLAG_NOINTR; 25369 25370 mutex_enter(SD_MUTEX(un)); 25371 SD_FILL_SCSI1_LUN(un, start_pktp); 25372 mutex_exit(SD_MUTEX(un)); 25373 /* 25374 * Scsi_poll returns 0 (success) if the command completes and 25375 * the status block is STATUS_GOOD. 25376 */ 25377 if (sd_scsi_poll(un, start_pktp) != 0) { 25378 scsi_destroy_pkt(start_pktp); 25379 return (EIO); 25380 } 25381 scsi_destroy_pkt(start_pktp); 25382 (void) sd_ddi_pm_resume(un); 25383 } else { 25384 mutex_exit(&un->un_pm_mutex); 25385 } 25386 25387 mutex_enter(SD_MUTEX(un)); 25388 un->un_throttle = 0; 25389 25390 /* 25391 * The first time through, reset the specific target device. 25392 * However, when cpr calls sddump we know that sd is in a 25393 * a good state so no bus reset is required. 25394 * Clear sense data via Request Sense cmd. 25395 * In sddump we don't care about allow_bus_device_reset anymore 25396 */ 25397 25398 if ((un->un_state != SD_STATE_SUSPENDED) && 25399 (un->un_state != SD_STATE_DUMPING)) { 25400 25401 New_state(un, SD_STATE_DUMPING); 25402 25403 if (un->un_f_is_fibre == FALSE) { 25404 mutex_exit(SD_MUTEX(un)); 25405 /* 25406 * Attempt a bus reset for parallel scsi. 25407 * 25408 * Note: A bus reset is required because on some host 25409 * systems (i.e. E420R) a bus device reset is 25410 * insufficient to reset the state of the target. 25411 * 25412 * Note: Don't issue the reset for fibre-channel, 25413 * because this tends to hang the bus (loop) for 25414 * too long while everyone is logging out and in 25415 * and the deadman timer for dumping will fire 25416 * before the dump is complete. 25417 */ 25418 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 25419 mutex_enter(SD_MUTEX(un)); 25420 Restore_state(un); 25421 mutex_exit(SD_MUTEX(un)); 25422 return (EIO); 25423 } 25424 25425 /* Delay to give the device some recovery time. */ 25426 drv_usecwait(10000); 25427 25428 if (sd_send_polled_RQS(un) == SD_FAILURE) { 25429 SD_INFO(SD_LOG_DUMP, un, 25430 "sddump: sd_send_polled_RQS failed\n"); 25431 } 25432 mutex_enter(SD_MUTEX(un)); 25433 } 25434 } 25435 25436 /* 25437 * Convert the partition-relative block number to a 25438 * disk physical block number. 25439 */ 25440 if (NOT_DEVBSIZE(un)) { 25441 blkno += start_block; 25442 } else { 25443 blkno = blkno / (un->un_tgt_blocksize / DEV_BSIZE); 25444 blkno += start_block; 25445 } 25446 25447 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 25448 25449 25450 /* 25451 * Check if the device has a non-512 block size. 25452 */ 25453 wr_bp = NULL; 25454 if (NOT_DEVBSIZE(un)) { 25455 tgt_byte_offset = blkno * un->un_sys_blocksize; 25456 tgt_byte_count = nblk * un->un_sys_blocksize; 25457 if ((tgt_byte_offset % un->un_tgt_blocksize) || 25458 (tgt_byte_count % un->un_tgt_blocksize)) { 25459 doing_rmw = TRUE; 25460 /* 25461 * Calculate the block number and number of block 25462 * in terms of the media block size. 25463 */ 25464 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 25465 tgt_nblk = 25466 ((tgt_byte_offset + tgt_byte_count + 25467 (un->un_tgt_blocksize - 1)) / 25468 un->un_tgt_blocksize) - tgt_blkno; 25469 25470 /* 25471 * Invoke the routine which is going to do read part 25472 * of read-modify-write. 25473 * Note that this routine returns a pointer to 25474 * a valid bp in wr_bp. 
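 *
 * Worked example (illustrative numbers only): with 2048-byte media
 * blocks and 512-byte system blocks, dumping 3 system blocks starting
 * at system block 5 gives tgt_byte_offset = 2560 and tgt_byte_count =
 * 1536, hence tgt_blkno = 1, tgt_nblk = 1, and io_start_offset =
 * 2560 - 2048 = 512.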
25475 */ 25476 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 25477 &wr_bp); 25478 if (err) { 25479 mutex_exit(SD_MUTEX(un)); 25480 return (err); 25481 } 25482 /* 25483 * Offset is being calculated as - 25484 * (original block # * system block size) - 25485 * (new block # * target block size) 25486 */ 25487 io_start_offset = 25488 ((uint64_t)(blkno * un->un_sys_blocksize)) - 25489 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 25490 25491 ASSERT((io_start_offset >= 0) && 25492 (io_start_offset < un->un_tgt_blocksize)); 25493 /* 25494 * Do the modify portion of read modify write. 25495 */ 25496 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 25497 (size_t)nblk * un->un_sys_blocksize); 25498 } else { 25499 doing_rmw = FALSE; 25500 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 25501 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 25502 } 25503 25504 /* Convert blkno and nblk to target blocks */ 25505 blkno = tgt_blkno; 25506 nblk = tgt_nblk; 25507 } else { 25508 wr_bp = &wr_buf; 25509 bzero(wr_bp, sizeof (struct buf)); 25510 wr_bp->b_flags = B_BUSY; 25511 wr_bp->b_un.b_addr = addr; 25512 wr_bp->b_bcount = nblk << DEV_BSHIFT; 25513 wr_bp->b_resid = 0; 25514 } 25515 25516 mutex_exit(SD_MUTEX(un)); 25517 25518 /* 25519 * Obtain a SCSI packet for the write command. 25520 * It should be safe to call the allocator here without 25521 * worrying about being locked for DVMA mapping because 25522 * the address we're passed is already a DVMA mapping 25523 * 25524 * We are also not going to worry about semaphore ownership 25525 * in the dump buffer. Dumping is single threaded at present. 25526 */ 25527 25528 wr_pktp = NULL; 25529 25530 dma_resid = wr_bp->b_bcount; 25531 oblkno = blkno; 25532 25533 if (!(NOT_DEVBSIZE(un))) { 25534 nblk = nblk / (un->un_tgt_blocksize / DEV_BSIZE); 25535 } 25536 25537 while (dma_resid != 0) { 25538 25539 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 25540 wr_bp->b_flags &= ~B_ERROR; 25541 25542 if (un->un_partial_dma_supported == 1) { 25543 blkno = oblkno + 25544 ((wr_bp->b_bcount - dma_resid) / 25545 un->un_tgt_blocksize); 25546 nblk = dma_resid / un->un_tgt_blocksize; 25547 25548 if (wr_pktp) { 25549 /* 25550 * Partial DMA transfers after initial transfer 25551 */ 25552 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 25553 blkno, nblk); 25554 } else { 25555 /* Initial transfer */ 25556 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 25557 un->un_pkt_flags, NULL_FUNC, NULL, 25558 blkno, nblk); 25559 } 25560 } else { 25561 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 25562 0, NULL_FUNC, NULL, blkno, nblk); 25563 } 25564 25565 if (rval == 0) { 25566 /* We were given a SCSI packet, continue. 
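 * With partial DMA support the packet may map only part of wr_bp;
 * the enclosing loop then advances blkno/nblk using pkt_resid until
 * dma_resid reaches zero.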
*/ 25567 break; 25568 } 25569 25570 if (i == 0) { 25571 if (wr_bp->b_flags & B_ERROR) { 25572 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25573 "no resources for dumping; " 25574 "error code: 0x%x, retrying", 25575 geterror(wr_bp)); 25576 } else { 25577 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25578 "no resources for dumping; retrying"); 25579 } 25580 } else if (i != (SD_NDUMP_RETRIES - 1)) { 25581 if (wr_bp->b_flags & B_ERROR) { 25582 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25583 "no resources for dumping; error code: " 25584 "0x%x, retrying\n", geterror(wr_bp)); 25585 } 25586 } else { 25587 if (wr_bp->b_flags & B_ERROR) { 25588 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25589 "no resources for dumping; " 25590 "error code: 0x%x, retries failed, " 25591 "giving up.\n", geterror(wr_bp)); 25592 } else { 25593 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25594 "no resources for dumping; " 25595 "retries failed, giving up.\n"); 25596 } 25597 mutex_enter(SD_MUTEX(un)); 25598 Restore_state(un); 25599 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 25600 mutex_exit(SD_MUTEX(un)); 25601 scsi_free_consistent_buf(wr_bp); 25602 } else { 25603 mutex_exit(SD_MUTEX(un)); 25604 } 25605 return (EIO); 25606 } 25607 drv_usecwait(10000); 25608 } 25609 25610 if (un->un_partial_dma_supported == 1) { 25611 /* 25612 * save the resid from PARTIAL_DMA 25613 */ 25614 dma_resid = wr_pktp->pkt_resid; 25615 if (dma_resid != 0) 25616 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 25617 wr_pktp->pkt_resid = 0; 25618 } else { 25619 dma_resid = 0; 25620 } 25621 25622 /* SunBug 1222170 */ 25623 wr_pktp->pkt_flags = FLAG_NOINTR; 25624 25625 err = EIO; 25626 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 25627 25628 /* 25629 * Scsi_poll returns 0 (success) if the command completes and 25630 * the status block is STATUS_GOOD. We should only check 25631 * errors if this condition is not true. Even then we should 25632 * send our own request sense packet only if we have a check 25633 * condition and auto request sense has not been performed by 25634 * the hba. 25635 */ 25636 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 25637 25638 if ((sd_scsi_poll(un, wr_pktp) == 0) && 25639 (wr_pktp->pkt_resid == 0)) { 25640 err = SD_SUCCESS; 25641 break; 25642 } 25643 25644 /* 25645 * Check CMD_DEV_GONE 1st, give up if device is gone. 
25646 */ 25647 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 25648 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25649 "Error while dumping state...Device is gone\n"); 25650 break; 25651 } 25652 25653 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 25654 SD_INFO(SD_LOG_DUMP, un, 25655 "sddump: write failed with CHECK, try # %d\n", i); 25656 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 25657 (void) sd_send_polled_RQS(un); 25658 } 25659 25660 continue; 25661 } 25662 25663 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 25664 int reset_retval = 0; 25665 25666 SD_INFO(SD_LOG_DUMP, un, 25667 "sddump: write failed with BUSY, try # %d\n", i); 25668 25669 if (un->un_f_lun_reset_enabled == TRUE) { 25670 reset_retval = scsi_reset(SD_ADDRESS(un), 25671 RESET_LUN); 25672 } 25673 if (reset_retval == 0) { 25674 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25675 } 25676 (void) sd_send_polled_RQS(un); 25677 25678 } else { 25679 SD_INFO(SD_LOG_DUMP, un, 25680 "sddump: write failed with 0x%x, try # %d\n", 25681 SD_GET_PKT_STATUS(wr_pktp), i); 25682 mutex_enter(SD_MUTEX(un)); 25683 sd_reset_target(un, wr_pktp); 25684 mutex_exit(SD_MUTEX(un)); 25685 } 25686 25687 /* 25688 * If we are not getting anywhere with lun/target resets, 25689 * let's reset the bus. 25690 */ 25691 if (i == SD_NDUMP_RETRIES/2) { 25692 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 25693 (void) sd_send_polled_RQS(un); 25694 } 25695 } 25696 } 25697 25698 scsi_destroy_pkt(wr_pktp); 25699 mutex_enter(SD_MUTEX(un)); 25700 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 25701 mutex_exit(SD_MUTEX(un)); 25702 scsi_free_consistent_buf(wr_bp); 25703 } else { 25704 mutex_exit(SD_MUTEX(un)); 25705 } 25706 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 25707 return (err); 25708 } 25709 25710 /* 25711 * Function: sd_scsi_poll() 25712 * 25713 * Description: This is a wrapper for the scsi_poll call. 25714 * 25715 * Arguments: sd_lun - The unit structure 25716 * scsi_pkt - The scsi packet being sent to the device. 25717 * 25718 * Return Code: 0 - Command completed successfully with good status 25719 * -1 - Command failed. This could indicate a check condition 25720 * or other status value requiring recovery action. 25721 * 25722 * NOTE: This code is only called off sddump(). 25723 */ 25724 25725 static int 25726 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 25727 { 25728 int status; 25729 25730 ASSERT(un != NULL); 25731 ASSERT(!mutex_owned(SD_MUTEX(un))); 25732 ASSERT(pktp != NULL); 25733 25734 status = SD_SUCCESS; 25735 25736 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 25737 pktp->pkt_flags |= un->un_tagflags; 25738 pktp->pkt_flags &= ~FLAG_NODISCON; 25739 } 25740 25741 status = sd_ddi_scsi_poll(pktp); 25742 /* 25743 * Scsi_poll returns 0 (success) if the command completes and the 25744 * status block is STATUS_GOOD. We should only check errors if this 25745 * condition is not true. Even then we should send our own request 25746 * sense packet only if we have a check condition and auto 25747 * request sense has not been performed by the hba. 25748 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 25749 */ 25750 if ((status != SD_SUCCESS) && 25751 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 25752 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 25753 (pktp->pkt_reason != CMD_DEV_GONE)) 25754 (void) sd_send_polled_RQS(un); 25755 25756 return (status); 25757 } 25758 25759 /* 25760 * Function: sd_send_polled_RQS() 25761 * 25762 * Description: This sends the request sense command to a device. 
25763 * 25764 * Arguments: sd_lun - The unit structure 25765 * 25766 * Return Code: 0 - Command completed successfully with good status 25767 * -1 - Command failed. 25768 * 25769 */ 25770 25771 static int 25772 sd_send_polled_RQS(struct sd_lun *un) 25773 { 25774 int ret_val; 25775 struct scsi_pkt *rqs_pktp; 25776 struct buf *rqs_bp; 25777 25778 ASSERT(un != NULL); 25779 ASSERT(!mutex_owned(SD_MUTEX(un))); 25780 25781 ret_val = SD_SUCCESS; 25782 25783 rqs_pktp = un->un_rqs_pktp; 25784 rqs_bp = un->un_rqs_bp; 25785 25786 mutex_enter(SD_MUTEX(un)); 25787 25788 if (un->un_sense_isbusy) { 25789 ret_val = SD_FAILURE; 25790 mutex_exit(SD_MUTEX(un)); 25791 return (ret_val); 25792 } 25793 25794 /* 25795 * If the request sense buffer (and packet) is not in use, 25796 * let's set the un_sense_isbusy and send our packet 25797 */ 25798 un->un_sense_isbusy = 1; 25799 rqs_pktp->pkt_resid = 0; 25800 rqs_pktp->pkt_reason = 0; 25801 rqs_pktp->pkt_flags |= FLAG_NOINTR; 25802 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 25803 25804 mutex_exit(SD_MUTEX(un)); 25805 25806 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 25807 " 0x%p\n", rqs_bp->b_un.b_addr); 25808 25809 /* 25810 * Can't send this to sd_scsi_poll, we wrap ourselves around the 25811 * axle - it has a call into us! 25812 */ 25813 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 25814 SD_INFO(SD_LOG_COMMON, un, 25815 "sd_send_polled_RQS: RQS failed\n"); 25816 } 25817 25818 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 25819 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 25820 25821 mutex_enter(SD_MUTEX(un)); 25822 un->un_sense_isbusy = 0; 25823 mutex_exit(SD_MUTEX(un)); 25824 25825 return (ret_val); 25826 } 25827 25828 /* 25829 * Defines needed for localized version of the scsi_poll routine. 25830 */ 25831 #define CSEC 10000 /* usecs */ 25832 #define SEC_TO_CSEC (1000000/CSEC) 25833 25834 /* 25835 * Function: sd_ddi_scsi_poll() 25836 * 25837 * Description: Localized version of the scsi_poll routine. The purpose is to 25838 * send a scsi_pkt to a device as a polled command. This version 25839 * is to ensure more robust handling of transport errors. 25840 * Specifically this routine cures not ready, coming ready 25841 * transition for power up and reset of sonoma's. This can take 25842 * up to 45 seconds for power-on and 20 seconds for reset of a 25843 * sonoma lun. 25844 * 25845 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 25846 * 25847 * Return Code: 0 - Command completed successfully with good status 25848 * -1 - Command failed. 25849 * 25850 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can 25851 * be fixed (removing this code), we need to determine how to handle the 25852 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 25853 * 25854 * NOTE: This code is only called off sddump(). 25855 */ 25856 static int 25857 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 25858 { 25859 int rval = -1; 25860 int savef; 25861 long savet; 25862 void (*savec)(); 25863 int timeout; 25864 int busy_count; 25865 int poll_delay; 25866 int rc; 25867 uint8_t *sensep; 25868 struct scsi_arq_status *arqstat; 25869 extern int do_polled_io; 25870 25871 ASSERT(pkt->pkt_scbp); 25872 25873 /* 25874 * save old flags.. 
25875 */ 25876 savef = pkt->pkt_flags; 25877 savec = pkt->pkt_comp; 25878 savet = pkt->pkt_time; 25879 25880 pkt->pkt_flags |= FLAG_NOINTR; 25881 25882 /* 25883 * XXX there is nothing in the SCSA spec that states that we should not 25884 * do a callback for polled cmds; however, removing this will break sd 25885 * and probably other target drivers 25886 */ 25887 pkt->pkt_comp = NULL; 25888 25889 /* 25890 * we don't like a polled command without timeout. 25891 * 60 seconds seems long enough. 25892 */ 25893 if (pkt->pkt_time == 0) 25894 pkt->pkt_time = SCSI_POLL_TIMEOUT; 25895 25896 /* 25897 * Send polled cmd. 25898 * 25899 * We do some error recovery for various errors. Tran_busy, 25900 * queue full, and non-dispatched commands are retried every 10 msec. 25901 * as they are typically transient failures. Busy status and Not 25902 * Ready are retried every second as this status takes a while to 25903 * change. 25904 */ 25905 timeout = pkt->pkt_time * SEC_TO_CSEC; 25906 25907 for (busy_count = 0; busy_count < timeout; busy_count++) { 25908 /* 25909 * Initialize pkt status variables. 25910 */ 25911 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 25912 25913 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 25914 if (rc != TRAN_BUSY) { 25915 /* Transport failed - give up. */ 25916 break; 25917 } else { 25918 /* Transport busy - try again. */ 25919 poll_delay = 1 * CSEC; /* 10 msec. */ 25920 } 25921 } else { 25922 /* 25923 * Transport accepted - check pkt status. 25924 */ 25925 rc = (*pkt->pkt_scbp) & STATUS_MASK; 25926 if ((pkt->pkt_reason == CMD_CMPLT) && 25927 (rc == STATUS_CHECK) && 25928 (pkt->pkt_state & STATE_ARQ_DONE)) { 25929 arqstat = 25930 (struct scsi_arq_status *)(pkt->pkt_scbp); 25931 sensep = (uint8_t *)&arqstat->sts_sensedata; 25932 } else { 25933 sensep = NULL; 25934 } 25935 25936 if ((pkt->pkt_reason == CMD_CMPLT) && 25937 (rc == STATUS_GOOD)) { 25938 /* No error - we're done */ 25939 rval = 0; 25940 break; 25941 25942 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 25943 /* Lost connection - give up */ 25944 break; 25945 25946 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 25947 (pkt->pkt_state == 0)) { 25948 /* Pkt not dispatched - try again. */ 25949 poll_delay = 1 * CSEC; /* 10 msec. */ 25950 25951 } else if ((pkt->pkt_reason == CMD_CMPLT) && 25952 (rc == STATUS_QFULL)) { 25953 /* Queue full - try again. */ 25954 poll_delay = 1 * CSEC; /* 10 msec. */ 25955 25956 } else if ((pkt->pkt_reason == CMD_CMPLT) && 25957 (rc == STATUS_BUSY)) { 25958 /* Busy - try again. */ 25959 poll_delay = 100 * CSEC; /* 1 sec. */ 25960 busy_count += (SEC_TO_CSEC - 1); 25961 25962 } else if ((sensep != NULL) && 25963 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 25964 /* 25965 * Unit Attention - try again. 25966 * Pretend it took 1 sec. 25967 * NOTE: 'continue' avoids poll_delay 25968 */ 25969 busy_count += (SEC_TO_CSEC - 1); 25970 continue; 25971 25972 } else if ((sensep != NULL) && 25973 (scsi_sense_key(sensep) == KEY_NOT_READY) && 25974 (scsi_sense_asc(sensep) == 0x04) && 25975 (scsi_sense_ascq(sensep) == 0x01)) { 25976 /* 25977 * Not ready -> ready - try again. 25978 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 25979 * ...same as STATUS_BUSY 25980 */ 25981 poll_delay = 100 * CSEC; /* 1 sec. */ 25982 busy_count += (SEC_TO_CSEC - 1); 25983 25984 } else { 25985 /* BAD status - give up. 
*/ 25986 break; 25987 } 25988 } 25989 25990 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 25991 !do_polled_io) { 25992 delay(drv_usectohz(poll_delay)); 25993 } else { 25994 /* we busy wait during cpr_dump or interrupt threads */ 25995 drv_usecwait(poll_delay); 25996 } 25997 } 25998 25999 pkt->pkt_flags = savef; 26000 pkt->pkt_comp = savec; 26001 pkt->pkt_time = savet; 26002 26003 /* return on error */ 26004 if (rval) 26005 return (rval); 26006 26007 /* 26008 * This is not a performance critical code path. 26009 * 26010 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 26011 * issues associated with looking at DMA memory prior to 26012 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 26013 */ 26014 scsi_sync_pkt(pkt); 26015 return (0); 26016 } 26017 26018 26019 26020 /* 26021 * Function: sd_persistent_reservation_in_read_keys 26022 * 26023 * Description: This routine is the driver entry point for handling CD-ROM 26024 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 26025 * by sending the SCSI-3 PRIN commands to the device. 26026 * Processes the read keys command response by copying the 26027 * reservation key information into the user provided buffer. 26028 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 26029 * 26030 * Arguments: un - Pointer to soft state struct for the target. 26031 * usrp - user provided pointer to multihost Persistent In Read 26032 * Keys structure (mhioc_inkeys_t) 26033 * flag - this argument is a pass through to ddi_copyxxx() 26034 * directly from the mode argument of ioctl(). 26035 * 26036 * Return Code: 0 - Success 26037 * EACCES 26038 * ENOTSUP 26039 * errno return code from sd_send_scsi_cmd() 26040 * 26041 * Context: Can sleep. Does not return until command is completed. 26042 */ 26043 26044 static int 26045 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 26046 mhioc_inkeys_t *usrp, int flag) 26047 { 26048 #ifdef _MULTI_DATAMODEL 26049 struct mhioc_key_list32 li32; 26050 #endif 26051 sd_prin_readkeys_t *in; 26052 mhioc_inkeys_t *ptr; 26053 mhioc_key_list_t li; 26054 uchar_t *data_bufp; 26055 int data_len; 26056 int rval = 0; 26057 size_t copysz; 26058 sd_ssc_t *ssc; 26059 26060 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 26061 return (EINVAL); 26062 } 26063 bzero(&li, sizeof (mhioc_key_list_t)); 26064 26065 ssc = sd_ssc_init(un); 26066 26067 /* 26068 * Get the listsize from user 26069 */ 26070 #ifdef _MULTI_DATAMODEL 26071 26072 switch (ddi_model_convert_from(flag & FMODELS)) { 26073 case DDI_MODEL_ILP32: 26074 copysz = sizeof (struct mhioc_key_list32); 26075 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 26076 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26077 "sd_persistent_reservation_in_read_keys: " 26078 "failed ddi_copyin: mhioc_key_list32_t\n"); 26079 rval = EFAULT; 26080 goto done; 26081 } 26082 li.listsize = li32.listsize; 26083 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 26084 break; 26085 26086 case DDI_MODEL_NONE: 26087 copysz = sizeof (mhioc_key_list_t); 26088 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26089 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26090 "sd_persistent_reservation_in_read_keys: " 26091 "failed ddi_copyin: mhioc_key_list_t\n"); 26092 rval = EFAULT; 26093 goto done; 26094 } 26095 break; 26096 } 26097 26098 #else /* ! 
_MULTI_DATAMODEL */ 26099 copysz = sizeof (mhioc_key_list_t); 26100 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26101 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26102 "sd_persistent_reservation_in_read_keys: " 26103 "failed ddi_copyin: mhioc_key_list_t\n"); 26104 rval = EFAULT; 26105 goto done; 26106 } 26107 #endif 26108 26109 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 26110 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 26111 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26112 26113 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 26114 data_len, data_bufp); 26115 if (rval != 0) { 26116 if (rval == EIO) 26117 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 26118 else 26119 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 26120 goto done; 26121 } 26122 in = (sd_prin_readkeys_t *)data_bufp; 26123 ptr->generation = BE_32(in->generation); 26124 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 26125 26126 /* 26127 * Return the min(listsize, listlen) keys 26128 */ 26129 #ifdef _MULTI_DATAMODEL 26130 26131 switch (ddi_model_convert_from(flag & FMODELS)) { 26132 case DDI_MODEL_ILP32: 26133 li32.listlen = li.listlen; 26134 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 26135 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26136 "sd_persistent_reservation_in_read_keys: " 26137 "failed ddi_copyout: mhioc_key_list32_t\n"); 26138 rval = EFAULT; 26139 goto done; 26140 } 26141 break; 26142 26143 case DDI_MODEL_NONE: 26144 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26145 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26146 "sd_persistent_reservation_in_read_keys: " 26147 "failed ddi_copyout: mhioc_key_list_t\n"); 26148 rval = EFAULT; 26149 goto done; 26150 } 26151 break; 26152 } 26153 26154 #else /* ! _MULTI_DATAMODEL */ 26155 26156 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26157 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26158 "sd_persistent_reservation_in_read_keys: " 26159 "failed ddi_copyout: mhioc_key_list_t\n"); 26160 rval = EFAULT; 26161 goto done; 26162 } 26163 26164 #endif /* _MULTI_DATAMODEL */ 26165 26166 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 26167 li.listsize * MHIOC_RESV_KEY_SIZE); 26168 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 26169 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26170 "sd_persistent_reservation_in_read_keys: " 26171 "failed ddi_copyout: keylist\n"); 26172 rval = EFAULT; 26173 } 26174 done: 26175 sd_ssc_fini(ssc); 26176 kmem_free(data_bufp, data_len); 26177 return (rval); 26178 } 26179 26180 26181 /* 26182 * Function: sd_persistent_reservation_in_read_resv 26183 * 26184 * Description: This routine is the driver entry point for handling CD-ROM 26185 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 26186 * by sending the SCSI-3 PRIN commands to the device. 26187 * Process the read persistent reservations command response by 26188 * copying the reservation information into the user provided 26189 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 26190 * 26191 * Arguments: un - Pointer to soft state struct for the target. 26192 * usrp - user provided pointer to multihost Persistent In Read 26193 * Keys structure (mhioc_inkeys_t) 26194 * flag - this argument is a pass through to ddi_copyxxx() 26195 * directly from the mode argument of ioctl(). 26196 * 26197 * Return Code: 0 - Success 26198 * EACCES 26199 * ENOTSUP 26200 * errno return code from sd_send_scsi_cmd() 26201 * 26202 * Context: Can sleep. Does not return until command is completed. 
26203 */ 26204 26205 static int 26206 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 26207 mhioc_inresvs_t *usrp, int flag) 26208 { 26209 #ifdef _MULTI_DATAMODEL 26210 struct mhioc_resv_desc_list32 resvlist32; 26211 #endif 26212 sd_prin_readresv_t *in; 26213 mhioc_inresvs_t *ptr; 26214 sd_readresv_desc_t *readresv_ptr; 26215 mhioc_resv_desc_list_t resvlist; 26216 mhioc_resv_desc_t resvdesc; 26217 uchar_t *data_bufp = NULL; 26218 int data_len; 26219 int rval = 0; 26220 int i; 26221 size_t copysz; 26222 mhioc_resv_desc_t *bufp; 26223 sd_ssc_t *ssc; 26224 26225 if ((ptr = usrp) == NULL) { 26226 return (EINVAL); 26227 } 26228 26229 ssc = sd_ssc_init(un); 26230 26231 /* 26232 * Get the listsize from user 26233 */ 26234 #ifdef _MULTI_DATAMODEL 26235 switch (ddi_model_convert_from(flag & FMODELS)) { 26236 case DDI_MODEL_ILP32: 26237 copysz = sizeof (struct mhioc_resv_desc_list32); 26238 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 26239 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26240 "sd_persistent_reservation_in_read_resv: " 26241 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26242 rval = EFAULT; 26243 goto done; 26244 } 26245 resvlist.listsize = resvlist32.listsize; 26246 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 26247 break; 26248 26249 case DDI_MODEL_NONE: 26250 copysz = sizeof (mhioc_resv_desc_list_t); 26251 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26252 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26253 "sd_persistent_reservation_in_read_resv: " 26254 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26255 rval = EFAULT; 26256 goto done; 26257 } 26258 break; 26259 } 26260 #else /* ! _MULTI_DATAMODEL */ 26261 copysz = sizeof (mhioc_resv_desc_list_t); 26262 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26263 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26264 "sd_persistent_reservation_in_read_resv: " 26265 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26266 rval = EFAULT; 26267 goto done; 26268 } 26269 #endif /* ! _MULTI_DATAMODEL */ 26270 26271 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 26272 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 26273 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26274 26275 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV, 26276 data_len, data_bufp); 26277 if (rval != 0) { 26278 if (rval == EIO) 26279 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 26280 else 26281 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 26282 goto done; 26283 } 26284 in = (sd_prin_readresv_t *)data_bufp; 26285 ptr->generation = BE_32(in->generation); 26286 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 26287 26288 /* 26289 * Return the min(listsize, listlen) reservation descriptors 26290 */ 26291 #ifdef _MULTI_DATAMODEL 26292 26293 switch (ddi_model_convert_from(flag & FMODELS)) { 26294 case DDI_MODEL_ILP32: 26295 resvlist32.listlen = resvlist.listlen; 26296 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 26297 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26298 "sd_persistent_reservation_in_read_resv: " 26299 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26300 rval = EFAULT; 26301 goto done; 26302 } 26303 break; 26304 26305 case DDI_MODEL_NONE: 26306 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26307 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26308 "sd_persistent_reservation_in_read_resv: " 26309 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26310 rval = EFAULT; 26311 goto done; 26312 } 26313 break; 26314 } 26315 26316 #else /* !
_MULTI_DATAMODEL */ 26317 26318 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26319 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26320 "sd_persistent_reservation_in_read_resv: " 26321 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26322 rval = EFAULT; 26323 goto done; 26324 } 26325 26326 #endif /* ! _MULTI_DATAMODEL */ 26327 26328 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 26329 bufp = resvlist.list; 26330 copysz = sizeof (mhioc_resv_desc_t); 26331 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 26332 i++, readresv_ptr++, bufp++) { 26333 26334 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 26335 MHIOC_RESV_KEY_SIZE); 26336 resvdesc.type = readresv_ptr->type; 26337 resvdesc.scope = readresv_ptr->scope; 26338 resvdesc.scope_specific_addr = 26339 BE_32(readresv_ptr->scope_specific_addr); 26340 26341 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 26342 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26343 "sd_persistent_reservation_in_read_resv: " 26344 "failed ddi_copyout: resvlist\n"); 26345 rval = EFAULT; 26346 goto done; 26347 } 26348 } 26349 done: 26350 sd_ssc_fini(ssc); 26351 /* only if data_bufp is allocated, we need to free it */ 26352 if (data_bufp) { 26353 kmem_free(data_bufp, data_len); 26354 } 26355 return (rval); 26356 } 26357 26358 26359 /* 26360 * Function: sr_change_blkmode() 26361 * 26362 * Description: This routine is the driver entry point for handling CD-ROM 26363 * block mode ioctl requests. Support for returning and changing 26364 * the current block size in use by the device is implemented. The 26365 * LBA size is changed via a MODE SELECT Block Descriptor. 26366 * 26367 * This routine issues a mode sense with an allocation length of 26368 * 12 bytes for the mode page header and a single block descriptor. 26369 * 26370 * Arguments: dev - the device 'dev_t' 26371 * cmd - the request type; one of CDROMGBLKMODE (get) or 26372 * CDROMSBLKMODE (set) 26373 * data - current block size or requested block size 26374 * flag - this argument is a pass through to ddi_copyxxx() directly 26375 * from the mode argument of ioctl(). 26376 * 26377 * Return Code: the code returned by sd_send_scsi_cmd() 26378 * EINVAL if invalid arguments are provided 26379 * EFAULT if ddi_copyxxx() fails 26380 * ENXIO if fail ddi_get_soft_state 26381 * EIO if invalid mode sense block descriptor length 26382 * 26383 */ 26384 26385 static int 26386 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 26387 { 26388 struct sd_lun *un = NULL; 26389 struct mode_header *sense_mhp, *select_mhp; 26390 struct block_descriptor *sense_desc, *select_desc; 26391 int current_bsize; 26392 int rval = EINVAL; 26393 uchar_t *sense = NULL; 26394 uchar_t *select = NULL; 26395 sd_ssc_t *ssc; 26396 26397 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 26398 26399 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26400 return (ENXIO); 26401 } 26402 26403 /* 26404 * The block length is changed via the Mode Select block descriptor, the 26405 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 26406 * required as part of this routine. Therefore the mode sense allocation 26407 * length is specified to be the length of a mode page header and a 26408 * block descriptor. 
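* (In other words, the allocation is expected to be MODE_HEADER_LENGTH (4 bytes) plus MODE_BLK_DESC_LENGTH (8 bytes), i.e. the 12 bytes mentioned in the function header; values assumed from the standard mode sense layout.)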
26409 */ 26410 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26411 26412 ssc = sd_ssc_init(un); 26413 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 26414 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD); 26415 sd_ssc_fini(ssc); 26416 if (rval != 0) { 26417 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26418 "sr_change_blkmode: Mode Sense Failed\n"); 26419 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26420 return (rval); 26421 } 26422 26423 /* Check the block descriptor len to handle only 1 block descriptor */ 26424 sense_mhp = (struct mode_header *)sense; 26425 if ((sense_mhp->bdesc_length == 0) || 26426 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 26427 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26428 "sr_change_blkmode: Mode Sense returned invalid block" 26429 " descriptor length\n"); 26430 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26431 return (EIO); 26432 } 26433 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 26434 current_bsize = ((sense_desc->blksize_hi << 16) | 26435 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 26436 26437 /* Process command */ 26438 switch (cmd) { 26439 case CDROMGBLKMODE: 26440 /* Return the block size obtained during the mode sense */ 26441 if (ddi_copyout(¤t_bsize, (void *)data, 26442 sizeof (int), flag) != 0) 26443 rval = EFAULT; 26444 break; 26445 case CDROMSBLKMODE: 26446 /* Validate the requested block size */ 26447 switch (data) { 26448 case CDROM_BLK_512: 26449 case CDROM_BLK_1024: 26450 case CDROM_BLK_2048: 26451 case CDROM_BLK_2056: 26452 case CDROM_BLK_2336: 26453 case CDROM_BLK_2340: 26454 case CDROM_BLK_2352: 26455 case CDROM_BLK_2368: 26456 case CDROM_BLK_2448: 26457 case CDROM_BLK_2646: 26458 case CDROM_BLK_2647: 26459 break; 26460 default: 26461 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26462 "sr_change_blkmode: " 26463 "Block Size '%ld' Not Supported\n", data); 26464 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26465 return (EINVAL); 26466 } 26467 26468 /* 26469 * The current block size matches the requested block size so 26470 * there is no need to send the mode select to change the size 26471 */ 26472 if (current_bsize == data) { 26473 break; 26474 } 26475 26476 /* Build the select data for the requested block size */ 26477 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26478 select_mhp = (struct mode_header *)select; 26479 select_desc = 26480 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 26481 /* 26482 * The LBA size is changed via the block descriptor, so the 26483 * descriptor is built according to the user data 26484 */ 26485 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 26486 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 26487 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 26488 select_desc->blksize_lo = (char)((data) & 0x000000ff); 26489 26490 /* Send the mode select for the requested block size */ 26491 ssc = sd_ssc_init(un); 26492 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 26493 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 26494 SD_PATH_STANDARD); 26495 sd_ssc_fini(ssc); 26496 if (rval != 0) { 26497 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26498 "sr_change_blkmode: Mode Select Failed\n"); 26499 /* 26500 * The mode select failed for the requested block size, 26501 * so reset the data for the original block size and 26502 * send it to the target. The error is indicated by the 26503 * return value for the failed mode select. 
26504 */ 26505 select_desc->blksize_hi = sense_desc->blksize_hi; 26506 select_desc->blksize_mid = sense_desc->blksize_mid; 26507 select_desc->blksize_lo = sense_desc->blksize_lo; 26508 ssc = sd_ssc_init(un); 26509 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 26510 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 26511 SD_PATH_STANDARD); 26512 sd_ssc_fini(ssc); 26513 } else { 26514 ASSERT(!mutex_owned(SD_MUTEX(un))); 26515 mutex_enter(SD_MUTEX(un)); 26516 sd_update_block_info(un, (uint32_t)data, 0); 26517 mutex_exit(SD_MUTEX(un)); 26518 } 26519 break; 26520 default: 26521 /* should not reach here, but check anyway */ 26522 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26523 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 26524 rval = EINVAL; 26525 break; 26526 } 26527 26528 if (select) { 26529 kmem_free(select, BUFLEN_CHG_BLK_MODE); 26530 } 26531 if (sense) { 26532 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26533 } 26534 return (rval); 26535 } 26536 26537 26538 /* 26539 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 26540 * implement driver support for getting and setting the CD speed. The command 26541 * set used will be based on the device type. If the device has not been 26542 * identified as MMC the Toshiba vendor specific mode page will be used. If 26543 * the device is MMC but does not support the Real Time Streaming feature 26544 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 26545 * be used to read the speed. 26546 */ 26547 26548 /* 26549 * Function: sr_change_speed() 26550 * 26551 * Description: This routine is the driver entry point for handling CD-ROM 26552 * drive speed ioctl requests for devices supporting the Toshiba 26553 * vendor specific drive speed mode page. Support for returning 26554 * and changing the current drive speed in use by the device is 26555 * implemented. 26556 * 26557 * Arguments: dev - the device 'dev_t' 26558 * cmd - the request type; one of CDROMGDRVSPEED (get) or 26559 * CDROMSDRVSPEED (set) 26560 * data - current drive speed or requested drive speed 26561 * flag - this argument is a pass through to ddi_copyxxx() directly 26562 * from the mode argument of ioctl(). 26563 * 26564 * Return Code: the code returned by sd_send_scsi_cmd() 26565 * EINVAL if invalid arguments are provided 26566 * EFAULT if ddi_copyxxx() fails 26567 * ENXIO if fail ddi_get_soft_state 26568 * EIO if invalid mode sense block descriptor length 26569 */ 26570 26571 static int 26572 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26573 { 26574 struct sd_lun *un = NULL; 26575 struct mode_header *sense_mhp, *select_mhp; 26576 struct mode_speed *sense_page, *select_page; 26577 int current_speed; 26578 int rval = EINVAL; 26579 int bd_len; 26580 uchar_t *sense = NULL; 26581 uchar_t *select = NULL; 26582 sd_ssc_t *ssc; 26583 26584 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26585 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26586 return (ENXIO); 26587 } 26588 26589 /* 26590 * Note: The drive speed is being modified here according to a Toshiba 26591 * vendor specific mode page (0x31). 
26592 */ 26593 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26594 26595 ssc = sd_ssc_init(un); 26596 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 26597 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 26598 SD_PATH_STANDARD); 26599 sd_ssc_fini(ssc); 26600 if (rval != 0) { 26601 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26602 "sr_change_speed: Mode Sense Failed\n"); 26603 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26604 return (rval); 26605 } 26606 sense_mhp = (struct mode_header *)sense; 26607 26608 /* Check the block descriptor len to handle only 1 block descriptor */ 26609 bd_len = sense_mhp->bdesc_length; 26610 if (bd_len > MODE_BLK_DESC_LENGTH) { 26611 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26612 "sr_change_speed: Mode Sense returned invalid block " 26613 "descriptor length\n"); 26614 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26615 return (EIO); 26616 } 26617 26618 sense_page = (struct mode_speed *) 26619 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 26620 current_speed = sense_page->speed; 26621 26622 /* Process command */ 26623 switch (cmd) { 26624 case CDROMGDRVSPEED: 26625 /* Return the drive speed obtained during the mode sense */ 26626 if (current_speed == 0x2) { 26627 current_speed = CDROM_TWELVE_SPEED; 26628 } 26629 if (ddi_copyout(&current_speed, (void *)data, 26630 sizeof (int), flag) != 0) { 26631 rval = EFAULT; 26632 } 26633 break; 26634 case CDROMSDRVSPEED: 26635 /* Validate the requested drive speed */ 26636 switch ((uchar_t)data) { 26637 case CDROM_TWELVE_SPEED: 26638 data = 0x2; 26639 /*FALLTHROUGH*/ 26640 case CDROM_NORMAL_SPEED: 26641 case CDROM_DOUBLE_SPEED: 26642 case CDROM_QUAD_SPEED: 26643 case CDROM_MAXIMUM_SPEED: 26644 break; 26645 default: 26646 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26647 "sr_change_speed: " 26648 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 26649 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26650 return (EINVAL); 26651 } 26652 26653 /* 26654 * The current drive speed matches the requested drive speed so 26655 * there is no need to send the mode select to change the speed 26656 */ 26657 if (current_speed == data) { 26658 break; 26659 } 26660 26661 /* Build the select data for the requested drive speed */ 26662 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26663 select_mhp = (struct mode_header *)select; 26664 select_mhp->bdesc_length = 0; 26665 select_page = 26666 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 26667 26668 26669 select_page->mode_page.code = CDROM_MODE_SPEED; 26670 select_page->mode_page.length = 2; 26671 select_page->speed = (uchar_t)data; 26672 26673 /* Send the mode select for the requested drive speed */ 26674 ssc = sd_ssc_init(un); 26675 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 26676 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26677 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26678 sd_ssc_fini(ssc); 26679 if (rval != 0) { 26680 /* 26681 * The mode select failed for the requested drive speed, 26682 * so reset the data for the original drive speed and 26683 * send it to the target. The error is indicated by the 26684 * return value for the failed mode select.
26685 */ 26686 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26687 "sr_drive_speed: Mode Select Failed\n"); 26688 select_page->speed = sense_page->speed; 26689 ssc = sd_ssc_init(un); 26690 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 26691 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26692 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26693 sd_ssc_fini(ssc); 26694 } 26695 break; 26696 default: 26697 /* should not reach here, but check anyway */ 26698 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26699 "sr_change_speed: Command '%x' Not Supported\n", cmd); 26700 rval = EINVAL; 26701 break; 26702 } 26703 26704 if (select) { 26705 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 26706 } 26707 if (sense) { 26708 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26709 } 26710 26711 return (rval); 26712 } 26713 26714 26715 /* 26716 * Function: sr_atapi_change_speed() 26717 * 26718 * Description: This routine is the driver entry point for handling CD-ROM 26719 * drive speed ioctl requests for MMC devices that do not support 26720 * the Real Time Streaming feature (0x107). 26721 * 26722 * Note: This routine will use the SET SPEED command which may not 26723 * be supported by all devices. 26724 * 26725 * Arguments: dev- the device 'dev_t' 26726 * cmd- the request type; one of CDROMGDRVSPEED (get) or 26727 * CDROMSDRVSPEED (set) 26728 * data- current drive speed or requested drive speed 26729 * flag- this argument is a pass through to ddi_copyxxx() directly 26730 * from the mode argument of ioctl(). 26731 * 26732 * Return Code: the code returned by sd_send_scsi_cmd() 26733 * EINVAL if invalid arguments are provided 26734 * EFAULT if ddi_copyxxx() fails 26735 * ENXIO if fail ddi_get_soft_state 26736 * EIO if invalid mode sense block descriptor length 26737 */ 26738 26739 static int 26740 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26741 { 26742 struct sd_lun *un; 26743 struct uscsi_cmd *com = NULL; 26744 struct mode_header_grp2 *sense_mhp; 26745 uchar_t *sense_page; 26746 uchar_t *sense = NULL; 26747 char cdb[CDB_GROUP5]; 26748 int bd_len; 26749 int current_speed = 0; 26750 int max_speed = 0; 26751 int rval; 26752 sd_ssc_t *ssc; 26753 26754 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26755 26756 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26757 return (ENXIO); 26758 } 26759 26760 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 26761 26762 ssc = sd_ssc_init(un); 26763 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 26764 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 26765 SD_PATH_STANDARD); 26766 sd_ssc_fini(ssc); 26767 if (rval != 0) { 26768 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26769 "sr_atapi_change_speed: Mode Sense Failed\n"); 26770 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26771 return (rval); 26772 } 26773 26774 /* Check the block descriptor len to handle only 1 block descriptor */ 26775 sense_mhp = (struct mode_header_grp2 *)sense; 26776 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 26777 if (bd_len > MODE_BLK_DESC_LENGTH) { 26778 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26779 "sr_atapi_change_speed: Mode Sense returned invalid " 26780 "block descriptor length\n"); 26781 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26782 return (EIO); 26783 } 26784 26785 /* Calculate the current and maximum drive speeds */ 26786 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26787 current_speed = (sense_page[14] << 8) | sense_page[15]; 26788 max_speed = (sense_page[8] << 8) | sense_page[9]; 26789 26790 
/* Process the command */ 26791 switch (cmd) { 26792 case CDROMGDRVSPEED: 26793 current_speed /= SD_SPEED_1X; 26794 if (ddi_copyout(&current_speed, (void *)data, 26795 sizeof (int), flag) != 0) 26796 rval = EFAULT; 26797 break; 26798 case CDROMSDRVSPEED: 26799 /* Convert the speed code to KB/sec */ 26800 switch ((uchar_t)data) { 26801 case CDROM_NORMAL_SPEED: 26802 current_speed = SD_SPEED_1X; 26803 break; 26804 case CDROM_DOUBLE_SPEED: 26805 current_speed = 2 * SD_SPEED_1X; 26806 break; 26807 case CDROM_QUAD_SPEED: 26808 current_speed = 4 * SD_SPEED_1X; 26809 break; 26810 case CDROM_TWELVE_SPEED: 26811 current_speed = 12 * SD_SPEED_1X; 26812 break; 26813 case CDROM_MAXIMUM_SPEED: 26814 current_speed = 0xffff; 26815 break; 26816 default: 26817 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26818 "sr_atapi_change_speed: invalid drive speed %d\n", 26819 (uchar_t)data); 26820 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26821 return (EINVAL); 26822 } 26823 26824 /* Check the request against the drive's max speed. */ 26825 if (current_speed != 0xffff) { 26826 if (current_speed > max_speed) { 26827 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26828 return (EINVAL); 26829 } 26830 } 26831 26832 /* 26833 * Build and send the SET SPEED command 26834 * 26835 * Note: The SET SPEED (0xBB) command used in this routine is 26836 * obsolete per the SCSI MMC spec but still supported in the 26837 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI, 26838 * therefore the command is still implemented in this routine. 26839 */ 26840 bzero(cdb, sizeof (cdb)); 26841 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 26842 cdb[2] = (uchar_t)(current_speed >> 8); 26843 cdb[3] = (uchar_t)current_speed; 26844 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26845 com->uscsi_cdb = (caddr_t)cdb; 26846 com->uscsi_cdblen = CDB_GROUP5; 26847 com->uscsi_bufaddr = NULL; 26848 com->uscsi_buflen = 0; 26849 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26850 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 26851 break; 26852 default: 26853 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26854 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 26855 rval = EINVAL; 26856 } 26857 26858 if (sense) { 26859 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26860 } 26861 if (com) { 26862 kmem_free(com, sizeof (*com)); 26863 } 26864 return (rval); 26865 } 26866 26867 26868 /* 26869 * Function: sr_pause_resume() 26870 * 26871 * Description: This routine is the driver entry point for handling CD-ROM 26872 * pause/resume ioctl requests. This only affects the audio play 26873 * operation. 26874 * 26875 * Arguments: dev - the device 'dev_t' 26876 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 26877 * for setting the resume bit of the cdb.
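* (Per the PAUSE/RESUME command, opcode 0x4B, byte 8 bit 0 is the Resume bit: 1 resumes audio play and 0 pauses it, which is exactly what cdb[8] is set to in the code below.)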
26878 * 26879 * Return Code: the code returned by sd_send_scsi_cmd() 26880 * EINVAL if invalid mode specified 26881 * 26882 */ 26883 26884 static int 26885 sr_pause_resume(dev_t dev, int cmd) 26886 { 26887 struct sd_lun *un; 26888 struct uscsi_cmd *com; 26889 char cdb[CDB_GROUP1]; 26890 int rval; 26891 26892 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26893 return (ENXIO); 26894 } 26895 26896 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26897 bzero(cdb, CDB_GROUP1); 26898 cdb[0] = SCMD_PAUSE_RESUME; 26899 switch (cmd) { 26900 case CDROMRESUME: 26901 cdb[8] = 1; 26902 break; 26903 case CDROMPAUSE: 26904 cdb[8] = 0; 26905 break; 26906 default: 26907 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 26908 " Command '%x' Not Supported\n", cmd); 26909 rval = EINVAL; 26910 goto done; 26911 } 26912 26913 com->uscsi_cdb = cdb; 26914 com->uscsi_cdblen = CDB_GROUP1; 26915 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26916 26917 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26918 SD_PATH_STANDARD); 26919 26920 done: 26921 kmem_free(com, sizeof (*com)); 26922 return (rval); 26923 } 26924 26925 26926 /* 26927 * Function: sr_play_msf() 26928 * 26929 * Description: This routine is the driver entry point for handling CD-ROM 26930 * ioctl requests to output the audio signals at the specified 26931 * starting address and continue the audio play until the specified 26932 * ending address (CDROMPLAYMSF) The address is in Minute Second 26933 * Frame (MSF) format. 26934 * 26935 * Arguments: dev - the device 'dev_t' 26936 * data - pointer to user provided audio msf structure, 26937 * specifying start/end addresses. 26938 * flag - this argument is a pass through to ddi_copyxxx() 26939 * directly from the mode argument of ioctl(). 
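* (Red Book addressing background, for reference only: audio plays at 75 frames per second and the program area starts at 00:02:00, so an MSF address corresponds to LBA (minute * 60 + second) * 75 + frame - 150. This routine does not perform that conversion; the MSF values are passed to the drive as-is, BCD encoded when un_f_cfg_playmsf_bcd is set.)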
26940 * 26941 * Return Code: the code returned by sd_send_scsi_cmd() 26942 * EFAULT if ddi_copyxxx() fails 26943 * ENXIO if fail ddi_get_soft_state 26944 * EINVAL if data pointer is NULL 26945 */ 26946 26947 static int 26948 sr_play_msf(dev_t dev, caddr_t data, int flag) 26949 { 26950 struct sd_lun *un; 26951 struct uscsi_cmd *com; 26952 struct cdrom_msf msf_struct; 26953 struct cdrom_msf *msf = &msf_struct; 26954 char cdb[CDB_GROUP1]; 26955 int rval; 26956 26957 if (data == NULL) { 26958 return (EINVAL); 26959 } 26960 26961 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26962 return (ENXIO); 26963 } 26964 26965 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 26966 return (EFAULT); 26967 } 26968 26969 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26970 bzero(cdb, CDB_GROUP1); 26971 cdb[0] = SCMD_PLAYAUDIO_MSF; 26972 if (un->un_f_cfg_playmsf_bcd == TRUE) { 26973 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 26974 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 26975 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 26976 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 26977 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 26978 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 26979 } else { 26980 cdb[3] = msf->cdmsf_min0; 26981 cdb[4] = msf->cdmsf_sec0; 26982 cdb[5] = msf->cdmsf_frame0; 26983 cdb[6] = msf->cdmsf_min1; 26984 cdb[7] = msf->cdmsf_sec1; 26985 cdb[8] = msf->cdmsf_frame1; 26986 } 26987 com->uscsi_cdb = cdb; 26988 com->uscsi_cdblen = CDB_GROUP1; 26989 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26990 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26991 SD_PATH_STANDARD); 26992 kmem_free(com, sizeof (*com)); 26993 return (rval); 26994 } 26995 26996 26997 /* 26998 * Function: sr_play_trkind() 26999 * 27000 * Description: This routine is the driver entry point for handling CD-ROM 27001 * ioctl requests to output the audio signals at the specified 27002 * starting address and continue the audio play until the specified 27003 * ending address (CDROMPLAYTRKIND). The address is in Track Index 27004 * format. 27005 * 27006 * Arguments: dev - the device 'dev_t' 27007 * data - pointer to user provided audio track/index structure, 27008 * specifying start/end addresses. 27009 * flag - this argument is a pass through to ddi_copyxxx() 27010 * directly from the mode argument of ioctl(). 
27011 * 27012 * Return Code: the code returned by sd_send_scsi_cmd() 27013 * EFAULT if ddi_copyxxx() fails 27014 * ENXIO if fail ddi_get_soft_state 27015 * EINVAL if data pointer is NULL 27016 */ 27017 27018 static int 27019 sr_play_trkind(dev_t dev, caddr_t data, int flag) 27020 { 27021 struct cdrom_ti ti_struct; 27022 struct cdrom_ti *ti = &ti_struct; 27023 struct uscsi_cmd *com = NULL; 27024 char cdb[CDB_GROUP1]; 27025 int rval; 27026 27027 if (data == NULL) { 27028 return (EINVAL); 27029 } 27030 27031 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 27032 return (EFAULT); 27033 } 27034 27035 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27036 bzero(cdb, CDB_GROUP1); 27037 cdb[0] = SCMD_PLAYAUDIO_TI; 27038 cdb[4] = ti->cdti_trk0; 27039 cdb[5] = ti->cdti_ind0; 27040 cdb[7] = ti->cdti_trk1; 27041 cdb[8] = ti->cdti_ind1; 27042 com->uscsi_cdb = cdb; 27043 com->uscsi_cdblen = CDB_GROUP1; 27044 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27045 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27046 SD_PATH_STANDARD); 27047 kmem_free(com, sizeof (*com)); 27048 return (rval); 27049 } 27050 27051 27052 /* 27053 * Function: sr_read_all_subcodes() 27054 * 27055 * Description: This routine is the driver entry point for handling CD-ROM 27056 * ioctl requests to return raw subcode data while the target is 27057 * playing audio (CDROMSUBCODE). 27058 * 27059 * Arguments: dev - the device 'dev_t' 27060 * data - pointer to user provided cdrom subcode structure, 27061 * specifying the transfer length and address. 27062 * flag - this argument is a pass through to ddi_copyxxx() 27063 * directly from the mode argument of ioctl(). 27064 * 27065 * Return Code: the code returned by sd_send_scsi_cmd() 27066 * EFAULT if ddi_copyxxx() fails 27067 * ENXIO if fail ddi_get_soft_state 27068 * EINVAL if data pointer is NULL 27069 */ 27070 27071 static int 27072 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 27073 { 27074 struct sd_lun *un = NULL; 27075 struct uscsi_cmd *com = NULL; 27076 struct cdrom_subcode *subcode = NULL; 27077 int rval; 27078 size_t buflen; 27079 char cdb[CDB_GROUP5]; 27080 27081 #ifdef _MULTI_DATAMODEL 27082 /* To support ILP32 applications in an LP64 world */ 27083 struct cdrom_subcode32 cdrom_subcode32; 27084 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 27085 #endif 27086 if (data == NULL) { 27087 return (EINVAL); 27088 } 27089 27090 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27091 return (ENXIO); 27092 } 27093 27094 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 27095 27096 #ifdef _MULTI_DATAMODEL 27097 switch (ddi_model_convert_from(flag & FMODELS)) { 27098 case DDI_MODEL_ILP32: 27099 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 27100 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27101 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27102 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27103 return (EFAULT); 27104 } 27105 /* Convert the ILP32 uscsi data from the application to LP64 */ 27106 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 27107 break; 27108 case DDI_MODEL_NONE: 27109 if (ddi_copyin(data, subcode, 27110 sizeof (struct cdrom_subcode), flag)) { 27111 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27112 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27113 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27114 return (EFAULT); 27115 } 27116 break; 27117 } 27118 #else /* ! 
_MULTI_DATAMODEL */ 27119 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 27120 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27121 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27122 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27123 return (EFAULT); 27124 } 27125 #endif /* _MULTI_DATAMODEL */ 27126 27127 /* 27128 * Since MMC-2 expects max 3 bytes for length, check if the 27129 * length input is greater than 3 bytes 27130 */ 27131 if ((subcode->cdsc_length & 0xFF000000) != 0) { 27132 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27133 "sr_read_all_subcodes: " 27134 "cdrom transfer length too large: %d (limit %d)\n", 27135 subcode->cdsc_length, 0xFFFFFF); 27136 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27137 return (EINVAL); 27138 } 27139 27140 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 27141 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27142 bzero(cdb, CDB_GROUP5); 27143 27144 if (un->un_f_mmc_cap == TRUE) { 27145 cdb[0] = (char)SCMD_READ_CD; 27146 cdb[2] = (char)0xff; 27147 cdb[3] = (char)0xff; 27148 cdb[4] = (char)0xff; 27149 cdb[5] = (char)0xff; 27150 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27151 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27152 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 27153 cdb[10] = 1; 27154 } else { 27155 /* 27156 * Note: A vendor specific command (0xDF) is being used here to 27157 * request a read of all subcodes. 27158 */ 27159 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 27160 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 27161 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27162 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27163 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 27164 } 27165 com->uscsi_cdb = cdb; 27166 com->uscsi_cdblen = CDB_GROUP5; 27167 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 27168 com->uscsi_buflen = buflen; 27169 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27170 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27171 SD_PATH_STANDARD); 27172 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27173 kmem_free(com, sizeof (*com)); 27174 return (rval); 27175 } 27176 27177 27178 /* 27179 * Function: sr_read_subchannel() 27180 * 27181 * Description: This routine is the driver entry point for handling CD-ROM 27182 * ioctl requests to return the Q sub-channel data of the CD 27183 * current position block (CDROMSUBCHNL). The data includes the 27184 * track number, index number, absolute CD-ROM address (LBA or MSF 27185 * format per the user), track relative CD-ROM address (LBA or MSF 27186 * format per the user), control data and audio status. 27187 * 27188 * Arguments: dev - the device 'dev_t' 27189 * data - pointer to user provided cdrom sub-channel structure 27190 * flag - this argument is a pass through to ddi_copyxxx() 27191 * directly from the mode argument of ioctl().
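* (Byte 5 of the returned Q sub-channel block packs the ADR field in its upper nibble and the CTRL field in its lower nibble; the 0xF0/0x0F masking in the code below relies on that layout.)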
27192 * 27193 * Return Code: the code returned by sd_send_scsi_cmd() 27194 * EFAULT if ddi_copyxxx() fails 27195 * ENXIO if fail ddi_get_soft_state 27196 * EINVAL if data pointer is NULL 27197 */ 27198 27199 static int 27200 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 27201 { 27202 struct sd_lun *un; 27203 struct uscsi_cmd *com; 27204 struct cdrom_subchnl subchanel; 27205 struct cdrom_subchnl *subchnl = &subchanel; 27206 char cdb[CDB_GROUP1]; 27207 caddr_t buffer; 27208 int rval; 27209 27210 if (data == NULL) { 27211 return (EINVAL); 27212 } 27213 27214 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27215 (un->un_state == SD_STATE_OFFLINE)) { 27216 return (ENXIO); 27217 } 27218 27219 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 27220 return (EFAULT); 27221 } 27222 27223 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 27224 bzero(cdb, CDB_GROUP1); 27225 cdb[0] = SCMD_READ_SUBCHANNEL; 27226 /* Set the MSF bit based on the user requested address format */ 27227 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 27228 /* 27229 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 27230 * returned 27231 */ 27232 cdb[2] = 0x40; 27233 /* 27234 * Set byte 3 to specify the return data format. A value of 0x01 27235 * indicates that the CD-ROM current position should be returned. 27236 */ 27237 cdb[3] = 0x01; 27238 cdb[8] = 0x10; 27239 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27240 com->uscsi_cdb = cdb; 27241 com->uscsi_cdblen = CDB_GROUP1; 27242 com->uscsi_bufaddr = buffer; 27243 com->uscsi_buflen = 16; 27244 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27245 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27246 SD_PATH_STANDARD); 27247 if (rval != 0) { 27248 kmem_free(buffer, 16); 27249 kmem_free(com, sizeof (*com)); 27250 return (rval); 27251 } 27252 27253 /* Process the returned Q sub-channel data */ 27254 subchnl->cdsc_audiostatus = buffer[1]; 27255 subchnl->cdsc_adr = (buffer[5] & 0xF0); 27256 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 27257 subchnl->cdsc_trk = buffer[6]; 27258 subchnl->cdsc_ind = buffer[7]; 27259 if (subchnl->cdsc_format & CDROM_LBA) { 27260 subchnl->cdsc_absaddr.lba = 27261 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27262 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27263 subchnl->cdsc_reladdr.lba = 27264 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 27265 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 27266 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 27267 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 27268 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 27269 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 27270 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 27271 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 27272 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 27273 } else { 27274 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 27275 subchnl->cdsc_absaddr.msf.second = buffer[10]; 27276 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 27277 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 27278 subchnl->cdsc_reladdr.msf.second = buffer[14]; 27279 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 27280 } 27281 kmem_free(buffer, 16); 27282 kmem_free(com, sizeof (*com)); 27283 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 27284 != 0) { 27285 return (EFAULT); 27286 } 27287 return (rval); 27288 } 27289 27290 27291 /* 27292 * Function: sr_read_tocentry() 27293 * 
27294 * Description: This routine is the driver entry point for handling CD-ROM 27295 * ioctl requests to read from the Table of Contents (TOC) 27296 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 27297 * fields, the starting address (LBA or MSF format per the user) 27298 * and the data mode if the user specified track is a data track. 27299 * 27300 * Note: The READ HEADER (0x44) command used in this routine is 27301 * obsolete per the SCSI MMC spec but still supported in the 27302 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI, 27303 * therefore the command is still implemented in this routine. 27304 * 27305 * Arguments: dev - the device 'dev_t' 27306 * data - pointer to user provided toc entry structure, 27307 * specifying the track # and the address format 27308 * (LBA or MSF). 27309 * flag - this argument is a pass through to ddi_copyxxx() 27310 * directly from the mode argument of ioctl(). 27311 * 27312 * Return Code: the code returned by sd_send_scsi_cmd() 27313 * EFAULT if ddi_copyxxx() fails 27314 * ENXIO if fail ddi_get_soft_state 27315 * EINVAL if data pointer is NULL 27316 */ 27317 27318 static int 27319 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 27320 { 27321 struct sd_lun *un = NULL; 27322 struct uscsi_cmd *com; 27323 struct cdrom_tocentry toc_entry; 27324 struct cdrom_tocentry *entry = &toc_entry; 27325 caddr_t buffer; 27326 int rval; 27327 char cdb[CDB_GROUP1]; 27328 27329 if (data == NULL) { 27330 return (EINVAL); 27331 } 27332 27333 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27334 (un->un_state == SD_STATE_OFFLINE)) { 27335 return (ENXIO); 27336 } 27337 27338 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 27339 return (EFAULT); 27340 } 27341 27342 /* Validate the requested track and address format */ 27343 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 27344 return (EINVAL); 27345 } 27346 27347 if (entry->cdte_track == 0) { 27348 return (EINVAL); 27349 } 27350 27351 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 27352 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27353 bzero(cdb, CDB_GROUP1); 27354 27355 cdb[0] = SCMD_READ_TOC; 27356 /* Set the MSF bit based on the user requested address format */ 27357 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 27358 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 27359 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 27360 } else { 27361 cdb[6] = entry->cdte_track; 27362 } 27363 27364 /* 27365 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
27366 * (4 byte TOC response header + 8 byte track descriptor) 27367 */ 27368 cdb[8] = 12; 27369 com->uscsi_cdb = cdb; 27370 com->uscsi_cdblen = CDB_GROUP1; 27371 com->uscsi_bufaddr = buffer; 27372 com->uscsi_buflen = 0x0C; 27373 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 27374 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27375 SD_PATH_STANDARD); 27376 if (rval != 0) { 27377 kmem_free(buffer, 12); 27378 kmem_free(com, sizeof (*com)); 27379 return (rval); 27380 } 27381 27382 /* Process the toc entry */ 27383 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 27384 entry->cdte_ctrl = (buffer[5] & 0x0F); 27385 if (entry->cdte_format & CDROM_LBA) { 27386 entry->cdte_addr.lba = 27387 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27388 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27389 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 27390 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 27391 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 27392 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 27393 /* 27394 * Send a READ TOC command using the LBA address format to get 27395 * the LBA for the track requested so it can be used in the 27396 * READ HEADER request 27397 * 27398 * Note: The MSF bit of the READ HEADER command specifies the 27399 * output format. The block address specified in that command 27400 * must be in LBA format. 27401 */ 27402 cdb[1] = 0; 27403 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27404 SD_PATH_STANDARD); 27405 if (rval != 0) { 27406 kmem_free(buffer, 12); 27407 kmem_free(com, sizeof (*com)); 27408 return (rval); 27409 } 27410 } else { 27411 entry->cdte_addr.msf.minute = buffer[9]; 27412 entry->cdte_addr.msf.second = buffer[10]; 27413 entry->cdte_addr.msf.frame = buffer[11]; 27414 /* 27415 * Send a READ TOC command using the LBA address format to get 27416 * the LBA for the track requested so it can be used in the 27417 * READ HEADER request 27418 * 27419 * Note: The MSF bit of the READ HEADER command specifies the 27420 * output format. The block address specified in that command 27421 * must be in LBA format. 27422 */ 27423 cdb[1] = 0; 27424 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27425 SD_PATH_STANDARD); 27426 if (rval != 0) { 27427 kmem_free(buffer, 12); 27428 kmem_free(com, sizeof (*com)); 27429 return (rval); 27430 } 27431 } 27432 27433 /* 27434 * Build and send the READ HEADER command to determine the data mode of 27435 * the user specified track. 27436 */ 27437 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 27438 (entry->cdte_track != CDROM_LEADOUT)) { 27439 bzero(cdb, CDB_GROUP1); 27440 cdb[0] = SCMD_READ_HEADER; 27441 cdb[2] = buffer[8]; 27442 cdb[3] = buffer[9]; 27443 cdb[4] = buffer[10]; 27444 cdb[5] = buffer[11]; 27445 cdb[8] = 0x08; 27446 com->uscsi_buflen = 0x08; 27447 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27448 SD_PATH_STANDARD); 27449 if (rval == 0) { 27450 entry->cdte_datamode = buffer[0]; 27451 } else { 27452 /* 27453 * The READ HEADER command failed; since it is 27454 * obsoleted in one spec, it's better to return 27455 * -1 for an invalid track so that we can still 27456 * receive the rest of the TOC data.
27457				 */
27458				entry->cdte_datamode = (uchar_t)-1;
27459			}
27460		} else {
27461			entry->cdte_datamode = (uchar_t)-1;
27462		}
27463	
27464		kmem_free(buffer, 12);
27465		kmem_free(com, sizeof (*com));
27466		if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
27467			return (EFAULT);
27468	
27469		return (rval);
27470	}
27471	
27472	
27473	/*
27474	 * Function: sr_read_tochdr()
27475	 *
27476	 * Description: This routine is the driver entry point for handling CD-ROM
27477	 *		ioctl requests to read the Table of Contents (TOC) header
27478	 *		(CDROMREADTOCHDR). The TOC header consists of the disk starting
27479	 *		and ending track numbers.
27480	 *
27481	 * Arguments:	dev	- the device 'dev_t'
27482	 *		data	- pointer to user provided toc header structure,
27483	 *			  specifying the starting and ending track numbers.
27484	 *		flag	- this argument is a pass through to ddi_copyxxx()
27485	 *			  directly from the mode argument of ioctl().
27486	 *
27487	 * Return Code:	the code returned by sd_send_scsi_cmd()
27488	 *		EFAULT if ddi_copyxxx() fails
27489	 *		ENXIO if fail ddi_get_soft_state
27490	 *		EINVAL if data pointer is NULL
27491	 */
27492	
27493	static int
27494	sr_read_tochdr(dev_t dev, caddr_t data, int flag)
27495	{
27496		struct sd_lun		*un;
27497		struct uscsi_cmd	*com;
27498		struct cdrom_tochdr	toc_header;
27499		struct cdrom_tochdr	*hdr = &toc_header;
27500		char			cdb[CDB_GROUP1];
27501		int			rval;
27502		caddr_t			buffer;
27503	
27504		if (data == NULL) {
27505			return (EINVAL);
27506		}
27507	
27508		if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27509		    (un->un_state == SD_STATE_OFFLINE)) {
27510			return (ENXIO);
27511		}
27512	
27513		buffer = kmem_zalloc(4, KM_SLEEP);
27514		bzero(cdb, CDB_GROUP1);
27515		cdb[0] = SCMD_READ_TOC;
27516		/*
27517		 * Specifying a track number of 0x00 in the READ TOC command indicates
27518		 * that the TOC header should be returned
27519		 */
27520		cdb[6] = 0x00;
27521		/*
27522		 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
27523		 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
27524		 */
27525		cdb[8] = 0x04;
27526		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27527		com->uscsi_cdb = cdb;
27528		com->uscsi_cdblen = CDB_GROUP1;
27529		com->uscsi_bufaddr = buffer;
27530		com->uscsi_buflen = 0x04;
27531		com->uscsi_timeout = 300;
27532		com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27533	
27534		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27535		    SD_PATH_STANDARD);
27536		if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
27537			hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
27538			hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
27539		} else {
27540			hdr->cdth_trk0 = buffer[2];
27541			hdr->cdth_trk1 = buffer[3];
27542		}
27543		kmem_free(buffer, 4);
27544		kmem_free(com, sizeof (*com));
27545		if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
27546			return (EFAULT);
27547		}
27548		return (rval);
27549	}
27550	
27551	
27552	/*
27553	 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
27554	 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
27555	 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
27556	 * digital audio and extended architecture digital audio. These modes are
27557	 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
27558	 * MMC specs.
27559 * 27560 * In addition to support for the various data formats these routines also 27561 * include support for devices that implement only the direct access READ 27562 * commands (0x08, 0x28), devices that implement the READ_CD commands 27563 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 27564 * READ CDXA commands (0xD8, 0xDB) 27565 */ 27566 27567 /* 27568 * Function: sr_read_mode1() 27569 * 27570 * Description: This routine is the driver entry point for handling CD-ROM 27571 * ioctl read mode1 requests (CDROMREADMODE1). 27572 * 27573 * Arguments: dev - the device 'dev_t' 27574 * data - pointer to user provided cd read structure specifying 27575 * the lba buffer address and length. 27576 * flag - this argument is a pass through to ddi_copyxxx() 27577 * directly from the mode argument of ioctl(). 27578 * 27579 * Return Code: the code returned by sd_send_scsi_cmd() 27580 * EFAULT if ddi_copyxxx() fails 27581 * ENXIO if fail ddi_get_soft_state 27582 * EINVAL if data pointer is NULL 27583 */ 27584 27585 static int 27586 sr_read_mode1(dev_t dev, caddr_t data, int flag) 27587 { 27588 struct sd_lun *un; 27589 struct cdrom_read mode1_struct; 27590 struct cdrom_read *mode1 = &mode1_struct; 27591 int rval; 27592 sd_ssc_t *ssc; 27593 27594 #ifdef _MULTI_DATAMODEL 27595 /* To support ILP32 applications in an LP64 world */ 27596 struct cdrom_read32 cdrom_read32; 27597 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27598 #endif /* _MULTI_DATAMODEL */ 27599 27600 if (data == NULL) { 27601 return (EINVAL); 27602 } 27603 27604 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27605 (un->un_state == SD_STATE_OFFLINE)) { 27606 return (ENXIO); 27607 } 27608 27609 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27610 "sd_read_mode1: entry: un:0x%p\n", un); 27611 27612 #ifdef _MULTI_DATAMODEL 27613 switch (ddi_model_convert_from(flag & FMODELS)) { 27614 case DDI_MODEL_ILP32: 27615 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27616 return (EFAULT); 27617 } 27618 /* Convert the ILP32 uscsi data from the application to LP64 */ 27619 cdrom_read32tocdrom_read(cdrd32, mode1); 27620 break; 27621 case DDI_MODEL_NONE: 27622 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27623 return (EFAULT); 27624 } 27625 } 27626 #else /* ! _MULTI_DATAMODEL */ 27627 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27628 return (EFAULT); 27629 } 27630 #endif /* _MULTI_DATAMODEL */ 27631 27632 ssc = sd_ssc_init(un); 27633 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr, 27634 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 27635 sd_ssc_fini(ssc); 27636 27637 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27638 "sd_read_mode1: exit: un:0x%p\n", un); 27639 27640 return (rval); 27641 } 27642 27643 27644 /* 27645 * Function: sr_read_cd_mode2() 27646 * 27647 * Description: This routine is the driver entry point for handling CD-ROM 27648 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 27649 * support the READ CD (0xBE) command or the 1st generation 27650 * READ CD (0xD4) command. 27651 * 27652 * Arguments: dev - the device 'dev_t' 27653 * data - pointer to user provided cd read structure specifying 27654 * the lba buffer address and length. 27655 * flag - this argument is a pass through to ddi_copyxxx() 27656 * directly from the mode argument of ioctl(). 
27657	 *
27658	 * Return Code:	the code returned by sd_send_scsi_cmd()
27659	 *		EFAULT if ddi_copyxxx() fails
27660	 *		ENXIO if fail ddi_get_soft_state
27661	 *		EINVAL if data pointer is NULL
27662	 */
27663	
27664	static int
27665	sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
27666	{
27667		struct sd_lun		*un;
27668		struct uscsi_cmd	*com;
27669		struct cdrom_read	mode2_struct;
27670		struct cdrom_read	*mode2 = &mode2_struct;
27671		uchar_t			cdb[CDB_GROUP5];
27672		int			nblocks;
27673		int			rval;
27674	#ifdef _MULTI_DATAMODEL
27675		/* To support ILP32 applications in an LP64 world */
27676		struct cdrom_read32	cdrom_read32;
27677		struct cdrom_read32	*cdrd32 = &cdrom_read32;
27678	#endif /* _MULTI_DATAMODEL */
27679	
27680		if (data == NULL) {
27681			return (EINVAL);
27682		}
27683	
27684		if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27685		    (un->un_state == SD_STATE_OFFLINE)) {
27686			return (ENXIO);
27687		}
27688	
27689	#ifdef _MULTI_DATAMODEL
27690		switch (ddi_model_convert_from(flag & FMODELS)) {
27691		case DDI_MODEL_ILP32:
27692			if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
27693				return (EFAULT);
27694			}
27695			/* Convert the ILP32 uscsi data from the application to LP64 */
27696			cdrom_read32tocdrom_read(cdrd32, mode2);
27697			break;
27698		case DDI_MODEL_NONE:
27699			if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
27700				return (EFAULT);
27701			}
27702			break;
27703		}
27704	
27705	#else /* ! _MULTI_DATAMODEL */
27706		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
27707			return (EFAULT);
27708		}
27709	#endif /* _MULTI_DATAMODEL */
27710	
27711		bzero(cdb, sizeof (cdb));
27712		if (un->un_f_cfg_read_cd_xd4 == TRUE) {
27713			/* Read command supported by 1st generation atapi drives */
27714			cdb[0] = SCMD_READ_CDD4;
27715		} else {
27716			/* Universal CD Access Command */
27717			cdb[0] = SCMD_READ_CD;
27718		}
27719	
27720		/*
27721		 * Set the expected sector type to: 2336 byte Mode 2 (Yellow Book)
27722		 */
27723		cdb[1] = CDROM_SECTOR_TYPE_MODE2;
27724	
27725		/* set the start address */
27726		cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0xFF);
27727		cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0xFF);
27728		cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
27729		cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);
27730	
27731		/* set the transfer length */
27732		nblocks = mode2->cdread_buflen / 2336;
27733		cdb[6] = (uchar_t)(nblocks >> 16);
27734		cdb[7] = (uchar_t)(nblocks >> 8);
27735		cdb[8] = (uchar_t)nblocks;
27736	
27737		/* set the filter bits */
27738		cdb[9] = CDROM_READ_CD_USERDATA;
27739	
27740		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27741		com->uscsi_cdb = (caddr_t)cdb;
27742		com->uscsi_cdblen = sizeof (cdb);
27743		com->uscsi_bufaddr = mode2->cdread_bufaddr;
27744		com->uscsi_buflen = mode2->cdread_buflen;
27745		com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27746	
27747		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
27748		    SD_PATH_STANDARD);
27749		kmem_free(com, sizeof (*com));
27750		return (rval);
27751	}
27752	
27753	
27754	/*
27755	 * Function: sr_read_mode2()
27756	 *
27757	 * Description: This routine is the driver entry point for handling CD-ROM
27758	 *		ioctl read mode2 requests (CDROMREADMODE2) for devices that
27759	 *		do not support the READ CD (0xBE) command.
27760	 *
27761	 * Arguments:	dev	- the device 'dev_t'
27762	 *		data	- pointer to user provided cd read structure specifying
27763	 *			  the lba buffer address and length.
27764	 *		flag	- this argument is a pass through to ddi_copyxxx()
27765	 *			  directly from the mode argument of ioctl().
27766 * 27767 * Return Code: the code returned by sd_send_scsi_cmd() 27768 * EFAULT if ddi_copyxxx() fails 27769 * ENXIO if fail ddi_get_soft_state 27770 * EINVAL if data pointer is NULL 27771 * EIO if fail to reset block size 27772 * EAGAIN if commands are in progress in the driver 27773 */ 27774 27775 static int 27776 sr_read_mode2(dev_t dev, caddr_t data, int flag) 27777 { 27778 struct sd_lun *un; 27779 struct cdrom_read mode2_struct; 27780 struct cdrom_read *mode2 = &mode2_struct; 27781 int rval; 27782 uint32_t restore_blksize; 27783 struct uscsi_cmd *com; 27784 uchar_t cdb[CDB_GROUP0]; 27785 int nblocks; 27786 27787 #ifdef _MULTI_DATAMODEL 27788 /* To support ILP32 applications in an LP64 world */ 27789 struct cdrom_read32 cdrom_read32; 27790 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27791 #endif /* _MULTI_DATAMODEL */ 27792 27793 if (data == NULL) { 27794 return (EINVAL); 27795 } 27796 27797 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27798 (un->un_state == SD_STATE_OFFLINE)) { 27799 return (ENXIO); 27800 } 27801 27802 /* 27803 * Because this routine will update the device and driver block size 27804 * being used we want to make sure there are no commands in progress. 27805 * If commands are in progress the user will have to try again. 27806 * 27807 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 27808 * in sdioctl to protect commands from sdioctl through to the top of 27809 * sd_uscsi_strategy. See sdioctl for details. 27810 */ 27811 mutex_enter(SD_MUTEX(un)); 27812 if (un->un_ncmds_in_driver != 1) { 27813 mutex_exit(SD_MUTEX(un)); 27814 return (EAGAIN); 27815 } 27816 mutex_exit(SD_MUTEX(un)); 27817 27818 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27819 "sd_read_mode2: entry: un:0x%p\n", un); 27820 27821 #ifdef _MULTI_DATAMODEL 27822 switch (ddi_model_convert_from(flag & FMODELS)) { 27823 case DDI_MODEL_ILP32: 27824 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27825 return (EFAULT); 27826 } 27827 /* Convert the ILP32 uscsi data from the application to LP64 */ 27828 cdrom_read32tocdrom_read(cdrd32, mode2); 27829 break; 27830 case DDI_MODEL_NONE: 27831 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27832 return (EFAULT); 27833 } 27834 break; 27835 } 27836 #else /* ! 
_MULTI_DATAMODEL */
27837		if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
27838			return (EFAULT);
27839		}
27840	#endif /* _MULTI_DATAMODEL */
27841	
27842		/* Store the current target block size for restoration later */
27843		restore_blksize = un->un_tgt_blocksize;
27844	
27845		/* Change the device and soft state target block size to 2336 */
27846		if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
27847			rval = EIO;
27848			goto done;
27849		}
27850	
27851	
27852		bzero(cdb, sizeof (cdb));
27853	
27854		/* set READ operation */
27855		cdb[0] = SCMD_READ;
27856	
27857		/* adjust lba for 2kbyte blocks from 512 byte blocks */
27858		mode2->cdread_lba >>= 2;
27859	
27860		/* set the start address */
27861		cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0x1F);
27862		cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
27863		cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);
27864	
27865		/* set the transfer length */
27866		nblocks = mode2->cdread_buflen / 2336;
27867		cdb[4] = (uchar_t)nblocks & 0xFF;
27868	
27869		/* build command */
27870		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27871		com->uscsi_cdb = (caddr_t)cdb;
27872		com->uscsi_cdblen = sizeof (cdb);
27873		com->uscsi_bufaddr = mode2->cdread_bufaddr;
27874		com->uscsi_buflen = mode2->cdread_buflen;
27875		com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27876	
27877		/*
27878		 * Issue SCSI command with user space address for read buffer.
27879		 *
27880		 * This sends the command through the main channel in the driver.
27881		 *
27882		 * Since this is accessed via an IOCTL call, we go through the
27883		 * standard path, so that if the device was powered down, then
27884		 * it would be 'awakened' to handle the command.
27885		 */
27886		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
27887		    SD_PATH_STANDARD);
27888	
27889		kmem_free(com, sizeof (*com));
27890	
27891		/* Restore the device and soft state target block size */
27892		if (sr_sector_mode(dev, restore_blksize) != 0) {
27893			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27894			    "can't do switch back to mode 1\n");
27895			/*
27896			 * If the READ command above succeeded we still need to report
27897			 * an error because we failed to reset the block size
27898			 */
27899			if (rval == 0) {
27900				rval = EIO;
27901			}
27902		}
27903	
27904	done:
27905		SD_TRACE(SD_LOG_ATTACH_DETACH, un,
27906		    "sd_read_mode2: exit: un:0x%p\n", un);
27907	
27908		return (rval);
27909	}
27910	
27911	
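/*
 * Usage note (editorial): sr_read_mode2() above appears to be the only
 * caller of sr_sector_mode() in this file, and it follows a
 * save/switch/restore pattern:
 *
 *	restore_blksize = un->un_tgt_blocksize;
 *	(void) sr_sector_mode(dev, SD_MODE2_BLKSIZE);
 *	... issue the READ ...
 *	(void) sr_sector_mode(dev, restore_blksize);
 *
 * Any new caller should preserve the same pairing so that the device and
 * soft state block sizes are always restored.
 */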
27912	/*
27913	 * Function: sr_sector_mode()
27914	 *
27915	 * Description: This utility function is used by sr_read_mode2 to set the target
27916	 *		block size based on the user specified size. This is a legacy
27917	 *		implementation based upon a vendor specific mode page.
27918	 *
27919	 * Arguments:	dev	- the device 'dev_t'
27920	 *		blksize	- the block size to be set: either 2336 (mode 2)
27921	 *			  or 512.
27922	 *
27923	 * Return Code:	the code returned by sd_send_scsi_MODE_SENSE() or
27924	 *		sd_send_scsi_MODE_SELECT()
27925	 *		ENXIO if fail ddi_get_soft_state
27926	 *
27927	 */
27928	
27929	static int
27930	sr_sector_mode(dev_t dev, uint32_t blksize)
27931	{
27932		struct sd_lun	*un;
27933		uchar_t		*sense;
27934		uchar_t		*select;
27935		int		rval;
27936		sd_ssc_t	*ssc;
27937	
27938		if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27939		    (un->un_state == SD_STATE_OFFLINE)) {
27940			return (ENXIO);
27941		}
27942	
27943		sense = kmem_zalloc(20, KM_SLEEP);
27944	
27945		/* Note: This is a vendor specific mode page (0x81) */
27946		ssc = sd_ssc_init(un);
27947		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81,
27948		    SD_PATH_STANDARD);
27949		sd_ssc_fini(ssc);
27950		if (rval != 0) {
27951			SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
27952			    "sr_sector_mode: Mode Sense failed\n");
27953			kmem_free(sense, 20);
27954			return (rval);
27955		}
27956		select = kmem_zalloc(20, KM_SLEEP);
27957		select[3] = 0x08;
27958		select[10] = ((blksize >> 8) & 0xff);
27959		select[11] = (blksize & 0xff);
27960		select[12] = 0x01;
27961		select[13] = 0x06;
27962		select[14] = sense[14];
27963		select[15] = sense[15];
27964		if (blksize == SD_MODE2_BLKSIZE) {
27965			select[14] |= 0x01;
27966		}
27967	
27968		ssc = sd_ssc_init(un);
27969		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20,
27970		    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
27971		sd_ssc_fini(ssc);
27972		if (rval != 0) {
27973			SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
27974			    "sr_sector_mode: Mode Select failed\n");
27975		} else {
27976			/*
27977			 * Only update the softstate block size if we successfully
27978			 * changed the device block mode.
27979			 */
27980			mutex_enter(SD_MUTEX(un));
27981			sd_update_block_info(un, blksize, 0);
27982			mutex_exit(SD_MUTEX(un));
27983		}
27984		kmem_free(sense, 20);
27985		kmem_free(select, 20);
27986		return (rval);
27987	}
27988	
27989	
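/*
 * For reference, the 20 byte MODE SELECT parameter list built by
 * sr_sector_mode() is laid out as follows (an editorial sketch; the page
 * is vendor specific, so the field meanings are assumptions rather than
 * a published standard):
 *
 *	select[0-3]	mode parameter header; select[3] = 0x08 is the
 *			block descriptor length
 *	select[4-11]	block descriptor; select[10-11] carry the block
 *			size, big endian
 *	select[12-19]	page 0x81 data; select[12] = 0x01 (page code with
 *			the PS bit clear), select[13] = 0x06 (page length),
 *			and bit 0 of select[14] set selects 2336 byte
 *			sectors
 */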
27990	/*
27991	 * Function: sr_read_cdda()
27992	 *
27993	 * Description: This routine is the driver entry point for handling CD-ROM
27994	 *		ioctl requests to return CD-DA or subcode data (CDROMCDDA). If
27995	 *		the target supports CDDA, these requests are handled via a
27996	 *		vendor specific command (0xD8). If the target does not support
27997	 *		CDDA, these requests are handled via the READ CD command (0xBE).
27998	 *
27999	 * Arguments:	dev	- the device 'dev_t'
28000	 *		data	- pointer to user provided CD-DA structure specifying
28001	 *			  the track starting address, transfer length, and
28002	 *			  subcode options.
28003	 *		flag	- this argument is a pass through to ddi_copyxxx()
28004	 *			  directly from the mode argument of ioctl().
28005	 *
28006	 * Return Code:	the code returned by sd_send_scsi_cmd()
28007	 *		EFAULT if ddi_copyxxx() fails
28008	 *		ENXIO if fail ddi_get_soft_state
28009	 *		EINVAL if invalid arguments are provided
28010	 *		ENOTTY if the requested subcode data cannot be returned via the READ CD command
28011	 */
28012	
28013	static int
28014	sr_read_cdda(dev_t dev, caddr_t data, int flag)
28015	{
28016		struct sd_lun		*un;
28017		struct uscsi_cmd	*com;
28018		struct cdrom_cdda	*cdda;
28019		int			rval;
28020		size_t			buflen;
28021		char			cdb[CDB_GROUP5];
28022	
28023	#ifdef _MULTI_DATAMODEL
28024		/* To support ILP32 applications in an LP64 world */
28025		struct cdrom_cdda32	cdrom_cdda32;
28026		struct cdrom_cdda32	*cdda32 = &cdrom_cdda32;
28027	#endif /* _MULTI_DATAMODEL */
28028	
28029		if (data == NULL) {
28030			return (EINVAL);
28031		}
28032	
28033		if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
28034			return (ENXIO);
28035		}
28036	
28037		cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP);
28038	
28039	#ifdef _MULTI_DATAMODEL
28040		switch (ddi_model_convert_from(flag & FMODELS)) {
28041		case DDI_MODEL_ILP32:
28042			if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) {
28043				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28044				    "sr_read_cdda: ddi_copyin Failed\n");
28045				kmem_free(cdda, sizeof (struct cdrom_cdda));
28046				return (EFAULT);
28047			}
28048			/* Convert the ILP32 uscsi data from the application to LP64 */
28049			cdrom_cdda32tocdrom_cdda(cdda32, cdda);
28050			break;
28051		case DDI_MODEL_NONE:
28052			if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
28053				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28054				    "sr_read_cdda: ddi_copyin Failed\n");
28055				kmem_free(cdda, sizeof (struct cdrom_cdda));
28056				return (EFAULT);
28057			}
28058			break;
28059		}
28060	#else /* ! _MULTI_DATAMODEL */
28061		if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
28062			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28063			    "sr_read_cdda: ddi_copyin Failed\n");
28064			kmem_free(cdda, sizeof (struct cdrom_cdda));
28065			return (EFAULT);
28066		}
28067	#endif /* _MULTI_DATAMODEL */
28068	
28069		/*
28070		 * Since MMC-2 expects max 3 bytes for length, check if the
28071		 * length input is greater than 3 bytes
28072		 */
28073		if ((cdda->cdda_length & 0xFF000000) != 0) {
28074			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: "
28075			    "cdrom transfer length too large: %d (limit %d)\n",
28076			    cdda->cdda_length, 0xFFFFFF);
28077			kmem_free(cdda, sizeof (struct cdrom_cdda));
28078			return (EINVAL);
28079		}
28080	
28081		switch (cdda->cdda_subcode) {
28082		case CDROM_DA_NO_SUBCODE:
28083			buflen = CDROM_BLK_2352 * cdda->cdda_length;
28084			break;
28085		case CDROM_DA_SUBQ:
28086			buflen = CDROM_BLK_2368 * cdda->cdda_length;
28087			break;
28088		case CDROM_DA_ALL_SUBCODE:
28089			buflen = CDROM_BLK_2448 * cdda->cdda_length;
28090			break;
28091		case CDROM_DA_SUBCODE_ONLY:
28092			buflen = CDROM_BLK_SUBCODE * cdda->cdda_length;
28093			break;
28094		default:
28095			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28096			    "sr_read_cdda: Subcode '0x%x' Not Supported\n",
28097			    cdda->cdda_subcode);
28098			kmem_free(cdda, sizeof (struct cdrom_cdda));
28099			return (EINVAL);
28100		}
28101	
28102		/* Build and send the command */
28103		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28104		bzero(cdb, CDB_GROUP5);
28105	
28106		if (un->un_f_cfg_cdda == TRUE) {
28107			cdb[0] = (char)SCMD_READ_CD;
28108			cdb[1] = 0x04;
28109			cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
28110			cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
28111			cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
28112			cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
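		/*
		 * Bytes 6-8 below carry the transfer length as a 24 bit block
		 * count (hence the 3 byte limit checked above), and byte 10
		 * selects the subchannel data returned. As a worked example
		 * (an editorial sketch, not quoted from a spec): a 2 block
		 * read at address 0x1234 with no subcode would produce the
		 * CDB  BE 04 00 00 12 34 00 00 02 10 00 00.
		 */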
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28114 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28115 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 28116 cdb[9] = 0x10; 28117 switch (cdda->cdda_subcode) { 28118 case CDROM_DA_NO_SUBCODE : 28119 cdb[10] = 0x0; 28120 break; 28121 case CDROM_DA_SUBQ : 28122 cdb[10] = 0x2; 28123 break; 28124 case CDROM_DA_ALL_SUBCODE : 28125 cdb[10] = 0x1; 28126 break; 28127 case CDROM_DA_SUBCODE_ONLY : 28128 /* FALLTHROUGH */ 28129 default : 28130 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28131 kmem_free(com, sizeof (*com)); 28132 return (ENOTTY); 28133 } 28134 } else { 28135 cdb[0] = (char)SCMD_READ_CDDA; 28136 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28137 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28138 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28139 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28140 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 28141 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28142 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28143 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 28144 cdb[10] = cdda->cdda_subcode; 28145 } 28146 28147 com->uscsi_cdb = cdb; 28148 com->uscsi_cdblen = CDB_GROUP5; 28149 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 28150 com->uscsi_buflen = buflen; 28151 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28152 28153 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28154 SD_PATH_STANDARD); 28155 28156 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28157 kmem_free(com, sizeof (*com)); 28158 return (rval); 28159 } 28160 28161 28162 /* 28163 * Function: sr_read_cdxa() 28164 * 28165 * Description: This routine is the driver entry point for handling CD-ROM 28166 * ioctl requests to return CD-XA (Extended Architecture) data. 28167 * (CDROMCDXA). 28168 * 28169 * Arguments: dev - the device 'dev_t' 28170 * data - pointer to user provided CD-XA structure specifying 28171 * the data starting address, transfer length, and format 28172 * flag - this argument is a pass through to ddi_copyxxx() 28173 * directly from the mode argument of ioctl(). 28174 * 28175 * Return Code: the code returned by sd_send_scsi_cmd() 28176 * EFAULT if ddi_copyxxx() fails 28177 * ENXIO if fail ddi_get_soft_state 28178 * EINVAL if data pointer is NULL 28179 */ 28180 28181 static int 28182 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 28183 { 28184 struct sd_lun *un; 28185 struct uscsi_cmd *com; 28186 struct cdrom_cdxa *cdxa; 28187 int rval; 28188 size_t buflen; 28189 char cdb[CDB_GROUP5]; 28190 uchar_t read_flags; 28191 28192 #ifdef _MULTI_DATAMODEL 28193 /* To support ILP32 applications in an LP64 world */ 28194 struct cdrom_cdxa32 cdrom_cdxa32; 28195 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 28196 #endif /* _MULTI_DATAMODEL */ 28197 28198 if (data == NULL) { 28199 return (EINVAL); 28200 } 28201 28202 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28203 return (ENXIO); 28204 } 28205 28206 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 28207 28208 #ifdef _MULTI_DATAMODEL 28209 switch (ddi_model_convert_from(flag & FMODELS)) { 28210 case DDI_MODEL_ILP32: 28211 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 28212 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28213 return (EFAULT); 28214 } 28215 /* 28216 * Convert the ILP32 uscsi data from the 28217 * application to LP64 for internal use. 
28218		 */
28219			cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
28220			break;
28221		case DDI_MODEL_NONE:
28222			if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
28223				kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28224				return (EFAULT);
28225			}
28226			break;
28227		}
28228	#else /* ! _MULTI_DATAMODEL */
28229		if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
28230			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28231			return (EFAULT);
28232		}
28233	#endif /* _MULTI_DATAMODEL */
28234	
28235		/*
28236		 * Since MMC-2 expects max 3 bytes for length, check if the
28237		 * length input is greater than 3 bytes
28238		 */
28239		if ((cdxa->cdxa_length & 0xFF000000) != 0) {
28240			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
28241			    "cdrom transfer length too large: %d (limit %d)\n",
28242			    cdxa->cdxa_length, 0xFFFFFF);
28243			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28244			return (EINVAL);
28245		}
28246	
28247		switch (cdxa->cdxa_format) {
28248		case CDROM_XA_DATA:
28249			buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
28250			read_flags = 0x10;
28251			break;
28252		case CDROM_XA_SECTOR_DATA:
28253			buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
28254			read_flags = 0xf8;
28255			break;
28256		case CDROM_XA_DATA_W_ERROR:
28257			buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
28258			read_flags = 0xfc;
28259			break;
28260		default:
28261			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28262			    "sr_read_cdxa: Format '0x%x' Not Supported\n",
28263			    cdxa->cdxa_format);
28264			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28265			return (EINVAL);
28266		}
28267	
28268		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28269		bzero(cdb, CDB_GROUP5);
28270		if (un->un_f_mmc_cap == TRUE) {
28271			cdb[0] = (char)SCMD_READ_CD;
28272			cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
28273			cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
28274			cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
28275			cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
28276			cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
28277			cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
28278			cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
28279			cdb[9] = (char)read_flags;
28280		} else {
28281			/*
28282			 * Note: A vendor specific command (0xDB) is being used here to
28283			 * request a read of all subcodes.
28284			 */
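		/*
		 * Editorial note: unlike the READ CD CDB built above, this
		 * vendor unique CDB carries a full 32 bit transfer length in
		 * bytes 6-9 and the CD-XA format code in byte 10 (a reading
		 * of the code below, not of a published spec).
		 */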
28285			cdb[0] = (char)SCMD_READ_CDXA;
28286			cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
28287			cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
28288			cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
28289			cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
28290			cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24);
28291			cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
28292			cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
28293			cdb[9] = ((cdxa->cdxa_length) & 0x000000ff);
28294			cdb[10] = cdxa->cdxa_format;
28295		}
28296		com->uscsi_cdb = cdb;
28297		com->uscsi_cdblen = CDB_GROUP5;
28298		com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data;
28299		com->uscsi_buflen = buflen;
28300		com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28301		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
28302		    SD_PATH_STANDARD);
28303		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28304		kmem_free(com, sizeof (*com));
28305		return (rval);
28306	}
28307	
28308	
28309	/*
28310	 * Function: sr_eject()
28311	 *
28312	 * Description: This routine is the driver entry point for handling CD-ROM
28313	 *		eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT).
28314	 *
28315	 * Arguments:	dev	- the device 'dev_t'
28316	 *
28317	 * Return Code:	the code returned by sd_send_scsi_cmd()
28318	 */
28319	
28320	static int
28321	sr_eject(dev_t dev)
28322	{
28323		struct sd_lun	*un;
28324		int		rval;
28325		sd_ssc_t	*ssc;
28326	
28327		if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28328		    (un->un_state == SD_STATE_OFFLINE)) {
28329			return (ENXIO);
28330		}
28331	
28332		/*
28333		 * To prevent race conditions with the eject
28334		 * command, keep track of an eject command as
28335		 * it progresses. If we are already handling
28336		 * an eject command in the driver for the given
28337		 * unit and another request to eject arrives,
28338		 * return EAGAIN immediately so that we don't lose
28339		 * the command if the current eject command fails.
28340		 */
28341		mutex_enter(SD_MUTEX(un));
28342		if (un->un_f_ejecting == TRUE) {
28343			mutex_exit(SD_MUTEX(un));
28344			return (EAGAIN);
28345		}
28346		un->un_f_ejecting = TRUE;
28347		mutex_exit(SD_MUTEX(un));
28348	
28349		ssc = sd_ssc_init(un);
28350		rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
28351		    SD_PATH_STANDARD);
28352		sd_ssc_fini(ssc);
28353	
28354		if (rval != 0) {
28355			mutex_enter(SD_MUTEX(un));
28356			un->un_f_ejecting = FALSE;
28357			mutex_exit(SD_MUTEX(un));
28358			return (rval);
28359		}
28360	
28361		ssc = sd_ssc_init(un);
28362		rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_EJECT,
28363		    SD_PATH_STANDARD);
28364		sd_ssc_fini(ssc);
28365	
28366		if (rval == 0) {
28367			mutex_enter(SD_MUTEX(un));
28368			sr_ejected(un);
28369			un->un_mediastate = DKIO_EJECTED;
28370			un->un_f_ejecting = FALSE;
28371			cv_broadcast(&un->un_state_cv);
28372			mutex_exit(SD_MUTEX(un));
28373		} else {
28374			mutex_enter(SD_MUTEX(un));
28375			un->un_f_ejecting = FALSE;
28376			mutex_exit(SD_MUTEX(un));
28377		}
28378		return (rval);
28379	}
28380	
28381	
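/*
 * Note (editorial): the eject above is a two step SCSI sequence: a
 * PREVENT ALLOW MEDIUM REMOVAL that allows removal
 * (sd_send_scsi_DOORLOCK() with SD_REMOVAL_ALLOW), followed by a
 * START STOP UNIT that requests the media be ejected
 * (sd_send_scsi_START_STOP_UNIT() with SD_TARGET_EJECT).
 */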
28382	/*
28383	 * Function: sr_ejected()
28384	 *
28385	 * Description: This routine updates the soft state structure to invalidate the
28386	 *		geometry information after the media has been ejected or a
28387	 *		media eject has been detected.
28388	 *
28389	 * Arguments:	un	- driver soft state (unit) structure
28390	 */
28391	
28392	static void
28393	sr_ejected(struct sd_lun *un)
28394	{
28395		struct sd_errstats *stp;
28396	
28397		ASSERT(un != NULL);
28398		ASSERT(mutex_owned(SD_MUTEX(un)));
28399	
28400		un->un_f_blockcount_is_valid = FALSE;
28401		un->un_f_tgt_blocksize_is_valid = FALSE;
28402		mutex_exit(SD_MUTEX(un));
28403		cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
28404		mutex_enter(SD_MUTEX(un));
28405	
28406		if (un->un_errstats != NULL) {
28407			stp = (struct sd_errstats *)un->un_errstats->ks_data;
28408			stp->sd_capacity.value.ui64 = 0;
28409		}
28410	}
28411	
28412	
28413	/*
28414	 * Function: sr_check_wp()
28415	 *
28416	 * Description: This routine checks the write protection of a removable
28417	 *		media disk and hotpluggable devices via the write protect bit
28418	 *		of the Mode Page Header device specific field. Some devices
28419	 *		choke on an unsupported mode page. To work around this issue,
28420	 *		this routine uses the 0x3f mode page (request all pages) for
28421	 *		all device types.
28422	 *
28423	 * Arguments:	dev	- the device 'dev_t'
28424	 *
28425	 * Return Code:	int indicating if the device is write protected (1) or not (0)
28426	 *
28427	 * Context: Kernel thread.
28428	 *
28429	 */
28430	
28431	static int
28432	sr_check_wp(dev_t dev)
28433	{
28434		struct sd_lun	*un;
28435		uchar_t		device_specific;
28436		uchar_t		*sense;
28437		int		hdrlen;
28438		int		rval = FALSE;
28439		int		status;
28440		sd_ssc_t	*ssc;
28441	
28442		/*
28443		 * Note: The return codes for this routine should be reworked to
28444		 * properly handle the case of a NULL softstate.
28445		 */
28446		if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
28447			return (FALSE);
28448		}
28449	
28450		if (un->un_f_cfg_is_atapi == TRUE) {
28451			/*
28452			 * The mode page contents are not required; set the allocation
28453			 * length for the mode page header only
28454			 */
28455			hdrlen = MODE_HEADER_LENGTH_GRP2;
28456			sense = kmem_zalloc(hdrlen, KM_SLEEP);
28457			ssc = sd_ssc_init(un);
28458			status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen,
28459			    MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
28460			sd_ssc_fini(ssc);
28461			if (status != 0)
28462				goto err_exit;
28463			device_specific =
28464			    ((struct mode_header_grp2 *)sense)->device_specific;
28465		} else {
28466			hdrlen = MODE_HEADER_LENGTH;
28467			sense = kmem_zalloc(hdrlen, KM_SLEEP);
28468			ssc = sd_ssc_init(un);
28469			status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen,
28470			    MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
28471			sd_ssc_fini(ssc);
28472			if (status != 0)
28473				goto err_exit;
28474			device_specific =
28475			    ((struct mode_header *)sense)->device_specific;
28476		}
28477	
28478	
28479		/*
28480		 * Check the write protect bit in the device specific field; if the
28481		 * mode sense above failed we jumped to err_exit and return FALSE,
28482		 * since not all disks understand this query and we assume they are
28483		 * not writable.
28484		 */
28485		if (device_specific & WRITE_PROTECT) {
28486			rval = TRUE;
28487		}
28488	
28489	err_exit:
28490		kmem_free(sense, hdrlen);
28491		return (rval);
28492	}
28493	
28494	/*
28495	 * Function: sr_volume_ctrl()
28496	 *
28497	 * Description: This routine is the driver entry point for handling CD-ROM
28498	 *		audio output volume ioctl requests. (CDROMVOLCTRL)
28499	 *
28500	 * Arguments:	dev	- the device 'dev_t'
28501	 *		data	- pointer to user audio volume control structure
28502	 *		flag	- this argument is a pass through to ddi_copyxxx()
28503	 *			  directly from the mode argument of ioctl().
28503 * 28504 * Return Code: the code returned by sd_send_scsi_cmd() 28505 * EFAULT if ddi_copyxxx() fails 28506 * ENXIO if fail ddi_get_soft_state 28507 * EINVAL if data pointer is NULL 28508 * 28509 */ 28510 28511 static int 28512 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 28513 { 28514 struct sd_lun *un; 28515 struct cdrom_volctrl volume; 28516 struct cdrom_volctrl *vol = &volume; 28517 uchar_t *sense_page; 28518 uchar_t *select_page; 28519 uchar_t *sense; 28520 uchar_t *select; 28521 int sense_buflen; 28522 int select_buflen; 28523 int rval; 28524 sd_ssc_t *ssc; 28525 28526 if (data == NULL) { 28527 return (EINVAL); 28528 } 28529 28530 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28531 (un->un_state == SD_STATE_OFFLINE)) { 28532 return (ENXIO); 28533 } 28534 28535 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 28536 return (EFAULT); 28537 } 28538 28539 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 28540 struct mode_header_grp2 *sense_mhp; 28541 struct mode_header_grp2 *select_mhp; 28542 int bd_len; 28543 28544 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 28545 select_buflen = MODE_HEADER_LENGTH_GRP2 + 28546 MODEPAGE_AUDIO_CTRL_LEN; 28547 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 28548 select = kmem_zalloc(select_buflen, KM_SLEEP); 28549 ssc = sd_ssc_init(un); 28550 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 28551 sense_buflen, MODEPAGE_AUDIO_CTRL, 28552 SD_PATH_STANDARD); 28553 sd_ssc_fini(ssc); 28554 28555 if (rval != 0) { 28556 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28557 "sr_volume_ctrl: Mode Sense Failed\n"); 28558 kmem_free(sense, sense_buflen); 28559 kmem_free(select, select_buflen); 28560 return (rval); 28561 } 28562 sense_mhp = (struct mode_header_grp2 *)sense; 28563 select_mhp = (struct mode_header_grp2 *)select; 28564 bd_len = (sense_mhp->bdesc_length_hi << 8) | 28565 sense_mhp->bdesc_length_lo; 28566 if (bd_len > MODE_BLK_DESC_LENGTH) { 28567 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28568 "sr_volume_ctrl: Mode Sense returned invalid " 28569 "block descriptor length\n"); 28570 kmem_free(sense, sense_buflen); 28571 kmem_free(select, select_buflen); 28572 return (EIO); 28573 } 28574 sense_page = (uchar_t *) 28575 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 28576 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 28577 select_mhp->length_msb = 0; 28578 select_mhp->length_lsb = 0; 28579 select_mhp->bdesc_length_hi = 0; 28580 select_mhp->bdesc_length_lo = 0; 28581 } else { 28582 struct mode_header *sense_mhp, *select_mhp; 28583 28584 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28585 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28586 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 28587 select = kmem_zalloc(select_buflen, KM_SLEEP); 28588 ssc = sd_ssc_init(un); 28589 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 28590 sense_buflen, MODEPAGE_AUDIO_CTRL, 28591 SD_PATH_STANDARD); 28592 sd_ssc_fini(ssc); 28593 28594 if (rval != 0) { 28595 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28596 "sr_volume_ctrl: Mode Sense Failed\n"); 28597 kmem_free(sense, sense_buflen); 28598 kmem_free(select, select_buflen); 28599 return (rval); 28600 } 28601 sense_mhp = (struct mode_header *)sense; 28602 select_mhp = (struct mode_header *)select; 28603 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 28604 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28605 "sr_volume_ctrl: Mode Sense returned invalid " 28606 "block descriptor length\n"); 28607 
kmem_free(sense, sense_buflen);
28608			kmem_free(select, select_buflen);
28609			return (EIO);
28610		}
28611		sense_page = (uchar_t *)
28612		    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
28613		select_page = (uchar_t *)(select + MODE_HEADER_LENGTH);
28614		select_mhp->length = 0;
28615		select_mhp->bdesc_length = 0;
28616		}
28617		/*
28618		 * Note: An audio control data structure could be created and overlaid
28619		 * on the following in place of the array indexing method implemented.
28620		 */
28621	
28622		/* Build the select data for the user volume data */
28623		select_page[0] = MODEPAGE_AUDIO_CTRL;
28624		select_page[1] = 0xE;
28625		/* Set the immediate bit */
28626		select_page[2] = 0x04;
28627		/* Zero out reserved fields */
28628		select_page[3] = 0x00;
28629		select_page[4] = 0x00;
28630		/* Return sense data for fields not to be modified */
28631		select_page[5] = sense_page[5];
28632		select_page[6] = sense_page[6];
28633		select_page[7] = sense_page[7];
28634		/* Set the user specified volume levels for channel 0 and 1 */
28635		select_page[8] = 0x01;
28636		select_page[9] = vol->channel0;
28637		select_page[10] = 0x02;
28638		select_page[11] = vol->channel1;
28639		/* Channel 2 and 3 are currently unsupported so return the sense data */
28640		select_page[12] = sense_page[12];
28641		select_page[13] = sense_page[13];
28642		select_page[14] = sense_page[14];
28643		select_page[15] = sense_page[15];
28644	
28645		ssc = sd_ssc_init(un);
28646		if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
28647			rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select,
28648			    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
28649		} else {
28650			rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
28651			    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
28652		}
28653		sd_ssc_fini(ssc);
28654	
28655		kmem_free(sense, sense_buflen);
28656		kmem_free(select, select_buflen);
28657		return (rval);
28658	}
28659	
28660	
28661	/*
28662	 * Function: sr_read_sony_session_offset()
28663	 *
28664	 * Description: This routine is the driver entry point for handling CD-ROM
28665	 *		ioctl requests for session offset information (CDROMREADOFFSET).
28666	 *		The address of the first track in the last session of a
28667	 *		multi-session CD-ROM is returned.
28668	 *
28669	 *		Note: This routine uses a vendor specific key value in the
28670	 *		command control field without implementing any vendor check here
28671	 *		or in the ioctl routine.
28672	 *
28673	 * Arguments:	dev	- the device 'dev_t'
28674	 *		data	- pointer to an int to hold the requested address
28675	 *		flag	- this argument is a pass through to ddi_copyxxx()
28676	 *			  directly from the mode argument of ioctl().
28677	 *
28678	 * Return Code:	the code returned by sd_send_scsi_cmd()
28679	 *		EFAULT if ddi_copyxxx() fails
28680	 *		ENXIO if fail ddi_get_soft_state
28681	 *		EINVAL if data pointer is NULL
28682	 */
28683	
28684	static int
28685	sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
28686	{
28687		struct sd_lun		*un;
28688		struct uscsi_cmd	*com;
28689		caddr_t			buffer;
28690		char			cdb[CDB_GROUP1];
28691		int			session_offset = 0;
28692		int			rval;
28693	
28694		if (data == NULL) {
28695			return (EINVAL);
28696		}
28697	
28698		if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28699		    (un->un_state == SD_STATE_OFFLINE)) {
28700			return (ENXIO);
28701		}
28702	
28703		buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);
28704		bzero(cdb, CDB_GROUP1);
28705		cdb[0] = SCMD_READ_TOC;
28706		/*
28707		 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
28708		 * (4 byte TOC response header + 8 byte response data)
28709		 */
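	/*
	 * The response mirrors the single entry READ TOC format parsed in
	 * sr_read_tocentry(): buffer[1] carries the validity indicator
	 * checked against SONY_SESSION_OFFSET_VALID below, and buffer[8-11]
	 * the big endian address of the first track in the last session
	 * (a description of the parsing code below, not of a published spec).
	 */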
28710		cdb[8] = SONY_SESSION_OFFSET_LEN;
28711		/* Byte 9 is the control byte. A vendor specific value is used */
28712		cdb[9] = SONY_SESSION_OFFSET_KEY;
28713		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28714		com->uscsi_cdb = cdb;
28715		com->uscsi_cdblen = CDB_GROUP1;
28716		com->uscsi_bufaddr = buffer;
28717		com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
28718		com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28719	
28720		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
28721		    SD_PATH_STANDARD);
28722		if (rval != 0) {
28723			kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
28724			kmem_free(com, sizeof (*com));
28725			return (rval);
28726		}
28727		if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
28728			session_offset =
28729			    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
28730			    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
28731			/*
28732			 * The offset is returned in units of the current lbasize
28733			 * blocks. Convert it to 2K blocks before returning it to
28734			 * the user.
28735			 */
28736			if (un->un_tgt_blocksize == CDROM_BLK_512) {
28737				session_offset >>= 2;
28738			} else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
28739				session_offset >>= 1;
28740			}
28741		}
28742	
28743		if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
28744			rval = EFAULT;
28745		}
28746	
28747		kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
28748		kmem_free(com, sizeof (*com));
28749		return (rval);
28750	}
28751	
28752	
28753	/*
28754	 * Function: sd_wm_cache_constructor()
28755	 *
28756	 * Description: Cache Constructor for the wmap cache for the read/modify/write
28757	 *		devices.
28758	 *
28759	 * Arguments:	wm	- A pointer to the sd_w_map to be initialized.
28760	 *		un	- sd_lun structure for the device.
28761	 *		flags	- the km flags passed to the constructor
28762	 *
28763	 * Return Code:	0 on success.
28764	 *		-1 on failure.
28765	 */
28766	
28767	/*ARGSUSED*/
28768	static int
28769	sd_wm_cache_constructor(void *wm, void *un, int flags)
28770	{
28771		bzero(wm, sizeof (struct sd_w_map));
28772		cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
28773		return (0);
28774	}
28775	
28776	
28777	/*
28778	 * Function: sd_wm_cache_destructor()
28779	 *
28780	 * Description: Cache destructor for the wmap cache for the read/modify/write
28781	 *		devices.
28782	 *
28783	 * Arguments:	wm	- A pointer to the sd_w_map to be deinitialized.
28784	 *		un	- sd_lun structure for the device.
28785	 */
28786	/*ARGSUSED*/
28787	static void
28788	sd_wm_cache_destructor(void *wm, void *un)
28789	{
28790		cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
28791	}
28792	
28793	
28794	/*
28795	 * Function: sd_range_lock()
28796	 *
28797	 * Description: Lock the specified range of blocks to ensure that a
28798	 *		read-modify-write is atomic and that no other I/O writes to the
28799	 *		same location. The range is specified in terms of start and end
28800	 *		blocks. Block numbers are actual media block numbers, not
28801	 *		system block numbers.
28802	 *
28803	 * Arguments:	un	- sd_lun structure for the device.
28804	 *		startb	- The starting block number
28805	 *		endb	- The end block number
28806	 *		typ	- type of i/o - simple/read_modify_write
28807	 *
28808	 * Return Code:	wm	- pointer to the wmap structure.
28809	 *
28810	 * Context: This routine can sleep.
 */
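/*
 * State machine sketch (editorial, derived from the code below):
 *
 *	SD_WM_CHK_LIST   --(no overlapping I/O)-->  SD_WM_LOCK_RANGE
 *	SD_WM_CHK_LIST   --(overlap found)-->       SD_WM_WAIT_MAP
 *	SD_WM_LOCK_RANGE --(wmap obtained)-->       SD_WM_DONE
 *	SD_WM_LOCK_RANGE --(allocation slept, so
 *	                    the mutex was dropped
 *	                    and we must recheck)--> SD_WM_CHK_LIST
 *	SD_WM_WAIT_MAP   --(woken up, recheck)-->   SD_WM_CHK_LIST
 */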
28811	
28812	static struct sd_w_map *
28813	sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
28814	{
28815		struct sd_w_map *wmp = NULL;
28816		struct sd_w_map *sl_wmp = NULL;
28817		struct sd_w_map *tmp_wmp;
28818		wm_state state = SD_WM_CHK_LIST;
28819	
28820	
28821		ASSERT(un != NULL);
28822		ASSERT(!mutex_owned(SD_MUTEX(un)));
28823	
28824		mutex_enter(SD_MUTEX(un));
28825	
28826		while (state != SD_WM_DONE) {
28827	
28828			switch (state) {
28829			case SD_WM_CHK_LIST:
28830				/*
28831				 * This is the starting state. Check the wmap list
28832				 * to see if the range is currently available.
28833				 */
28834				if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
28835					/*
28836					 * If this is a simple write and no rmw
28837					 * i/o is pending then try to lock the
28838					 * range as the range should be available.
28839					 */
28840					state = SD_WM_LOCK_RANGE;
28841				} else {
28842					tmp_wmp = sd_get_range(un, startb, endb);
28843					if (tmp_wmp != NULL) {
28844						if ((wmp != NULL) && ONLIST(un, wmp)) {
28845							/*
28846							 * Should not keep an onlist
28847							 * wmp while waiting; this
28848							 * macro also sets wmp = NULL.
28849							 */
28850							FREE_ONLIST_WMAP(un, wmp);
28851						}
28852						/*
28853						 * sl_wmp is the wmap on which the wait
28854						 * is done. Since tmp_wmp points to the
28855						 * in-use wmap, set sl_wmp to tmp_wmp
28856						 * and change the state to wait.
28857						 */
28858						sl_wmp = tmp_wmp;
28859						state = SD_WM_WAIT_MAP;
28860					} else {
28861						state = SD_WM_LOCK_RANGE;
28862					}
28863	
28864				}
28865				break;
28866	
28867			case SD_WM_LOCK_RANGE:
28868				ASSERT(un->un_wm_cache);
28869				/*
28870				 * The range needs to be locked; try to get a wmap.
28871				 * First attempt the allocation with KM_NOSLEEP: we
28872				 * want to avoid sleeping if possible, since we would
28873				 * have to release the sd mutex in order to sleep.
28874				 */
28875				if (wmp == NULL)
28876					wmp = kmem_cache_alloc(un->un_wm_cache,
28877					    KM_NOSLEEP);
28878				if (wmp == NULL) {
28879					mutex_exit(SD_MUTEX(un));
28880					_NOTE(DATA_READABLE_WITHOUT_LOCK
28881					    (sd_lun::un_wm_cache))
28882					wmp = kmem_cache_alloc(un->un_wm_cache,
28883					    KM_SLEEP);
28884					mutex_enter(SD_MUTEX(un));
28885					/*
28886					 * We released the mutex, so recheck; go back
28887					 * to the check-list state.
28888					 */
28889					state = SD_WM_CHK_LIST;
28890				} else {
28891					/*
28892					 * We can exit the state machine since we have
28893					 * the wmap. Do the housekeeping first: place
28894					 * the wmap on the wmap list if it is not on it
28895					 * already, then set the state to done.
28896					 */
28897					wmp->wm_start = startb;
28898					wmp->wm_end = endb;
28899					wmp->wm_flags = typ | SD_WM_BUSY;
28900					if (typ & SD_WTYPE_RMW) {
28901						un->un_rmw_count++;
28902					}
28903					/*
28904					 * If not already on the list, link it in
28905					 */
28906					if (!ONLIST(un, wmp)) {
28907						wmp->wm_next = un->un_wm;
28908						wmp->wm_prev = NULL;
28909						if (wmp->wm_next)
28910							wmp->wm_next->wm_prev = wmp;
28911						un->un_wm = wmp;
28912					}
28913					state = SD_WM_DONE;
28914				}
28915				break;
28916	
28917			case SD_WM_WAIT_MAP:
28918				ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
28919				/*
28920				 * Wait is done on sl_wmp, which is set in the
28921				 * check_list state.
28922				 */
28923				sl_wmp->wm_wanted_count++;
28924				cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
28925				sl_wmp->wm_wanted_count--;
28926				/*
28927				 * We can reuse the memory from the completed sl_wmp
28928				 * lock range for our new lock, but only if no one is
28929				 * waiting for it.
28930				 */
28931				ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
28932				if (sl_wmp->wm_wanted_count == 0) {
28933					if (wmp != NULL)
28934						CHK_N_FREEWMP(un, wmp);
28935					wmp = sl_wmp;
28936				}
28937				sl_wmp = NULL;
28938				/*
28939				 * After waking up, need to recheck for availability of
28940				 * range.
28941				 */
28942				state = SD_WM_CHK_LIST;
28943				break;
28944	
28945			default:
28946				panic("sd_range_lock: "
28947				    "Unknown state %d in sd_range_lock", state);
28948				/*NOTREACHED*/
28949			} /* switch(state) */
28950	
28951		} /* while(state != SD_WM_DONE) */
28952	
28953		mutex_exit(SD_MUTEX(un));
28954	
28955		ASSERT(wmp != NULL);
28956	
28957		return (wmp);
28958	}
28959	
28960	
28961	/*
28962	 * Function: sd_get_range()
28963	 *
28964	 * Description: Determine whether any other I/O overlaps this one.
28965	 *		Returns the write map of the first such I/O, or NULL if none.
28966	 *
28967	 * Arguments:	un	- sd_lun structure for the device.
28968	 *		startb	- The starting block number
28969	 *		endb	- The end block number
28970	 *
28971	 * Return Code:	wm	- pointer to the wmap structure.
28972	 */
28973	
28974	static struct sd_w_map *
28975	sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
28976	{
28977		struct sd_w_map *wmp;
28978	
28979		ASSERT(un != NULL);
28980	
28981		for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
28982			if (!(wmp->wm_flags & SD_WM_BUSY)) {
28983				continue;
28984			}
28985			if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
28986				break;
28987			}
28988			if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
28989				break;
28990			}
28991		}
28992	
28993		return (wmp);
28994	}
28995	
28996	
28997	/*
28998	 * Function: sd_free_inlist_wmap()
28999	 *
29000	 * Description: Unlink and free a write map struct.
29001	 *
29002	 * Arguments:	un	- sd_lun structure for the device.
29003	 *		wmp	- sd_w_map which needs to be unlinked.
29004	 */
29005	
29006	static void
29007	sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
29008	{
29009		ASSERT(un != NULL);
29010	
29011		if (un->un_wm == wmp) {
29012			un->un_wm = wmp->wm_next;
29013		} else {
29014			wmp->wm_prev->wm_next = wmp->wm_next;
29015		}
29016	
29017		if (wmp->wm_next) {
29018			wmp->wm_next->wm_prev = wmp->wm_prev;
29019		}
29020	
29021		wmp->wm_next = wmp->wm_prev = NULL;
29022	
29023		kmem_cache_free(un->un_wm_cache, wmp);
29024	}
29025	
29026	
29027	/*
29028	 * Function: sd_range_unlock()
29029	 *
29030	 * Description: Unlock the range locked by wm.
29031	 *		Free write map if nobody else is waiting on it.
29032	 *
29033	 * Arguments:	un	- sd_lun structure for the device.
29034	 *		wmp	- sd_w_map which needs to be unlinked.
29035	 */
29036	
29037	static void
29038	sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
29039	{
29040		ASSERT(un != NULL);
29041		ASSERT(wm != NULL);
29042		ASSERT(!mutex_owned(SD_MUTEX(un)));
29043	
29044		mutex_enter(SD_MUTEX(un));
29045	
29046		if (wm->wm_flags & SD_WTYPE_RMW) {
29047			un->un_rmw_count--;
29048		}
29049	
29050		if (wm->wm_wanted_count) {
29051			wm->wm_flags = 0;
29052			/*
29053			 * Broadcast that the wmap is available now.
29054			 */
29055			cv_broadcast(&wm->wm_avail);
29056		} else {
29057			/*
29058			 * If no one is waiting on the map, it should be freed.
29059			 */
29060			sd_free_inlist_wmap(un, wm);
29061		}
29062	
29063		mutex_exit(SD_MUTEX(un));
29064	}
29065	
29066	
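/*
 * Typical pairing of the two routines above (an editorial sketch; the
 * real callers live in the read-modify-write path around
 * sd_mapblocksize_iostart()):
 *
 *	struct sd_w_map	*wm;
 *
 *	wm = sd_range_lock(un, startb, endb, SD_WTYPE_RMW);
 *	... read the enclosing target blocks, merge in the new data,
 *	... then write the merged blocks back
 *	sd_range_unlock(un, wm);
 */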
29067	/*
29068	 * Function: sd_read_modify_write_task
29069	 *
29070	 * Description: Called from a taskq thread to initiate the write phase of
29071	 *		a read-modify-write request. This is used for targets where
29072	 *		un->un_sys_blocksize != un->un_tgt_blocksize.
29073	 *
29074	 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
29075	 *
29076	 * Context: Called under taskq thread context.
29077	 */
29078	
29079	static void
29080	sd_read_modify_write_task(void *arg)
29081	{
29082		struct sd_mapblocksize_info	*bsp;
29083		struct buf			*bp;
29084		struct sd_xbuf			*xp;
29085		struct sd_lun			*un;
29086	
29087		bp = arg;	/* The bp is given in arg */
29088		ASSERT(bp != NULL);
29089	
29090		/* Get the pointer to the layer-private data struct */
29091		xp = SD_GET_XBUF(bp);
29092		ASSERT(xp != NULL);
29093		bsp = xp->xb_private;
29094		ASSERT(bsp != NULL);
29095	
29096		un = SD_GET_UN(bp);
29097		ASSERT(un != NULL);
29098		ASSERT(!mutex_owned(SD_MUTEX(un)));
29099	
29100		SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29101		    "sd_read_modify_write_task: entry: buf:0x%p\n", bp);
29102	
29103		/*
29104		 * This is the write phase of a read-modify-write request, called
29105		 * under the context of a taskq thread in response to the read
29106		 * portion of the rmw request completing under interrupt context.
29107		 * The write request must be sent from here down the iostart
29108		 * chain as if it were being sent from sd_mapblocksize_iostart(),
29109		 * so we use the layer index saved in the layer-private data area.
29110		 */
29111		SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);
29112	
29113		SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29114		    "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
29115	}
29116	
29117	
29118	/*
29119	 * Function: sddump_do_read_of_rmw()
29120	 *
29121	 * Description: This routine will be called from sddump. If sddump is called
29122	 *		with an I/O that is not aligned on a device blocksize boundary,
29123	 *		then the write has to be converted to a read-modify-write.
29124	 *		Do the read part here in order to keep sddump simple.
29125	 *		Note that the sd_mutex is held across the call to this
29126	 *		routine.
29127	 *
29128	 * Arguments:	un	- sd_lun
29129	 *		blkno	- block number in terms of media block size.
29130	 *		nblk	- number of blocks.
29131	 *		bpp	- pointer to pointer to the buf structure. On return
29132	 *			  from this function, *bpp points to the valid buffer
29133	 *			  to which the write has to be done.
29134	 *
29135	 * Return Code:	0 for success or errno-type return code
29136	 */
29137	
29138	static int
29139	sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
29140	    struct buf **bpp)
29141	{
29142		int err;
29143		int i;
29144		int rval;
29145		struct buf *bp;
29146		struct scsi_pkt *pkt = NULL;
29147		uint32_t target_blocksize;
29148	
29149		ASSERT(un != NULL);
29150		ASSERT(mutex_owned(SD_MUTEX(un)));
29151	
29152		target_blocksize = un->un_tgt_blocksize;
29153	
29154		mutex_exit(SD_MUTEX(un));
29155	
29156		bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
29157		    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
29158		if (bp == NULL) {
29159			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29160			    "no resources for dumping; giving up");
29161			err = ENOMEM;
29162			goto done;
29163		}
29164	
29165		rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
29166		    blkno, nblk);
29167		if (rval != 0) {
29168			scsi_free_consistent_buf(bp);
29169			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29170			    "no resources for dumping; giving up");
29171			err = ENOMEM;
29172			goto done;
29173		}
29174	
29175		pkt->pkt_flags |= FLAG_NOINTR;
29176	
29177		err = EIO;
29178		for (i = 0; i < SD_NDUMP_RETRIES; i++) {
29179	
29180			/*
29181			 * Scsi_poll returns 0 (success) if the command completes and
29182			 * the status block is STATUS_GOOD.
We should only check 29183 * errors if this condition is not true. Even then we should 29184 * send our own request sense packet only if we have a check 29185 * condition and auto request sense has not been performed by 29186 * the hba. 29187 */ 29188 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 29189 29190 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 29191 err = 0; 29192 break; 29193 } 29194 29195 /* 29196 * Check CMD_DEV_GONE 1st, give up if device is gone, 29197 * no need to read RQS data. 29198 */ 29199 if (pkt->pkt_reason == CMD_DEV_GONE) { 29200 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29201 "Error while dumping state with rmw..." 29202 "Device is gone\n"); 29203 break; 29204 } 29205 29206 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 29207 SD_INFO(SD_LOG_DUMP, un, 29208 "sddump: read failed with CHECK, try # %d\n", i); 29209 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 29210 (void) sd_send_polled_RQS(un); 29211 } 29212 29213 continue; 29214 } 29215 29216 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 29217 int reset_retval = 0; 29218 29219 SD_INFO(SD_LOG_DUMP, un, 29220 "sddump: read failed with BUSY, try # %d\n", i); 29221 29222 if (un->un_f_lun_reset_enabled == TRUE) { 29223 reset_retval = scsi_reset(SD_ADDRESS(un), 29224 RESET_LUN); 29225 } 29226 if (reset_retval == 0) { 29227 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 29228 } 29229 (void) sd_send_polled_RQS(un); 29230 29231 } else { 29232 SD_INFO(SD_LOG_DUMP, un, 29233 "sddump: read failed with 0x%x, try # %d\n", 29234 SD_GET_PKT_STATUS(pkt), i); 29235 mutex_enter(SD_MUTEX(un)); 29236 sd_reset_target(un, pkt); 29237 mutex_exit(SD_MUTEX(un)); 29238 } 29239 29240 /* 29241 * If we are not getting anywhere with lun/target resets, 29242 * let's reset the bus. 29243 */ 29244 if (i > SD_NDUMP_RETRIES/2) { 29245 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 29246 (void) sd_send_polled_RQS(un); 29247 } 29248 29249 } 29250 scsi_destroy_pkt(pkt); 29251 29252 if (err != 0) { 29253 scsi_free_consistent_buf(bp); 29254 *bpp = NULL; 29255 } else { 29256 *bpp = bp; 29257 } 29258 29259 done: 29260 mutex_enter(SD_MUTEX(un)); 29261 return (err); 29262 } 29263 29264 29265 /* 29266 * Function: sd_failfast_flushq 29267 * 29268 * Description: Take all bp's on the wait queue that have B_FAILFAST set 29269 * in b_flags and move them onto the failfast queue, then kick 29270 * off a thread to return all bp's on the failfast queue to 29271 * their owners with an error set. 29272 * 29273 * Arguments: un - pointer to the soft state struct for the instance. 29274 * 29275 * Context: may execute in interrupt context. 29276 */ 29277 29278 static void 29279 sd_failfast_flushq(struct sd_lun *un) 29280 { 29281 struct buf *bp; 29282 struct buf *next_waitq_bp; 29283 struct buf *prev_waitq_bp = NULL; 29284 29285 ASSERT(un != NULL); 29286 ASSERT(mutex_owned(SD_MUTEX(un))); 29287 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 29288 ASSERT(un->un_failfast_bp == NULL); 29289 29290 SD_TRACE(SD_LOG_IO_FAILFAST, un, 29291 "sd_failfast_flushq: entry: un:0x%p\n", un); 29292 29293 /* 29294 * Check if we should flush all bufs when entering failfast state, or 29295 * just those with B_FAILFAST set. 29296 */ 29297 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 29298 /* 29299 * Move *all* bp's on the wait queue to the failfast flush 29300 * queue, including those that do NOT have B_FAILFAST set. 
		 */
		if (un->un_failfast_headp == NULL) {
			ASSERT(un->un_failfast_tailp == NULL);
			un->un_failfast_headp = un->un_waitq_headp;
		} else {
			ASSERT(un->un_failfast_tailp != NULL);
			un->un_failfast_tailp->av_forw = un->un_waitq_headp;
		}

		un->un_failfast_tailp = un->un_waitq_tailp;

		/* update kstat for each bp moved out of the waitq */
		for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) {
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
		}

		/* empty the waitq */
		un->un_waitq_headp = un->un_waitq_tailp = NULL;

	} else {
		/*
		 * Go through the wait queue, pick off all entries with
		 * B_FAILFAST set, and move these onto the failfast queue.
		 */
		for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
			/*
			 * Save the pointer to the next bp on the wait queue,
			 * so we get to it on the next iteration of this loop.
			 */
			next_waitq_bp = bp->av_forw;

			/*
			 * If this bp from the wait queue does NOT have
			 * B_FAILFAST set, just move on to the next element
			 * in the wait queue. Note, this is the only place
			 * where it is correct to set prev_waitq_bp.
			 */
			if ((bp->b_flags & B_FAILFAST) == 0) {
				prev_waitq_bp = bp;
				continue;
			}

			/*
			 * Remove the bp from the wait queue.
			 */
			if (bp == un->un_waitq_headp) {
				/* The bp is the first element of the waitq. */
				un->un_waitq_headp = next_waitq_bp;
				if (un->un_waitq_headp == NULL) {
					/* The wait queue is now empty */
					un->un_waitq_tailp = NULL;
				}
			} else {
				/*
				 * The bp is either somewhere in the middle
				 * or at the end of the wait queue.
				 */
				ASSERT(un->un_waitq_headp != NULL);
				ASSERT(prev_waitq_bp != NULL);
				ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
				    == 0);
				if (bp == un->un_waitq_tailp) {
					/* bp is the last entry on waitq. */
					ASSERT(next_waitq_bp == NULL);
					un->un_waitq_tailp = prev_waitq_bp;
				}
				prev_waitq_bp->av_forw = next_waitq_bp;
			}
			bp->av_forw = NULL;

			/*
			 * Update kstat since the bp is moved out of
			 * the waitq.
			 */
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);

			/*
			 * Now put the bp onto the failfast queue.
			 */
			if (un->un_failfast_headp == NULL) {
				/* failfast queue is currently empty */
				ASSERT(un->un_failfast_tailp == NULL);
				un->un_failfast_headp =
				    un->un_failfast_tailp = bp;
			} else {
				/* Add the bp to the end of the failfast q */
				ASSERT(un->un_failfast_tailp != NULL);
				ASSERT(un->un_failfast_tailp->b_flags &
				    B_FAILFAST);
				un->un_failfast_tailp->av_forw = bp;
				un->un_failfast_tailp = bp;
			}
		}
	}

	/*
	 * Now return all bp's on the failfast queue to their owners.
	 */
	while ((bp = un->un_failfast_headp) != NULL) {

		un->un_failfast_headp = bp->av_forw;
		if (un->un_failfast_headp == NULL) {
			un->un_failfast_tailp = NULL;
		}

		/*
		 * We want to return the bp with a failure error code, but
		 * we do not want a call to sd_start_cmds() to occur here,
		 * so use sd_return_failed_command_no_restart() instead of
		 * sd_return_failed_command().
		 */
		sd_return_failed_command_no_restart(un, bp, EIO);
	}

	/* Flush the xbuf queues if required. */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}
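/*
 * Illustrative sketch (not from the driver): the wait queue above is a
 * singly linked list threaded through av_forw with head and tail pointers,
 * so removing an interior element requires remembering the previous one.
 * The standalone model below mirrors that removal-and-append logic with a
 * simplified node type; all names here are hypothetical.
 */
#if 0	/* example only; compiles as a standalone user program */
#include <stddef.h>

struct node {
	int		flagged;	/* stands in for B_FAILFAST */
	struct node	*next;		/* stands in for av_forw */
};

/* Move every flagged node from (headp, tailp) onto a new list. */
static struct node *
pick_off_flagged(struct node **headp, struct node **tailp)
{
	struct node *n, *next, *prev = NULL;
	struct node *picked = NULL, *picked_tail = NULL;

	for (n = *headp; n != NULL; n = next) {
		next = n->next;
		if (!n->flagged) {
			prev = n;	/* only place prev may be set */
			continue;
		}
		/* unlink n, updating head/tail as the driver does */
		if (n == *headp) {
			*headp = next;
			if (*headp == NULL)
				*tailp = NULL;
		} else {
			if (n == *tailp)
				*tailp = prev;
			prev->next = next;
		}
		n->next = NULL;
		/* append n to the picked list */
		if (picked == NULL) {
			picked = picked_tail = n;
		} else {
			picked_tail->next = n;
			picked_tail = n;
		}
	}
	return (picked);
}
#endif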
/*
 * Function: sd_failfast_flushq_callback
 *
 * Description: Return TRUE if the given bp meets the criteria for failfast
 *		flushing. Used with ddi_xbuf_flushq(9F).
 *
 * Arguments: bp - ptr to buf struct to be examined.
 *
 * Context: Any
 */

static int
sd_failfast_flushq_callback(struct buf *bp)
{
	/*
	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
	 * state is entered; OR (2) the given bp has B_FAILFAST set.
	 */
	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
}



/*
 * Function: sd_setup_next_xfer
 *
 * Description: Prepare next I/O operation using DMA_PARTIAL
 *
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	ssize_t	num_blks_not_xfered;
	daddr_t	strt_blk_num;
	ssize_t	bytes_not_xfered;
	int	rval;

	ASSERT(pkt->pkt_resid == 0);

	/*
	 * Calculate the next block number and amount to be transferred.
	 *
	 * How much data has NOT been transferred to the HBA yet.
	 */
	bytes_not_xfered = xp->xb_dma_resid;

	/*
	 * Figure out how many blocks have NOT been transferred to the
	 * HBA yet.
	 */
	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

	/*
	 * Set the starting block number to the end of what WAS transferred.
	 */
	strt_blk_num = xp->xb_blkno +
	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

	/*
	 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
	 * will call scsi_init_pkt with NULL_FUNC so we do not have to
	 * release the disk mutex here.
	 */
	rval = sd_setup_next_rw_pkt(un, pkt, bp,
	    strt_blk_num, num_blks_not_xfered);

	if (rval == 0) {

		/*
		 * Success.
		 *
		 * Adjust things if there are still more blocks to be
		 * transferred.
		 */
		xp->xb_dma_resid = pkt->pkt_resid;
		pkt->pkt_resid = 0;

		return (1);
	}

	/*
	 * There's really only one possible return value from
	 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt
	 * returns NULL.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	bp->b_resid = bp->b_bcount;
	bp->b_flags |= B_ERROR;

	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
	    "Error setting up next portion of DMA transfer\n");

	return (0);
}
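/*
 * Illustrative sketch (not from the driver): the arithmetic used above to
 * find the next DMA_PARTIAL window. With a hypothetical 8KB request,
 * 512-byte target blocks, and 2KB left untransferred, the next window
 * starts at block offset (8192 - 2048) / 512 = 12 and is 4 blocks long.
 */
#if 0	/* example only; standalone */
#include <assert.h>

static void
next_xfer_window(long start_blkno, long bcount, long dma_resid,
    long tgt_blksz, long *next_blkno, long *nblks)
{
	/* blocks not yet transferred */
	*nblks = dma_resid / tgt_blksz;
	/* first block past what WAS transferred */
	*next_blkno = start_blkno + (bcount - dma_resid) / tgt_blksz;
}

int
main(void)
{
	long blkno, nblks;

	next_xfer_window(0, 8192, 2048, 512, &blkno, &nblks);
	assert(blkno == 12 && nblks == 4);
	return (0);
}
#endif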
/*
 * Function: sd_panic_for_res_conflict
 *
 * Description: Call panic with a string formatted with "Reservation Conflict"
 *		and a human readable identifier indicating the SD instance
 *		that experienced the reservation conflict.
 *
 * Arguments: un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
 */

#define	SD_RESV_CONFLICT_FMT_LEN 40
void
sd_panic_for_res_conflict(struct sd_lun *un)
{
	char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN];
	char path_str[MAXPATHLEN];

	(void) snprintf(panic_str, sizeof (panic_str),
	    "Reservation Conflict\nDisk: %s",
	    ddi_pathname(SD_DEVINFO(un), path_str));

	panic(panic_str);
}

/*
 * Note: The following sd_faultinjection_ioctl() routines implement
 * driver support for fault injection, used for error analysis by
 * causing faults in multiple layers of the driver.
 */

#ifdef SD_FAULT_INJECTION
static uint_t   sd_fault_injection_on = 0;

/*
 * Function: sd_faultinjection_ioctl()
 *
 * Description: This routine is the driver entry point for handling
 *		fault injection ioctls to inject errors into the
 *		layer model.
 *
 * Arguments: cmd - the ioctl cmd received
 *	      arg - the arguments from the user
 */

static void
sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) {

	uint_t i = 0;
	uint_t rval;

	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");

	mutex_enter(SD_MUTEX(un));

	switch (cmd) {
	case SDIOCRUN:
		/* Allow pushed faults to be injected */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Run\n");

		sd_fault_injection_on = 1;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: run finished\n");
		break;

	case SDIOCSTART:
		/* Start Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Start\n");

		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0xFFFFFFFF;
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		mutex_enter(&(un->un_fi_mutex));
		un->sd_fi_log[0] = '\0';
		un->sd_fi_buf_len = 0;
		mutex_exit(&(un->un_fi_mutex));

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: start finished\n");
		break;

	case SDIOCSTOP:
		/* Stop Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");
		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0x0;

		/* Empty stray or unused structs from fifo */
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			if (un->sd_fi_fifo_pkt[i] != NULL) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
			}
			if (un->sd_fi_fifo_xb[i] != NULL) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
			}
			if (un->sd_fi_fifo_un[i] != NULL) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
			}
			if (un->sd_fi_fifo_arq[i] != NULL) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
			}
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: stop finished\n");
		break;
	case SDIOCINSERTPKT:
		/* Store a packet struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		/* No more than SD_FI_MAX_ERROR entries allowed in queue */
		if (un->sd_fi_fifo_pkt[i] != NULL) {
			kmem_free(un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt));
		}
		if (arg != NULL) {
			un->sd_fi_fifo_pkt[i] =
			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
			if (un->sd_fi_fifo_pkt[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
				un->sd_fi_fifo_pkt[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: pkt null\n");
		}
		break;

	case SDIOCINSERTXB:
		/* Store an xb struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_xb[i] != NULL) {
			kmem_free(un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb));
			un->sd_fi_fifo_xb[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_xb[i] =
			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
			if (un->sd_fi_fifo_xb[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb), 0);

			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
				un->sd_fi_fifo_xb[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: xb null\n");
		}
		break;

	case SDIOCINSERTUN:
		/* Store a un struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_un[i] != NULL) {
			kmem_free(un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un));
			un->sd_fi_fifo_un[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_un[i] =
			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
			if (un->sd_fi_fifo_un[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
				un->sd_fi_fifo_un[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: un null\n");
		}

		break;
	case SDIOCINSERTARQ:
		/* Store an arq struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_arq[i] != NULL) {
			kmem_free(un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq));
			un->sd_fi_fifo_arq[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_arq[i] =
			    kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}

		break;

	case SDIOCPUSH:
		/* Push stored xb, pkt, un, and arq onto fifo */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return buffer of log from Injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len+1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid; set it to NULL
			 * for return.
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
	    " exit\n");
}


/*
 * Function: sd_injection_log()
 *
 * Description: This routine appends buf to the existing injection log
 *		for retrieval via faultinjection_ioctl for use in fault
 *		detection and recovery.
 *
 * Arguments: buf - the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	len = min(strlen(buf), 255);
	/* Add logged value to Injection log to be returned later */
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		uint_t	offset = strlen((char *)un->sd_fi_log);
		char *destp = (char *)un->sd_fi_log + offset;
		int i;
		for (i = 0; i < len; i++) {
			*destp++ = *buf++;
		}
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}
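/*
 * Illustrative sketch (not from the driver): how a userland test harness
 * might drive the fault-injection ioctls above. The SDIOC* command codes
 * and the struct sd_fi_pkt layout are taken from sddef.h; the device path,
 * the header availability in userland, the choice of CMD_TRAN_ERR, and the
 * 0xff "leave field as is" convention implied by SD_CONDSET are all
 * assumptions for illustration.
 */
#if 0	/* example only; standalone userland program */
#include <sys/scsi/scsi.h>
#include <sys/scsi/targets/sddef.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
inject_one_transport_error(const char *rdsk_path)	/* hypothetical */
{
	struct sd_fi_pkt fi;
	uint_t npush = 1;
	int fd = open(rdsk_path, O_RDONLY);

	if (fd < 0)
		return (-1);

	(void) ioctl(fd, SDIOCSTART, NULL);	/* reset injection session */

	/* Queue one packet overlay: fail the next command in transport. */
	(void) memset(&fi, 0xff, sizeof (fi));	/* 0xff: leave field as is */
	fi.pkt_reason = CMD_TRAN_ERR;
	(void) ioctl(fd, SDIOCINSERTPKT, &fi);

	(void) ioctl(fd, SDIOCPUSH, &npush);	/* make it live on the fifo */
	(void) ioctl(fd, SDIOCRUN, NULL);	/* arm injection */

	(void) close(fd);
	return (0);
}
#endif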
/*
 * Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
 *
 * Arguments: pktp - packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* pull bp, xb, and un from pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* if injection is off return */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	SD_INFO(SD_LOG_SDTEST, un,
	    "sd_faultinjection: is working for copying\n");

	/* take the next set off the fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];


	/* set variables accordingly */
	/* set pkt if it was on fifo */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		if (fi_pkt->pkt_cdbp != 0xff)
			SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

	}
	/* set xb if it was on fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		if (fi_xb->xb_retry_count != 0)
			SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		/* copy in block data from sense */
		/*
		 * if (fi_xb->xb_sense_data[0] != -1) {
		 *	bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
		 *	    SENSE_LENGTH);
		 * }
		 */
		bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH);

		/* copy in extended sense codes */
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_code, "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_key, "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_add_code, "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_qual_code, "es_qual_code");
		struct scsi_extended_sense *esp;
		esp = (struct scsi_extended_sense *)xb->xb_sense_data;
		esp->es_class = CLASS_EXTENDED_SENSE;
	}

	/* set un if it was on fifo */
	if (fi_un != NULL) {
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

	}

	/* copy in auto request sense if it was on fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* free structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free does not guarantee that the pointer is set to NULL.
	 * Since we use these pointers to determine whether values were
	 * set, make sure they are always NULL after the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */

/*
 * This routine is invoked in sd_unit_attach(). Before calling it, the
 * properties in the conf file, including the "hotpluggable" property,
 * must already have been processed.
 *
 * The sd driver distinguishes three different types of devices: removable
 * media, non-removable media, and hotpluggable. The differences are
 * outlined below:
 *
 * 1. Device ID
 *
 *     The device ID of a device is used to identify this device. Refer to
 *     ddi_devid_register(9F).
 *
 *     For a non-removable media disk device which can provide 0x80 or 0x83
 *     VPD page (refer to INQUIRY command of SCSI SPC specification), a
 *     unique device ID is created to identify this device. For other
 *     non-removable media devices, a default device ID is created only if
 *     this device has at least 2 alternate cylinders. Otherwise, this
 *     device has no devid.
 *
 *     -------------------------------------------------------
 *     removable media   hotpluggable  | Can Have Device ID
 *     -------------------------------------------------------
 *         false             false     |     Yes
 *         false             true      |     Yes
 *         true                x       |     No
 *     -------------------------------------------------------
 *
 *
 * 2. SCSI group 4 commands
 *
 *     In the SCSI specs, only some commands in the group 4 command set can
 *     use 8-byte addresses that can be used to access >2TB storage spaces.
 *     Other commands have no such capability. Without supporting group 4,
 *     it is impossible to make full use of the storage space of a disk
 *     with a capacity larger than 2TB.
 *
 *     -----------------------------------------------
 *     removable media   hotpluggable   LP64  |  Group
 *     -----------------------------------------------
 *           false          false       false |   1
 *           false          false       true  |   4
 *           false          true        false |   1
 *           false          true        true  |   4
 *           true             x           x   |   5
 *     -----------------------------------------------
 *
 *
 * 3. Check for VTOC Label
 *
 *     If a direct-access disk has no EFI label, sd will check if it has a
 *     valid VTOC label. Now, sd also does that check for removable media
 *     and hotpluggable devices.
 *
 *     --------------------------------------------------------------
 *     Direct-Access   removable media    hotpluggable |  Check Label
 *     --------------------------------------------------------------
 *         false          false              false     |   No
 *         false          false              true      |   No
 *         false          true               false     |   Yes
 *         false          true               true      |   Yes
 *         true            x                  x        |   Yes
 *     --------------------------------------------------------------
 *
 *
 * 4. Building default VTOC label
 *
 *     As section 3 says, sd checks whether some kinds of devices have a
 *     VTOC label. If those devices have no valid VTOC label, sd(7d) will
 *     attempt to create a default VTOC label for them. Currently sd creates
 *     a default VTOC label for all devices on the x86 platform (VTOC_16),
 *     but only for removable media devices on SPARC (VTOC_8).
 *
 *     -----------------------------------------------------------
 *       removable media  hotpluggable  platform  |  Default Label
 *     -----------------------------------------------------------
 *             false          false       sparc   |     No
 *             false          true        x86     |     Yes
 *             false          true        sparc   |     Yes
 *             true             x           x     |     Yes
 *     -----------------------------------------------------------
 *
 *
 * 5. Supported blocksizes of target devices
 *
 *     Sd supports non-512-byte blocksizes for removable media devices only.
 *     For other devices, only a 512-byte blocksize is supported. This may
 *     be changed in the near future because some RAID devices require a
 *     non-512-byte blocksize.
 *
 *     -----------------------------------------------------------
 *     removable media    hotpluggable   | non-512-byte blocksize
 *     -----------------------------------------------------------
 *           false          false        |   No
 *           false          true         |   No
 *           true             x          |   Yes
 *     -----------------------------------------------------------
 *
 *
 * 6. Automatic mount & unmount
 *
 *     The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which is used to
 *     query whether a device is a removable media device. It returns 1 for
 *     removable media devices, and 0 for others (see the userland sketch
 *     following this comment block).
 *
 *     The automatic mounting subsystem should distinguish between the types
 *     of devices and apply automounting policies to each.
 *
 *
 * 7. fdisk partition management
 *
 *     fdisk is the traditional partitioning method on the x86 platform.
 *     The sd(7d) driver supports fdisk partitions only on the x86 platform.
 *     On SPARC, sd doesn't support fdisk partitions at all. Note: pcfs(7fs)
 *     can recognize fdisk partitions on both the x86 and SPARC platforms.
 *
 *     -----------------------------------------------------------
 *       platform   removable media  USB/1394  |  fdisk supported
 *     -----------------------------------------------------------
 *        x86         X               X        |       true
 *     -----------------------------------------------------------
 *        sparc       X               X        |       false
 *     -----------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 *     Although sd(7d) doesn't support fdisk on the SPARC platform, it does
 *     support read/write mboot for removable media devices on SPARC.
 *
 *     -----------------------------------------------------------
 *       platform   removable media  USB/1394  |  mboot supported
 *     -----------------------------------------------------------
 *        x86         X               X        |       true
 *     -----------------------------------------------------------
 *        sparc      false           false     |       false
 *        sparc      false           true      |       true
 *        sparc      true            false     |       true
 *        sparc      true            true      |       true
 *     -----------------------------------------------------------
 *
 *
 * 9. Error handling during opening of the device
 *
 *     If a disk device fails to open, an errno is returned. For some kinds
 *     of errors, a different errno is returned depending on whether the
 *     device is a removable media device. This brings USB/1394 hard disks
 *     in line with expected hard disk behavior. It is not expected that
 *     this breaks any application.
 *
 *     ------------------------------------------------------
 *       removable media    hotpluggable   |  errno
 *     ------------------------------------------------------
 *         false              false        |   EIO
 *         false              true         |   EIO
 *         true                x           |   ENXIO
 *     ------------------------------------------------------
 *
 *
 * 11. ioctls: DKIOCEJECT, CDROMEJECT
 *
 *     These IOCTLs are applicable only to removable media devices.
 *
 *     -----------------------------------------------------------
 *       removable media    hotpluggable   |DKIOCEJECT, CDROMEJECT
 *     -----------------------------------------------------------
 *         false              false        |     No
 *         false              true         |     No
 *         true                x           |     Yes
 *     -----------------------------------------------------------
 *
 *
 * 12. Kstats for partitions
 *
 *     sd creates partition kstats for non-removable media devices. USB and
 *     Firewire hard disks now have partition kstats.
 *
 *     ------------------------------------------------------
 *       removable media    hotpluggable   |   kstat
 *     ------------------------------------------------------
 *         false              false        |    Yes
 *         false              true         |    Yes
 *         true                x           |    No
 *     ------------------------------------------------------
 *
 *
 * 13. Removable media & hotpluggable properties
 *
 *     The sd driver creates a "removable-media" property for removable
 *     media devices. A parent nexus driver creates a "hotpluggable"
 *     property if it supports hotplugging.
 *
 *     ---------------------------------------------------------------------
 *     removable media   hotpluggable |  "removable-media"   "hotpluggable"
 *     ---------------------------------------------------------------------
 *       false            false       |      No                  No
 *       false            true        |      No                  Yes
 *       true             false       |      Yes                 No
 *       true             true        |      Yes                 Yes
 *     ---------------------------------------------------------------------
 *
 *
 * 14. Power Management
 *
 *     sd only power manages removable media devices or devices that support
 *     LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
 *
 *     A parent nexus that supports hotplugging can also set "pm-capable"
 *     if the disk can be power managed.
 *
 *     ------------------------------------------------------------
 *       removable media  hotpluggable  pm-capable  |  power manage
 *     ------------------------------------------------------------
 *         false              false        false    |     No
 *         false              false        true     |     Yes
 *         false              true         false    |     No
 *         false              true         true     |     Yes
 *         true                x            x       |     Yes
 *     ------------------------------------------------------------
 *
 *     USB and firewire hard disks can now be power managed independently
 *     of the framebuffer.
 *
 *
 * 15. Support for USB disks with capacity larger than 1TB
 *
 *     Currently, sd doesn't permit a fixed disk device with a capacity
 *     larger than 1TB to be used in a 32-bit operating system environment.
 *     However, sd does not apply this restriction to removable media
 *     devices. Instead, it assumes that removable media devices cannot
 *     have a capacity larger than 1TB. Therefore, using those devices on
 *     a 32-bit system is partially supported, which can cause some
 *     unexpected results.
 *
 *     ---------------------------------------------------------------------
 *       removable media    USB/1394 | Capacity > 1TB |   Used in 32-bit env
 *     ---------------------------------------------------------------------
 *         false              false  |      true      |     no
 *         false              true   |      true      |     no
 *         true               false  |      true      |     Yes
 *         true               true   |      true      |     Yes
 *     ---------------------------------------------------------------------
 *
 *
 * 16. Check write-protection at open time
 *
 *     When a removable media device is opened for writing without the
 *     NDELAY flag, sd will check whether the device is writable. An
 *     attempt to open a write-protected device for writing without the
 *     NDELAY flag will fail.
 *
 *     ------------------------------------------------------------
 *       removable media    USB/1394   |   WP Check
 *     ------------------------------------------------------------
 *         false              false    |     No
 *         false              true     |     No
 *         true               false    |     Yes
 *         true               true     |     Yes
 *     ------------------------------------------------------------
 *
 *
 * 17. syslog when corrupted VTOC is encountered
 *
 *     Currently, if an invalid VTOC is encountered, sd prints a syslog
 *     message only for fixed SCSI disks.
 *
 *     ------------------------------------------------------------
 *       removable media    USB/1394   |   print syslog
 *     ------------------------------------------------------------
 *         false              false    |     Yes
 *         false              true     |     No
 *         true               false    |     No
 *         true               true     |     No
 *     ------------------------------------------------------------
 */
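/*
 * Illustrative sketch (not from the driver): a minimal userland check of
 * the DKIOCREMOVABLE ioctl described in section 6 above. DKIOCREMOVABLE
 * comes from <sys/dkio.h>; the device path used here is an assumption.
 */
#if 0	/* example only; standalone userland program */
#include <sys/dkio.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int removable = 0;
	/* O_NDELAY lets the open succeed even with no media present */
	int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY | O_NDELAY);

	if (fd < 0)
		return (1);
	if (ioctl(fd, DKIOCREMOVABLE, &removable) == 0)
		(void) printf("removable: %d\n", removable);
	(void) close(fd);
	return (0);
}
#endif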
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int	pm_capable_prop;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	/*
	 * Set the sync cache required flag to false.
	 * This ensures that no SYNC CACHE command is
	 * sent when there are no writes.
	 */
	un->un_f_sync_cache_required = FALSE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable, and for this kind
		 * of device it is possible to change the medium after
		 * opening it. Thus we should support this operation.
		 */
		un->un_f_has_removable_media = TRUE;

		/*
		 * Support non-512-byte blocksizes on removable media devices.
		 */
		un->un_f_non_devbsize_supported = TRUE;

		/*
		 * Assume that all removable media devices support DOOR_LOCK.
		 */
		un->un_f_doorlock_supported = TRUE;

		/*
		 * A removable media device may be opened with the NDELAY
		 * flag when there is no media in the drive; in that case we
		 * don't care whether the device is writable. Without the
		 * NDELAY flag, we need to check whether the media is
		 * write-protected.
		 */
		un->un_f_chk_wp_open = TRUE;

		/*
		 * Need to start a SCSI watch thread to monitor the media
		 * state; when media is inserted or ejected, notify syseventd.
		 */
		un->un_f_monitor_media_state = TRUE;

		/*
		 * Some devices don't support the START_STOP_UNIT command.
		 * Therefore, check whether a device supports it before
		 * sending it.
		 */
		un->un_f_check_start_stop = TRUE;

		/*
		 * Support the eject media ioctls:
		 * FDEJECT, DKIOCEJECT, CDROMEJECT
		 */
		un->un_f_eject_media_supported = TRUE;

		/*
		 * Because many removable-media devices don't support
		 * LOG_SENSE, we cannot use that command to check whether
		 * a removable media device supports power management.
		 * We assume that they support power management via the
		 * START_STOP_UNIT command and can be spun up and down
		 * without limitations.
		 */
		un->un_f_pm_supported = TRUE;

		/*
		 * Need to create a zero-length (Boolean) property
		 * removable-media for the removable media devices.
		 * Note that the return value of the property is not being
		 * checked, since if we are unable to create the property
		 * we do not want the attach to fail altogether. This is
		 * consistent with other property creation in attach.
		 */
		(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
		    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

	} else {
		/*
		 * create device ID for device
		 */
		un->un_f_devid_supported = TRUE;

		/*
		 * Spin up non-removable-media devices once they are attached.
		 */
		un->un_f_attach_spinup = TRUE;

		/*
		 * According to the SCSI specification, sense data has two
		 * formats: fixed format and descriptor format. At present,
		 * we don't support descriptor format sense data for
		 * removable media.
		 */
		if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
			un->un_f_descr_format_supported = TRUE;
		}

		/*
		 * kstats are created only for non-removable media devices.
		 *
		 * Set this in sd.conf to 0 in order to disable kstats. The
		 * default is 1, so they are enabled by default.
		 */
		un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
		    SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "enable-partition-kstats", 1));

		/*
		 * Check if the HBA has set the "pm-capable" property.
		 * If "pm-capable" exists and is non-zero then we can
		 * power manage the device without checking the start/stop
		 * cycle count log sense page.
		 *
		 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0)
		 * then we should not power manage the device.
		 *
		 * If "pm-capable" doesn't exist then pm_capable_prop will
		 * be set to SD_PM_CAPABLE_UNDEFINED (-1).
		 * In this case, sd will check the start/stop cycle count
		 * log sense page and power manage the device if the cycle
		 * count limit has not been exceeded.
		 */
		pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
		if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
			un->un_f_log_sense_supported = TRUE;
		} else {
			/*
			 * pm-capable property exists.
			 *
			 * Convert "TRUE" values for pm_capable_prop to
			 * SD_PM_CAPABLE_TRUE (1) to make it easier to check
			 * later. "TRUE" values are any values except
			 * SD_PM_CAPABLE_FALSE (0) and
			 * SD_PM_CAPABLE_UNDEFINED (-1).
			 */
			if (pm_capable_prop == SD_PM_CAPABLE_FALSE) {
				un->un_f_log_sense_supported = FALSE;
			} else {
				un->un_f_pm_supported = TRUE;
			}

			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p pm-capable "
			    "property set to %d.\n", un,
			    un->un_f_pm_supported);
		}
	}

	if (un->un_f_is_hotpluggable) {

		/*
		 * Have to watch hotpluggable devices as well, since
		 * that's the only way for userland applications to
		 * detect hot removal while the device is busy/mounted.
		 */
		un->un_f_monitor_media_state = TRUE;

		un->un_f_check_start_stop = TRUE;

	}
}

/*
 * sd_tg_rdwr:
 *	Provides rdwr access for cmlb via sd_tgops. The start_block is
 *	in units of the system block size; reqlength is in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun *un;
	int path_flag = (int)(uintptr_t)tg_cookie;
	char *dkl = NULL;
	diskaddr_t real_addr = start_block;
	diskaddr_t first_byte, end_block;

	size_t	buffer_size = reqlength;
	int rval = 0;
	diskaddr_t cap;
	uint32_t lbasize;
	sd_ssc_t *ssc;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	ssc = sd_ssc_init(un);
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			goto done1;
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if ((un->un_f_tgt_blocksize_is_valid == FALSE)) {
			mutex_exit(SD_MUTEX(un));
			rval = EIO;
			goto done;
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize, need to re-adjust
		 * blkno and save the index to the beginning of dk_label
		 */
		first_byte = SD_SYSBLOCKS2BYTES(start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to multiple of target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
		    "label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}
	/*
	 * The MMC standard allows READ CAPACITY to be
	 * inaccurate by a bounded amount (in the interest of
	 * response latency). As a result, failed READs are
	 * commonplace (due to the reading of metadata and not
	 * data). Depending on the per-Vendor/drive Sense data,
	 * the failed READ can cause many (unnecessary) retries.
	 */

	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(ssc, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if (dkl != NULL)
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			rval = sd_send_scsi_READ(ssc, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				goto done1;
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(ssc, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

done1:
	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
done:
	sd_ssc_fini(ssc);
	return (rval);
}
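/*
 * Illustrative sketch (not from the driver): the block-size conversion
 * performed by sd_tg_rdwr() above when sys_blocksize != tgt_blocksize.
 * E.g. a 512-byte-granular request at system block 3 of length 1024 on a
 * 2048-byte target maps to target block 0 with a 2-block (4096-byte)
 * bounce buffer, needed because both edges are misaligned.
 */
#if 0	/* example only; standalone */
#include <assert.h>

#define	SYS_BLKSZ	512

static void
map_request(long start_blk, long reqlen, long tgt_blksz,
    long *tgt_blk, long *bufsz, int *misaligned)
{
	long first_byte = start_blk * SYS_BLKSZ;
	/* round the end of the request up to a target block boundary */
	long end_block = (first_byte + reqlen + tgt_blksz - 1) / tgt_blksz;

	*tgt_blk = first_byte / tgt_blksz;
	*bufsz = (end_block - *tgt_blk) * tgt_blksz;
	*misaligned = (first_byte % tgt_blksz) != 0 ||
	    (reqlen % tgt_blksz) != 0;
}

int
main(void)
{
	long tgt_blk, bufsz;
	int misaligned;

	map_request(3, 1024, 2048, &tgt_blk, &bufsz, &misaligned);
	assert(tgt_blk == 0 && bufsz == 4096 && misaligned);
	return (0);
}
#endif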
static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{

	struct sd_lun *un;
	diskaddr_t	cap;
	uint32_t	lbasize;
	int		path_flag = (int)(uintptr_t)tg_cookie;
	int		ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			sd_ssc_t	*ssc;
			mutex_exit(SD_MUTEX(un));
			ssc = sd_ssc_init(un);
			ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0) {
				if (ret == EIO)
					sd_ssc_assessment(ssc,
					    SD_FMT_STATUS_CHECK);
				else
					sd_ssc_assessment(ssc,
					    SD_FMT_IGNORE);
				sd_ssc_fini(ssc);
				return (ret);
			}
			sd_ssc_fini(ssc);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);

	}
}
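/*
 * Illustrative sketch (not from the driver): how a caller sitting above
 * the tg_ops vector might fetch capacity and block size through
 * TG_GETCAPACITY/TG_GETBLOCKSIZE, as cmlb does. The direct calls, the
 * helper name, and the NULL tg_cookie are assumptions for illustration.
 */
#if 0	/* example only */
static int
get_geometry_sketch(dev_info_t *devi, diskaddr_t *capp, uint32_t *lbaszp)
{
	int rval;

	/* a NULL cookie yields path_flag == 0 in sd_tg_getinfo() above */
	rval = sd_tg_getinfo(devi, TG_GETCAPACITY, (void *)capp, NULL);
	if (rval != 0)
		return (rval);
	return (sd_tg_getinfo(devi, TG_GETBLOCKSIZE, (void *)lbaszp, NULL));
}
#endif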
30787 */ 30788 if (drv_assess == SD_FM_DRV_RECOVERY) { 30789 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30790 "cmd.disk.recovered", uscsi_ena, devid, DDI_NOSLEEP, 30791 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30792 "driver-assessment", DATA_TYPE_STRING, assessment, 30793 "op-code", DATA_TYPE_UINT8, op_code, 30794 "cdb", DATA_TYPE_UINT8_ARRAY, 30795 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30796 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30797 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30798 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics, 30799 NULL); 30800 return; 30801 } 30802 30803 /* 30804 * If there is un-expected/un-decodable data, we should post 30805 * ereport.io.scsi.cmd.disk.dev.uderr. 30806 * driver-assessment will be set based on parameter drv_assess. 30807 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back. 30808 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered. 30809 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered. 30810 * SSC_FLAGS_INVALID_DATA - invalid data sent back. 30811 */ 30812 if (ssc->ssc_flags & ssc_invalid_flags) { 30813 if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) { 30814 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30815 "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP, 30816 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30817 "driver-assessment", DATA_TYPE_STRING, 30818 drv_assess == SD_FM_DRV_FATAL ? 30819 "fail" : assessment, 30820 "op-code", DATA_TYPE_UINT8, op_code, 30821 "cdb", DATA_TYPE_UINT8_ARRAY, 30822 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30823 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30824 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30825 "pkt-stats", DATA_TYPE_UINT32, 30826 uscsi_pkt_statistics, 30827 "stat-code", DATA_TYPE_UINT8, 30828 ssc->ssc_uscsi_cmd->uscsi_status, 30829 "un-decode-info", DATA_TYPE_STRING, 30830 ssc->ssc_info, 30831 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 30832 senlen, sensep, 30833 NULL); 30834 } else { 30835 /* 30836 * For other type of invalid data, the 30837 * un-decode-value field would be empty because the 30838 * un-decodable content could be seen from upper 30839 * level payload or inside un-decode-info. 30840 */ 30841 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30842 "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP, 30843 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30844 "driver-assessment", DATA_TYPE_STRING, 30845 drv_assess == SD_FM_DRV_FATAL ? 30846 "fail" : assessment, 30847 "op-code", DATA_TYPE_UINT8, op_code, 30848 "cdb", DATA_TYPE_UINT8_ARRAY, 30849 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30850 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30851 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30852 "pkt-stats", DATA_TYPE_UINT32, 30853 uscsi_pkt_statistics, 30854 "stat-code", DATA_TYPE_UINT8, 30855 ssc->ssc_uscsi_cmd->uscsi_status, 30856 "un-decode-info", DATA_TYPE_STRING, 30857 ssc->ssc_info, 30858 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 30859 0, NULL, 30860 NULL); 30861 } 30862 ssc->ssc_flags &= ~ssc_invalid_flags; 30863 return; 30864 } 30865 30866 if (uscsi_pkt_reason != CMD_CMPLT || 30867 (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) { 30868 /* 30869 * pkt-reason != CMD_CMPLT or SSC_FLAGS_TRAN_ABORT was 30870 * set inside sd_start_cmds due to errors(bad packet or 30871 * fatal transport error), we should take it as a 30872 * transport error, so we post ereport.io.scsi.cmd.disk.tran. 30873 * driver-assessment will be set based on drv_assess. 
	if (uscsi_pkt_reason != CMD_CMPLT ||
	    (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) {
		/*
		 * pkt-reason != CMD_CMPLT or SSC_FLAGS_TRAN_ABORT was
		 * set inside sd_start_cmds due to errors (bad packet or
		 * fatal transport error). We should take it as a
		 * transport error, so we post ereport.io.scsi.cmd.disk.tran.
		 * driver-assessment will be set based on drv_assess.
		 * We will set devid to NULL because it is a transport
		 * error.
		 */
		if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)
			ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT;

		scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
		    "cmd.disk.tran", uscsi_ena, NULL, DDI_NOSLEEP, FM_VERSION,
		    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
		    "driver-assessment", DATA_TYPE_STRING,
		    drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
		    "op-code", DATA_TYPE_UINT8, op_code,
		    "cdb", DATA_TYPE_UINT8_ARRAY,
		    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
		    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
		    "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state,
		    "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
		    NULL);
	} else {
		/*
		 * If we got here, we have a completed command, and we need
		 * to further investigate the sense data to see what kind
		 * of ereport we should post.
		 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr
		 * if sense-key == 0x3.
		 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise.
		 * driver-assessment will be set based on the parameter
		 * drv_assess.
		 */
		if (senlen > 0) {
			/*
			 * Here we have sense data available.
			 */
			uint8_t sense_key;
			sense_key = scsi_sense_key(sensep);
			if (sense_key == 0x3) {
				/*
				 * sense-key == 0x3 (medium error),
				 * driver-assessment should be "fatal" if
				 * drv_assess is SD_FM_DRV_FATAL.
				 */
				scsi_fm_ereport_post(un->un_sd,
				    uscsi_path_instance,
				    "cmd.disk.dev.rqs.merr",
				    uscsi_ena, devid, DDI_NOSLEEP, FM_VERSION,
				    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
				    "driver-assessment",
				    DATA_TYPE_STRING,
				    drv_assess == SD_FM_DRV_FATAL ?
				    "fatal" : assessment,
				    "op-code",
				    DATA_TYPE_UINT8, op_code,
				    "cdb",
				    DATA_TYPE_UINT8_ARRAY, cdblen,
				    ssc->ssc_uscsi_cmd->uscsi_cdb,
				    "pkt-reason",
				    DATA_TYPE_UINT8, uscsi_pkt_reason,
				    "pkt-state",
				    DATA_TYPE_UINT8, uscsi_pkt_state,
				    "pkt-stats",
				    DATA_TYPE_UINT32,
				    uscsi_pkt_statistics,
				    "stat-code",
				    DATA_TYPE_UINT8,
				    ssc->ssc_uscsi_cmd->uscsi_status,
				    "key",
				    DATA_TYPE_UINT8,
				    scsi_sense_key(sensep),
				    "asc",
				    DATA_TYPE_UINT8,
				    scsi_sense_asc(sensep),
				    "ascq",
				    DATA_TYPE_UINT8,
				    scsi_sense_ascq(sensep),
				    "sense-data",
				    DATA_TYPE_UINT8_ARRAY,
				    senlen, sensep,
				    "lba",
				    DATA_TYPE_UINT64,
				    ssc->ssc_uscsi_info->ui_lba,
				    NULL);
			} else {
				/*
				 * If sense-key == 0x4 (hardware
				 * error), driver-assessment should
				 * be "fatal" if drv_assess is
				 * SD_FM_DRV_FATAL.
				 */
30972 "fatal" : "fail") : assessment, 30973 "op-code", 30974 DATA_TYPE_UINT8, op_code, 30975 "cdb", 30976 DATA_TYPE_UINT8_ARRAY, cdblen, 30977 ssc->ssc_uscsi_cmd->uscsi_cdb, 30978 "pkt-reason", 30979 DATA_TYPE_UINT8, uscsi_pkt_reason, 30980 "pkt-state", 30981 DATA_TYPE_UINT8, uscsi_pkt_state, 30982 "pkt-stats", 30983 DATA_TYPE_UINT32, 30984 uscsi_pkt_statistics, 30985 "stat-code", 30986 DATA_TYPE_UINT8, 30987 ssc->ssc_uscsi_cmd->uscsi_status, 30988 "key", 30989 DATA_TYPE_UINT8, 30990 scsi_sense_key(sensep), 30991 "asc", 30992 DATA_TYPE_UINT8, 30993 scsi_sense_asc(sensep), 30994 "ascq", 30995 DATA_TYPE_UINT8, 30996 scsi_sense_ascq(sensep), 30997 "sense-data", 30998 DATA_TYPE_UINT8_ARRAY, 30999 senlen, sensep, 31000 NULL); 31001 } 31002 } else { 31003 /* 31004 * For stat_code == STATUS_GOOD, this is not a 31005 * hardware error. 31006 */ 31007 if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) 31008 return; 31009 31010 /* 31011 * Post ereport.io.scsi.cmd.disk.dev.serr if we got the 31012 * stat-code but with sense data unavailable. 31013 * driver-assessment will be set based on parameter 31014 * drv_assess. 31015 */ 31016 scsi_fm_ereport_post(un->un_sd, 31017 uscsi_path_instance, "cmd.disk.dev.serr", uscsi_ena, 31018 devid, DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 31019 FM_EREPORT_VERS0, 31020 "driver-assessment", DATA_TYPE_STRING, 31021 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment, 31022 "op-code", DATA_TYPE_UINT8, op_code, 31023 "cdb", 31024 DATA_TYPE_UINT8_ARRAY, 31025 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 31026 "pkt-reason", 31027 DATA_TYPE_UINT8, uscsi_pkt_reason, 31028 "pkt-state", 31029 DATA_TYPE_UINT8, uscsi_pkt_state, 31030 "pkt-stats", 31031 DATA_TYPE_UINT32, uscsi_pkt_statistics, 31032 "stat-code", 31033 DATA_TYPE_UINT8, 31034 ssc->ssc_uscsi_cmd->uscsi_status, 31035 NULL); 31036 } 31037 } 31038 } 31039 31040 /* 31041 * Function: sd_ssc_extract_info 31042 * 31043 * Description: Extract information available to help generate ereport. 31044 * 31045 * Context: Kernel thread or interrupt context. 31046 */ 31047 static void 31048 sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp, 31049 struct buf *bp, struct sd_xbuf *xp) 31050 { 31051 size_t senlen = 0; 31052 union scsi_cdb *cdbp; 31053 int path_instance; 31054 /* 31055 * Need scsi_cdb_size array to determine the cdb length. 31056 */ 31057 extern uchar_t scsi_cdb_size[]; 31058 31059 ASSERT(un != NULL); 31060 ASSERT(pktp != NULL); 31061 ASSERT(bp != NULL); 31062 ASSERT(xp != NULL); 31063 ASSERT(ssc != NULL); 31064 ASSERT(mutex_owned(SD_MUTEX(un))); 31065 31066 /* 31067 * Transfer the cdb buffer pointer here. 31068 */ 31069 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 31070 31071 ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)]; 31072 ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp; 31073 31074 /* 31075 * Transfer the sense data buffer pointer if sense data is available, 31076 * calculate the sense data length first. 31077 */ 31078 if ((xp->xb_sense_state & STATE_XARQ_DONE) || 31079 (xp->xb_sense_state & STATE_ARQ_DONE)) { 31080 /* 31081 * For arq case, we will enter here. 31082 */ 31083 if (xp->xb_sense_state & STATE_XARQ_DONE) { 31084 senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid; 31085 } else { 31086 senlen = SENSE_LENGTH; 31087 } 31088 } else { 31089 /* 31090 * For non-arq case, we will enter this branch. 
		 */
		if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK &&
		    (xp->xb_sense_state & STATE_XFERRED_DATA)) {
			senlen = SENSE_LENGTH - xp->xb_sense_resid;
		}

	}

	ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff);
	ssc->ssc_uscsi_cmd->uscsi_rqresid = 0;
	ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data;

	ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);

	/*
	 * Only transfer path_instance when scsi_pkt was properly allocated.
	 */
	path_instance = pktp->pkt_path_instance;
	if (scsi_pkt_allocated_correctly(pktp) && path_instance)
		ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance;
	else
		ssc->ssc_uscsi_cmd->uscsi_path_instance = 0;

	/*
	 * Copy in the other fields we may need when posting an ereport.
	 */
	ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason;
	ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state;
	ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics;
	ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp);

	/*
	 * For a partial read/write command, we will not create an ena,
	 * in case a successful command is recognized as recovered.
	 */
	if ((pktp->pkt_reason == CMD_CMPLT) &&
	    (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) &&
	    (senlen == 0)) {
		return;
	}

	/*
	 * To associate the ereports of a single command execution flow, we
	 * need a shared ena for a specific command.
	 */
	if (xp->xb_ena == 0)
		xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
	ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
}
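/*
 * Illustrative sketch (not from the driver): the sense-length selection
 * implemented in sd_ssc_extract_info() above. The EX_* constants below are
 * stand-ins for MAX_SENSE_LENGTH/SENSE_LENGTH and the STATE_XARQ_DONE,
 * STATE_ARQ_DONE and STATE_XFERRED_DATA flags; their values here are
 * arbitrary.
 */
#if 0	/* example only; standalone */
#include <assert.h>

#define	EX_MAX_SENSE	252	/* stands in for MAX_SENSE_LENGTH */
#define	EX_SENSE	20	/* stands in for SENSE_LENGTH */
#define	EX_XARQ_DONE	0x1
#define	EX_ARQ_DONE	0x2
#define	EX_XFERRED	0x4

static int
sense_len(int state, int resid, int check_condition)
{
	if (state & (EX_XARQ_DONE | EX_ARQ_DONE)) {
		/* auto request sense was performed */
		if (state & EX_XARQ_DONE)
			return (EX_MAX_SENSE - resid);
		return (EX_SENSE);
	}
	/* non-arq: sense only if CHECK CONDITION and data transferred */
	if (check_condition && (state & EX_XFERRED))
		return (EX_SENSE - resid);
	return (0);
}

int
main(void)
{
	assert(sense_len(EX_XARQ_DONE, 12, 0) == EX_MAX_SENSE - 12);
	assert(sense_len(EX_ARQ_DONE, 0, 0) == EX_SENSE);
	assert(sense_len(EX_XFERRED, 2, 1) == EX_SENSE - 2);
	assert(sense_len(0, 0, 0) == 0);
	return (0);
}
#endif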