/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>

#include <sys/fm/protocol.h>

/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else /* !__fibre */
#define	SD_MODULE_NAME	"SCSI Disk Driver"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif /* !__fibre */

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
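
/*
 * Added illustration (a hedged sketch, not a verbatim copy of
 * sd_unit_attach()): the fallback described above could be resolved at
 * attach time with a scsi_ifgetcap(9F) query.  The SD_ADDRESS() macro
 * and the un_interconnect_type field are assumed from sddef.h:
 *
 *	int itype = scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1);
 *	if (itype == -1)
 *		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
 *	else
 *		un->un_interconnect_type = itype;
 */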

/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#define	sd_ssc_init			ssd_ssc_init
#define	sd_ssc_send			ssd_ssc_send
#define	sd_ssc_fini			ssd_ssc_fini
#define	sd_ssc_assessment		ssd_ssc_assessment
#define	sd_ssc_post			ssd_ssc_post
#define	sd_ssc_print			ssd_ssc_print
#define	sd_ssc_ereport_post		ssd_ssc_ereport_post
#define	sd_ssc_set_info			ssd_ssc_set_info
#define	sd_ssc_extract_info		ssd_ssc_extract_info

#endif

#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int	sd_qfull_throttle_enable	= TRUE;

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int	sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int	sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int	sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int	sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int	sd_wait_cmds_complete		= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t	sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t	sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel scsi device only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identify device only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;
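
/*
 * Added note (a hedged reading of the structures above, not original
 * text): one sd_scsi_probe_cache node is chained per HBA dev_info node
 * (pdip), and cache[target] remembers the most recent scsi_probe()
 * result for that target.  sd_scsi_probe_with_cache() can then answer
 * a repeat probe of an unresponsive target from the array instead of
 * waiting out another selection timeout, while
 * sd_scsi_clear_probe_cache() invalidates the cached results so the
 * next probe goes back to the hardware.
 */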

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))


/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))
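
/*
 * Added example (not from the original source): SD_TOUPPER('c')
 * evaluates to 'C'; anything outside 'a'..'z' (e.g. '3' or 'C') is
 * returned unchanged.  Unlike the ANSI toupper() function, this macro
 * evaluates its argument more than once, so arguments with side
 * effects (SD_TOUPPER(*p++)) must be avoided.
 */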

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC     CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-22x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-42x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1818",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000i",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300",     SD_CONF_BSET_THROTTLE,  &lsi_oem_properties },
	{ "LSI",               SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_RSV_REL_TIME|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00       ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
				    | SD_CONF_BSET_READSUB_BCD
				    | SD_CONF_BSET_READ_TOC_ADDR_BCD
				    | SD_CONF_BSET_NO_READ_HEADER
				    | SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
				    | SD_CONF_BSET_READSUB_BCD
				    | SD_CONF_BSET_READ_TOC_ADDR_BCD
				    | SD_CONF_BSET_NO_READ_HEADER
				    | SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
				| SD_CONF_BSET_CTYPE
				| SD_CONF_BSET_NRR_COUNT
				| SD_CONF_BSET_FAB_DEVID
				| SD_CONF_BSET_NOCACHE
				| SD_CONF_BSET_BSY_RETRY_COUNT
				| SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_TRK_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4
				| SD_CONF_BSET_RST_RETRIES
				| SD_CONF_BSET_RSV_REL_TIME
				| SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);



#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_INTERCONNECT_SAS		5

#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	(((un)->un_interconnect_type == SD_INTERCONNECT_SATA) ||\
	((un)->un_interconnect_type == SD_INTERCONNECT_SAS))
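
/*
 * Added illustrative note: code that must diverge by transport is
 * expected to test the predicates above, e.g.
 *
 *	if (SD_IS_PARALLEL_SCSI(un))
 *		use legacy parallel-SCSI handling (probe caching, etc.);
 *	else if (SD_IS_SERIAL(un))
 *		use SATA/SAS handling;
 *
 * un_interconnect_type itself falls back to
 * SD_DEFAULT_INTERCONNECT_TYPE when the HBA does not report an
 * "interconnect-type", as described near the top of this file.
 */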

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
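
/*
 * Added note: New_state() saves the current state in un_last_state
 * before switching, so Restore_state() can undo exactly one
 * transition; a second New_state() call overwrites the saved value.
 *
 * The sd_cdbtab[] table below pairs each CDB group with the largest
 * LBA and transfer length its CDB format can encode (e.g. GROUP0
 * 6-byte CDBs: a 21-bit LBA, 0x1FFFFF, and an 8-bit count, 0xFF).
 * Reading the columns as { CDB size, group code, max LBA, max length }
 * is an inference from the initializers and struct sd_cdbinfo, not
 * wording from the original file.
 */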

static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};

/*
 * Specifies the number of seconds that must have elapsed since the last
 * command has completed for a device to be declared idle to the PM framework.
 */
static int	sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_target_change_task		ssd_target_change_task
#define	sd_log_lun_expansion_event	ssd_log_lun_expansion_event
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_get_media_info_ext		ssd_get_media_info_ext
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_nvpair_str_decode		ssd_nvpair_str_decode
#define	sd_strtok_r			ssd_strtok_r
#define	sd_set_properties		ssd_set_properties
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo
#define	sd_rmw_msg_print_handler	ssd_rmw_msg_print_handler

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int sd_spin_up_unit(sd_ssc_t *ssc);

/*
 * Using sd_ssc_init to establish sd_ssc_t struct
 * Using sd_ssc_send to send uscsi internal command
 * Using sd_ssc_fini to free sd_ssc_t struct
 */
static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
    int flag, enum uio_seg dataspace, int path_flag);
static void sd_ssc_fini(sd_ssc_t *ssc);
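
/*
 * Added sketch (illustrative, not copied from the original source) of
 * the intended calling pattern for the three routines above; the exact
 * flag and assessment values used by real callers are an assumption:
 *
 *	sd_ssc_t *ssc = sd_ssc_init(un);
 *	rval = sd_ssc_send(ssc, &ucmd, FKIOCTL, UIO_SYSSPACE, path_flag);
 *	sd_ssc_assessment(ssc, tp_assess);  -- pick a sd_type_assessment
 *	sd_ssc_fini(ssc);
 */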

/*
 * Using sd_ssc_assessment to set correct type-of-assessment
 * Using sd_ssc_post to post ereport & system log
 *       sd_ssc_post will call sd_ssc_print to print system log
 *       sd_ssc_post will call sd_ssc_ereport_post to post ereport
 */
static void sd_ssc_assessment(sd_ssc_t *ssc,
    enum sd_type_assessment tp_assess);

static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
static void sd_ssc_ereport_post(sd_ssc_t *ssc,
    enum sd_driver_assessment drv_assess);

/*
 * Using sd_ssc_set_info to mark an un-decodable-data error.
 * Using sd_ssc_extract_info to transfer information from internal
 * data structures to sd_ssc_t.
 */
static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp,
    const char *fmt, ...);
static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

#ifdef _LP64
static void sd_enable_descr_sense(sd_ssc_t *ssc);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(sd_ssc_t *ssc);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
static void sd_set_properties(struct sd_lun *un, char *name, char *value);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(sd_ssc_t *ssc);
static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
static int sd_write_deviceid(sd_ssc_t *ssc);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(sd_ssc_t *ssc);

static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1
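
/*
 * Added usage note (hedged, inferred from the values above):
 * sd_cache_control() below takes one of these values for each of the
 * read-cache-disable (rcd_flag) and write-cache-enable (wce_flag)
 * settings, with SD_CACHE_NOCHANGE leaving that setting as currently
 * configured.  Enabling the write cache while leaving the read cache
 * alone would look like:
 *
 *	(void) sd_cache_control(ssc, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE);
 */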

static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
static void sd_get_nv_sup(sd_ssc_t *ssc);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(sd_ssc_t *ssc, int part);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);
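
/*
 * Added note on the two groups above (a reading of the naming, not
 * text from the original file): the iostart functions run top-down as
 * a request is prepared (block-address mapping, block-size conversion,
 * checksum, power management, then sd_core_iostart() issues the
 * command), and each has a matching iodone function that unwinds the
 * same layer in reverse order at completion.  The "index" argument
 * identifies the caller's position in the configured chain.
 */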

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_target_change_task(void *arg);
static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag);
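
/*
 * Added note (hedged): READ CAPACITY(10) returns only a 32-bit block
 * count, so the READ CAPACITY(16) variant below is what allows
 * capacities past 2^32 blocks; judging from its extra *psp argument,
 * it also appears to be where the physical block size is obtained.
 */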
*capp, 1468 uint32_t *lbap, uint32_t *psp, int path_flag); 1469 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, 1470 int path_flag); 1471 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, 1472 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp); 1473 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag); 1474 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, 1475 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp); 1476 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, 1477 uchar_t usr_cmd, uchar_t *usr_bufp); 1478 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, 1479 struct dk_callback *dkc); 1480 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp); 1481 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, 1482 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1483 uchar_t *bufaddr, uint_t buflen, int path_flag); 1484 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 1485 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1486 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag); 1487 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, 1488 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag); 1489 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, 1490 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag); 1491 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 1492 size_t buflen, daddr_t start_block, int path_flag); 1493 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \ 1494 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \ 1495 path_flag) 1496 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\ 1497 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\ 1498 path_flag) 1499 1500 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, 1501 uint16_t buflen, uchar_t page_code, uchar_t page_control, 1502 uint16_t param_ptr, int path_flag); 1503 1504 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un); 1505 static void sd_free_rqs(struct sd_lun *un); 1506 1507 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, 1508 uchar_t *data, int len, int fmt); 1509 static void sd_panic_for_res_conflict(struct sd_lun *un); 1510 1511 /* 1512 * Disk Ioctl Function Prototypes 1513 */ 1514 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag); 1515 static int sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag); 1516 static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag); 1517 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag); 1518 1519 /* 1520 * Multi-host Ioctl Prototypes 1521 */ 1522 static int sd_check_mhd(dev_t dev, int interval); 1523 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1524 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1525 static char *sd_sname(uchar_t status); 1526 static void sd_mhd_resvd_recover(void *arg); 1527 static void sd_resv_reclaim_thread(); 1528 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1529 static int sd_reserve_release(dev_t dev, int cmd); 1530 static void sd_rmv_resv_reclaim_req(dev_t dev); 1531 static void sd_mhd_reset_notify_cb(caddr_t arg); 1532 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1533 mhioc_inkeys_t *usrp, int flag); 1534 static int sd_persistent_reservation_in_read_resv(struct 
sd_lun *un, 1535 mhioc_inresvs_t *usrp, int flag); 1536 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1537 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1538 static int sd_mhdioc_release(dev_t dev); 1539 static int sd_mhdioc_register_devid(dev_t dev); 1540 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1541 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1542 1543 /* 1544 * SCSI removable prototypes 1545 */ 1546 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1547 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1548 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1549 static int sr_pause_resume(dev_t dev, int mode); 1550 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1551 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1552 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1553 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1554 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1555 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1556 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1557 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1558 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1559 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1560 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1561 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1562 static int sr_eject(dev_t dev); 1563 static void sr_ejected(register struct sd_lun *un); 1564 static int sr_check_wp(dev_t dev); 1565 static int sd_check_media(dev_t dev, enum dkio_state state); 1566 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1567 static void sd_delayed_cv_broadcast(void *arg); 1568 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1569 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1570 1571 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page); 1572 1573 /* 1574 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1575 */ 1576 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag); 1577 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1578 static void sd_wm_cache_destructor(void *wm, void *un); 1579 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1580 daddr_t endb, ushort_t typ); 1581 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1582 daddr_t endb); 1583 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1584 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1585 static void sd_read_modify_write_task(void * arg); 1586 static int 1587 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1588 struct buf **bpp); 1589 1590 1591 /* 1592 * Function prototypes for failfast support. 
1593 */ 1594 static void sd_failfast_flushq(struct sd_lun *un); 1595 static int sd_failfast_flushq_callback(struct buf *bp); 1596 1597 /* 1598 * Function prototypes to check for lsi devices 1599 */ 1600 static void sd_is_lsi(struct sd_lun *un); 1601 1602 /* 1603 * Function prototypes for partial DMA support 1604 */ 1605 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1606 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1607 1608 1609 /* Function prototypes for cmlb */ 1610 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1611 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1612 1613 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1614 1615 /* 1616 * For printing RMW warning message timely 1617 */ 1618 static void sd_rmw_msg_print_handler(void *arg); 1619 1620 /* 1621 * Constants for failfast support: 1622 * 1623 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1624 * failfast processing being performed. 1625 * 1626 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1627 * failfast processing on all bufs with B_FAILFAST set. 1628 */ 1629 1630 #define SD_FAILFAST_INACTIVE 0 1631 #define SD_FAILFAST_ACTIVE 1 1632 1633 /* 1634 * Bitmask to control behavior of buf(9S) flushes when a transition to 1635 * the failfast state occurs. Optional bits include: 1636 * 1637 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1638 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1639 * be flushed. 1640 * 1641 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1642 * driver, in addition to the regular wait queue. This includes the xbuf 1643 * queues. When clear, only the driver's wait queue will be flushed. 1644 */ 1645 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1646 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1647 1648 /* 1649 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1650 * to flush all queues within the driver. 1651 */ 1652 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1653 1654 1655 /* 1656 * SD Testing Fault Injection 1657 */ 1658 #ifdef SD_FAULT_INJECTION 1659 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1660 static void sd_faultinjection(struct scsi_pkt *pktp); 1661 static void sd_injection_log(char *buf, struct sd_lun *un); 1662 #endif 1663 1664 /* 1665 * Device driver ops vector 1666 */ 1667 static struct cb_ops sd_cb_ops = { 1668 sdopen, /* open */ 1669 sdclose, /* close */ 1670 sdstrategy, /* strategy */ 1671 nodev, /* print */ 1672 sddump, /* dump */ 1673 sdread, /* read */ 1674 sdwrite, /* write */ 1675 sdioctl, /* ioctl */ 1676 nodev, /* devmap */ 1677 nodev, /* mmap */ 1678 nodev, /* segmap */ 1679 nochpoll, /* poll */ 1680 sd_prop_op, /* cb_prop_op */ 1681 0, /* streamtab */ 1682 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1683 CB_REV, /* cb_rev */ 1684 sdaread, /* async I/O read entry point */ 1685 sdawrite /* async I/O write entry point */ 1686 }; 1687 1688 struct dev_ops sd_ops = { 1689 DEVO_REV, /* devo_rev, */ 1690 0, /* refcnt */ 1691 sdinfo, /* info */ 1692 nulldev, /* identify */ 1693 sdprobe, /* probe */ 1694 sdattach, /* attach */ 1695 sddetach, /* detach */ 1696 nodev, /* reset */ 1697 &sd_cb_ops, /* driver operations */ 1698 NULL, /* bus operations */ 1699 sdpower, /* power */ 1700 ddi_quiesce_not_needed, /* quiesce */ 1701 }; 1702 1703 /* 1704 * This is the loadable module wrapper. 
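 *
 * (Editor's note, illustrative only, on the failfast tunable defined
 * above: sd_failfast_flushctl is a bitmask, so its two optional flags
 * may be combined.  A hypothetical /etc/system entry such as
 *
 *	set sd:sd_failfast_flushctl = 0x3
 *
 * would request (SD_FAILFAST_FLUSH_ALL_BUFS |
 * SD_FAILFAST_FLUSH_ALL_QUEUES), i.e. flush every queued buf from every
 * driver queue on a transition to the failfast state.  This sketches how
 * the mask composes; it is not a recommended setting.)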
1705 */ 1706 #include <sys/modctl.h> 1707 1708 #ifndef XPV_HVM_DRIVER 1709 static struct modldrv modldrv = { 1710 &mod_driverops, /* Type of module. This one is a driver */ 1711 SD_MODULE_NAME, /* Module name. */ 1712 &sd_ops /* driver ops */ 1713 }; 1714 1715 static struct modlinkage modlinkage = { 1716 MODREV_1, &modldrv, NULL 1717 }; 1718 1719 #else /* XPV_HVM_DRIVER */ 1720 static struct modlmisc modlmisc = { 1721 &mod_miscops, /* Type of module. This one is a misc */ 1722 "HVM " SD_MODULE_NAME, /* Module name. */ 1723 }; 1724 1725 static struct modlinkage modlinkage = { 1726 MODREV_1, &modlmisc, NULL 1727 }; 1728 1729 #endif /* XPV_HVM_DRIVER */ 1730 1731 static cmlb_tg_ops_t sd_tgops = { 1732 TG_DK_OPS_VERSION_1, 1733 sd_tg_rdwr, 1734 sd_tg_getinfo 1735 }; 1736 1737 static struct scsi_asq_key_strings sd_additional_codes[] = { 1738 0x81, 0, "Logical Unit is Reserved", 1739 0x85, 0, "Audio Address Not Valid", 1740 0xb6, 0, "Media Load Mechanism Failed", 1741 0xB9, 0, "Audio Play Operation Aborted", 1742 0xbf, 0, "Buffer Overflow for Read All Subcodes Command", 1743 0x53, 2, "Medium removal prevented", 1744 0x6f, 0, "Authentication failed during key exchange", 1745 0x6f, 1, "Key not present", 1746 0x6f, 2, "Key not established", 1747 0x6f, 3, "Read without proper authentication", 1748 0x6f, 4, "Mismatched region to this logical unit", 1749 0x6f, 5, "Region reset count error", 1750 0xffff, 0x0, NULL 1751 }; 1752 1753 1754 /* 1755 * Struct for passing printing information for sense data messages 1756 */ 1757 struct sd_sense_info { 1758 int ssi_severity; 1759 int ssi_pfa_flag; 1760 }; 1761 1762 /* 1763 * Table of function pointers for iostart-side routines. Separate "chains" 1764 * of layered function calls are formed by placing the function pointers 1765 * sequentially in the desired order. Functions are called according to an 1766 * incrementing table index ordering. The last function in each chain must 1767 * be sd_core_iostart(). The corresponding iodone-side routines are expected 1768 * in the sd_iodone_chain[] array. 1769 * 1770 * Note: It may seem more natural to organize both the iostart and iodone 1771 * functions together, into an array of structures (or some similar 1772 * organization) with a common index, rather than two separate arrays which 1773 * must be maintained in synchronization. The purpose of this division is 1774 * to achieve improved performance: individual arrays allows for more 1775 * effective cache line utilization on certain platforms. 
1776 */ 1777 1778 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1779 1780 1781 static sd_chain_t sd_iostart_chain[] = { 1782 1783 /* Chain for buf IO for disk drive targets (PM enabled) */ 1784 sd_mapblockaddr_iostart, /* Index: 0 */ 1785 sd_pm_iostart, /* Index: 1 */ 1786 sd_core_iostart, /* Index: 2 */ 1787 1788 /* Chain for buf IO for disk drive targets (PM disabled) */ 1789 sd_mapblockaddr_iostart, /* Index: 3 */ 1790 sd_core_iostart, /* Index: 4 */ 1791 1792 /* 1793 * Chain for buf IO for removable-media or large sector size 1794 * disk drive targets with RMW needed (PM enabled) 1795 */ 1796 sd_mapblockaddr_iostart, /* Index: 5 */ 1797 sd_mapblocksize_iostart, /* Index: 6 */ 1798 sd_pm_iostart, /* Index: 7 */ 1799 sd_core_iostart, /* Index: 8 */ 1800 1801 /* 1802 * Chain for buf IO for removable-media or large sector size 1803 * disk drive targets with RMW needed (PM disabled) 1804 */ 1805 sd_mapblockaddr_iostart, /* Index: 9 */ 1806 sd_mapblocksize_iostart, /* Index: 10 */ 1807 sd_core_iostart, /* Index: 11 */ 1808 1809 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1810 sd_mapblockaddr_iostart, /* Index: 12 */ 1811 sd_checksum_iostart, /* Index: 13 */ 1812 sd_pm_iostart, /* Index: 14 */ 1813 sd_core_iostart, /* Index: 15 */ 1814 1815 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1816 sd_mapblockaddr_iostart, /* Index: 16 */ 1817 sd_checksum_iostart, /* Index: 17 */ 1818 sd_core_iostart, /* Index: 18 */ 1819 1820 /* Chain for USCSI commands (all targets) */ 1821 sd_pm_iostart, /* Index: 19 */ 1822 sd_core_iostart, /* Index: 20 */ 1823 1824 /* Chain for checksumming USCSI commands (all targets) */ 1825 sd_checksum_uscsi_iostart, /* Index: 21 */ 1826 sd_pm_iostart, /* Index: 22 */ 1827 sd_core_iostart, /* Index: 23 */ 1828 1829 /* Chain for "direct" USCSI commands (all targets) */ 1830 sd_core_iostart, /* Index: 24 */ 1831 1832 /* Chain for "direct priority" USCSI commands (all targets) */ 1833 sd_core_iostart, /* Index: 25 */ 1834 1835 /* 1836 * Chain for buf IO for large sector size disk drive targets 1837 * with RMW needed with checksumming (PM enabled) 1838 */ 1839 sd_mapblockaddr_iostart, /* Index: 26 */ 1840 sd_mapblocksize_iostart, /* Index: 27 */ 1841 sd_checksum_iostart, /* Index: 28 */ 1842 sd_pm_iostart, /* Index: 29 */ 1843 sd_core_iostart, /* Index: 30 */ 1844 1845 /* 1846 * Chain for buf IO for large sector size disk drive targets 1847 * with RMW needed with checksumming (PM disabled) 1848 */ 1849 sd_mapblockaddr_iostart, /* Index: 31 */ 1850 sd_mapblocksize_iostart, /* Index: 32 */ 1851 sd_checksum_iostart, /* Index: 33 */ 1852 sd_core_iostart, /* Index: 34 */ 1853 1854 }; 1855 1856 /* 1857 * Macros to locate the first function of each iostart chain in the 1858 * sd_iostart_chain[] array. These are located by the index in the array. 
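 *
 * Editor's sketch (illustrative only): an instance whose buf IO chain is
 * the PM-enabled disk chain starts a buf(9S) IO at SD_CHAIN_DISK_IOSTART
 * (index 0), conceptually:
 *
 *	xp->xb_chain_iostart = SD_CHAIN_DISK_IOSTART;
 *	SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);
 *
 * which enters sd_mapblockaddr_iostart() at index 0; each layer then
 * passes the buf along its chain until sd_core_iostart() is reached.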
 */
#define	SD_CHAIN_DISK_IOSTART			0
#define	SD_CHAIN_DISK_IOSTART_NO_PM		3
#define	SD_CHAIN_MSS_DISK_IOSTART		5
#define	SD_CHAIN_RMMEDIA_IOSTART		5
#define	SD_CHAIN_MSS_DISK_IOSTART_NO_PM		9
#define	SD_CHAIN_RMMEDIA_IOSTART_NO_PM		9
#define	SD_CHAIN_CHKSUM_IOSTART			12
#define	SD_CHAIN_CHKSUM_IOSTART_NO_PM		16
#define	SD_CHAIN_USCSI_CMD_IOSTART		19
#define	SD_CHAIN_USCSI_CHKSUM_IOSTART		21
#define	SD_CHAIN_DIRECT_CMD_IOSTART		24
#define	SD_CHAIN_PRIORITY_CMD_IOSTART		25
#define	SD_CHAIN_MSS_CHKSUM_IOSTART		26
#define	SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM	31


/*
 * Table of function pointers for the iodone-side routines for the driver-
 * internal layering mechanism.  The calling sequence for iodone routines
 * uses a decrementing table index, so the last routine called in a chain
 * must be at the lowest array index location for that chain.  The last
 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs)
 * or sd_uscsi_iodone() (for uscsi IOs).  Other than this, the ordering
 * of the functions in an iodone side chain must correspond to the ordering
 * of the iostart routines for that chain.  Note that there is no iodone
 * side routine that corresponds to sd_core_iostart(), so there is no
 * entry in the table for this.
 */

static sd_chain_t sd_iodone_chain[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_buf_iodone,			/* Index: 0 */
	sd_mapblockaddr_iodone,		/* Index: 1 */
	sd_pm_iodone,			/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_buf_iodone,			/* Index: 3 */
	sd_mapblockaddr_iodone,		/* Index: 4 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets with RMW needed (PM enabled)
	 */
	sd_buf_iodone,			/* Index: 5 */
	sd_mapblockaddr_iodone,		/* Index: 6 */
	sd_mapblocksize_iodone,		/* Index: 7 */
	sd_pm_iodone,			/* Index: 8 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets with RMW needed (PM disabled)
	 */
	sd_buf_iodone,			/* Index: 9 */
	sd_mapblockaddr_iodone,		/* Index: 10 */
	sd_mapblocksize_iodone,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_buf_iodone,			/* Index: 12 */
	sd_mapblockaddr_iodone,		/* Index: 13 */
	sd_checksum_iodone,		/* Index: 14 */
	sd_pm_iodone,			/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_buf_iodone,			/* Index: 16 */
	sd_mapblockaddr_iodone,		/* Index: 17 */
	sd_checksum_iodone,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_uscsi_iodone,		/* Index: 19 */
	sd_pm_iodone,			/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_uscsi_iodone,		/* Index: 21 */
	sd_checksum_uscsi_iodone,	/* Index: 22 */
	sd_pm_iodone,			/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_uscsi_iodone,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_uscsi_iodone,		/* Index: 25 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with checksumming (PM enabled)
	 */
	sd_buf_iodone,			/* Index: 26 */
	sd_mapblockaddr_iodone,		/* Index: 27 */
	sd_mapblocksize_iodone,		/* Index: 28 */
	sd_checksum_iodone,		/* Index: 29 */
	sd_pm_iodone,			/* Index: 30 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with checksumming (PM disabled)
	 */
	sd_buf_iodone,			/* Index: 31 */
	sd_mapblockaddr_iodone,		/* Index: 32 */
	sd_mapblocksize_iodone,		/* Index: 33 */
	sd_checksum_iodone,		/* Index: 34 */
};


/*
 * Macros to locate the "first" function in the sd_iodone_chain[] array for
 * each iodone-side chain.  These are located by the array index, but as the
 * iodone side functions are called in a decrementing-index order, the
 * highest index number in each chain must be specified (as these correspond
 * to the first function in the iodone chain that will be called by the core
 * at IO completion time).
 */

#define	SD_CHAIN_DISK_IODONE			2
#define	SD_CHAIN_DISK_IODONE_NO_PM		4
#define	SD_CHAIN_RMMEDIA_IODONE			8
#define	SD_CHAIN_MSS_DISK_IODONE		8
#define	SD_CHAIN_RMMEDIA_IODONE_NO_PM		11
#define	SD_CHAIN_MSS_DISK_IODONE_NO_PM		11
#define	SD_CHAIN_CHKSUM_IODONE			15
#define	SD_CHAIN_CHKSUM_IODONE_NO_PM		18
#define	SD_CHAIN_USCSI_CMD_IODONE		20
#define	SD_CHAIN_USCSI_CHKSUM_IODONE		23
#define	SD_CHAIN_DIRECT_CMD_IODONE		24
#define	SD_CHAIN_PRIORITY_CMD_IODONE		25
#define	SD_CHAIN_MSS_CHKSUM_IODONE		30
#define	SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM	34



/*
 * Array to map a layering chain index to the appropriate initpkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */
typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);

static sd_initpkt_t	sd_initpkt_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 0 */
	sd_initpkt_for_buf,		/* Index: 1 */
	sd_initpkt_for_buf,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 3 */
	sd_initpkt_for_buf,		/* Index: 4 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets (PM enabled)
	 */
	sd_initpkt_for_buf,		/* Index: 5 */
	sd_initpkt_for_buf,		/* Index: 6 */
	sd_initpkt_for_buf,		/* Index: 7 */
	sd_initpkt_for_buf,		/* Index: 8 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets (PM disabled)
	 */
	sd_initpkt_for_buf,		/* Index: 9 */
	sd_initpkt_for_buf,		/* Index: 10 */
	sd_initpkt_for_buf,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 12 */
	sd_initpkt_for_buf,		/* Index: 13 */
	sd_initpkt_for_buf,		/* Index: 14 */
	sd_initpkt_for_buf,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 16 */
	sd_initpkt_for_buf,		/* Index: 17 */
	sd_initpkt_for_buf,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_initpkt_for_uscsi,		/* Index: 19 */
	sd_initpkt_for_uscsi,		/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_initpkt_for_uscsi,		/* Index: 21 */
	sd_initpkt_for_uscsi,		/* Index: 22 */
	sd_initpkt_for_uscsi,		/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_initpkt_for_uscsi,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_initpkt_for_uscsi,		/* Index: 25 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with checksumming (PM enabled)
	 */
	sd_initpkt_for_buf,		/* Index: 26 */
	sd_initpkt_for_buf,		/* Index: 27 */
	sd_initpkt_for_buf,		/* Index: 28 */
	sd_initpkt_for_buf,		/* Index: 29 */
	sd_initpkt_for_buf,		/* Index: 30 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with checksumming (PM disabled)
	 */
	sd_initpkt_for_buf,		/* Index: 31 */
	sd_initpkt_for_buf,		/* Index: 32 */
	sd_initpkt_for_buf,		/* Index: 33 */
	sd_initpkt_for_buf,		/* Index: 34 */
};


/*
 * Array to map a layering chain index to the appropriate destroypkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */
typedef void (*sd_destroypkt_t)(struct buf *);

static sd_destroypkt_t	sd_destroypkt_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 0 */
	sd_destroypkt_for_buf,		/* Index: 1 */
	sd_destroypkt_for_buf,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 3 */
	sd_destroypkt_for_buf,		/* Index: 4 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets (PM enabled)
	 */
	sd_destroypkt_for_buf,		/* Index: 5 */
	sd_destroypkt_for_buf,		/* Index: 6 */
	sd_destroypkt_for_buf,		/* Index: 7 */
	sd_destroypkt_for_buf,		/* Index: 8 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets (PM disabled)
	 */
	sd_destroypkt_for_buf,		/* Index: 9 */
	sd_destroypkt_for_buf,		/* Index: 10 */
	sd_destroypkt_for_buf,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 12 */
	sd_destroypkt_for_buf,		/* Index: 13 */
	sd_destroypkt_for_buf,		/* Index: 14 */
	sd_destroypkt_for_buf,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 16 */
	sd_destroypkt_for_buf,		/* Index: 17 */
	sd_destroypkt_for_buf,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_destroypkt_for_uscsi,	/* Index: 19 */
	sd_destroypkt_for_uscsi,	/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_destroypkt_for_uscsi,	/* Index: 21 */
	sd_destroypkt_for_uscsi,	/* Index: 22 */
	sd_destroypkt_for_uscsi,	/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_destroypkt_for_uscsi,	/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_destroypkt_for_uscsi,	/* Index: 25 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with checksumming (PM enabled)
	 */
	sd_destroypkt_for_buf,		/* Index: 26 */
	sd_destroypkt_for_buf,		/* Index: 27 */
	sd_destroypkt_for_buf,		/* Index: 28 */
	sd_destroypkt_for_buf,		/* Index: 29 */
	sd_destroypkt_for_buf,		/* Index: 30 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with checksumming (PM disabled)
	 */
	sd_destroypkt_for_buf,		/* Index: 31 */
	sd_destroypkt_for_buf,		/* Index: 32 */
	sd_destroypkt_for_buf,		/* Index: 33 */
	sd_destroypkt_for_buf,		/* Index: 34 */
};



/*
 * Array to map a layering chain index to the appropriate chain "type".
 * The chain type indicates a specific property/usage of the chain.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */

#define	SD_CHAIN_NULL			0	/* for the special RQS cmd */
#define	SD_CHAIN_BUFIO			1	/* regular buf IO */
#define	SD_CHAIN_USCSI			2	/* regular USCSI commands */
#define	SD_CHAIN_DIRECT			3	/* uscsi, w/ bypass power mgt */
#define	SD_CHAIN_DIRECT_PRIORITY	4	/* uscsi, w/ bypass power mgt */
						/* (for error recovery) */

static int sd_chain_type_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 0 */
	SD_CHAIN_BUFIO,			/* Index: 1 */
	SD_CHAIN_BUFIO,			/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 3 */
	SD_CHAIN_BUFIO,			/* Index: 4 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets (PM enabled)
	 */
	SD_CHAIN_BUFIO,			/* Index: 5 */
	SD_CHAIN_BUFIO,			/* Index: 6 */
	SD_CHAIN_BUFIO,			/* Index: 7 */
	SD_CHAIN_BUFIO,			/* Index: 8 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets (PM disabled)
	 */
	SD_CHAIN_BUFIO,			/* Index: 9 */
	SD_CHAIN_BUFIO,			/* Index: 10 */
	SD_CHAIN_BUFIO,			/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 12 */
	SD_CHAIN_BUFIO,			/* Index: 13 */
	SD_CHAIN_BUFIO,			/* Index: 14 */
	SD_CHAIN_BUFIO,			/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 16 */
	SD_CHAIN_BUFIO,			/* Index: 17 */
	SD_CHAIN_BUFIO,			/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	SD_CHAIN_USCSI,			/* Index: 19 */
	SD_CHAIN_USCSI,			/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	SD_CHAIN_USCSI,			/* Index: 21 */
	SD_CHAIN_USCSI,			/* Index: 22 */
	SD_CHAIN_USCSI,			/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	SD_CHAIN_DIRECT,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	SD_CHAIN_DIRECT_PRIORITY,	/* Index: 25 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with checksumming (PM enabled)
	 */
	SD_CHAIN_BUFIO,			/* Index: 26 */
	SD_CHAIN_BUFIO,			/* Index: 27 */
	SD_CHAIN_BUFIO,			/* Index: 28 */
	SD_CHAIN_BUFIO,			/* Index: 29 */
	SD_CHAIN_BUFIO,			/* Index: 30 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with checksumming (PM disabled)
	 */
	SD_CHAIN_BUFIO,			/* Index: 31 */
	SD_CHAIN_BUFIO,			/* Index: 32 */
	SD_CHAIN_BUFIO,			/* Index: 33 */
	SD_CHAIN_BUFIO,			/* Index: 34 */
};


/* Macro to return TRUE if the IO has come from one of the buf IO chains.
*/ 2248 #define SD_IS_BUFIO(xp) \ 2249 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO) 2250 2251 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */ 2252 #define SD_IS_DIRECT_PRIORITY(xp) \ 2253 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY) 2254 2255 2256 2257 /* 2258 * Struct, array, and macros to map a specific chain to the appropriate 2259 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays. 2260 * 2261 * The sd_chain_index_map[] array is used at attach time to set the various 2262 * un_xxx_chain type members of the sd_lun softstate to the specific layering 2263 * chain to be used with the instance. This allows different instances to use 2264 * different chain for buf IO, uscsi IO, etc.. Also, since the xb_chain_iostart 2265 * and xb_chain_iodone index values in the sd_xbuf are initialized to these 2266 * values at sd_xbuf init time, this allows (1) layering chains may be changed 2267 * dynamically & without the use of locking; and (2) a layer may update the 2268 * xb_chain_io[start|done] member in a given xbuf with its current index value, 2269 * to allow for deferred processing of an IO within the same chain from a 2270 * different execution context. 2271 */ 2272 2273 struct sd_chain_index { 2274 int sci_iostart_index; 2275 int sci_iodone_index; 2276 }; 2277 2278 static struct sd_chain_index sd_chain_index_map[] = { 2279 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2280 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2281 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2282 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2283 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2284 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2285 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2286 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2287 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2288 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2289 { SD_CHAIN_MSS_CHKSUM_IOSTART, SD_CHAIN_MSS_CHKSUM_IODONE }, 2290 { SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM, SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM }, 2291 2292 }; 2293 2294 2295 /* 2296 * The following are indexes into the sd_chain_index_map[] array. 2297 */ 2298 2299 /* un->un_buf_chain_type must be set to one of these */ 2300 #define SD_CHAIN_INFO_DISK 0 2301 #define SD_CHAIN_INFO_DISK_NO_PM 1 2302 #define SD_CHAIN_INFO_RMMEDIA 2 2303 #define SD_CHAIN_INFO_MSS_DISK 2 2304 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2305 #define SD_CHAIN_INFO_MSS_DSK_NO_PM 3 2306 #define SD_CHAIN_INFO_CHKSUM 4 2307 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2308 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM 10 2309 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM_NO_PM 11 2310 2311 /* un->un_uscsi_chain_type must be set to one of these */ 2312 #define SD_CHAIN_INFO_USCSI_CMD 6 2313 /* USCSI with PM disabled is the same as DIRECT */ 2314 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2315 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2316 2317 /* un->un_direct_chain_type must be set to one of these */ 2318 #define SD_CHAIN_INFO_DIRECT_CMD 8 2319 2320 /* un->un_priority_chain_type must be set to one of these */ 2321 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2322 2323 /* size for devid inquiries */ 2324 #define MAX_INQUIRY_SIZE 0xF0 2325 2326 /* 2327 * Macros used by functions to pass a given buf(9S) struct along to the 2328 * next function in the layering chain for further processing. 
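 *
 * Editor's sketch (illustrative only; the function names are
 * hypothetical): a typical iostart-side layer performs its own
 * processing and then hands the buf to the next function in its chain,
 * while its iodone-side counterpart passes the buf back up:
 *
 *	static void
 *	sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		... layer-specific preprocessing of bp ...
 *		SD_NEXT_IOSTART(index, un, bp);
 *	}
 *
 *	static void
 *	sd_example_iodone(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		... layer-specific completion handling ...
 *		SD_NEXT_IODONE(index, un, bp);
 *	}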
2329 * 2330 * In the following macros, passing more than three arguments to the called 2331 * routines causes the optimizer for the SPARC compiler to stop doing tail 2332 * call elimination which results in significant performance degradation. 2333 */ 2334 #define SD_BEGIN_IOSTART(index, un, bp) \ 2335 ((*(sd_iostart_chain[index]))(index, un, bp)) 2336 2337 #define SD_BEGIN_IODONE(index, un, bp) \ 2338 ((*(sd_iodone_chain[index]))(index, un, bp)) 2339 2340 #define SD_NEXT_IOSTART(index, un, bp) \ 2341 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2342 2343 #define SD_NEXT_IODONE(index, un, bp) \ 2344 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2345 2346 /* 2347 * Function: _init 2348 * 2349 * Description: This is the driver _init(9E) entry point. 2350 * 2351 * Return Code: Returns the value from mod_install(9F) or 2352 * ddi_soft_state_init(9F) as appropriate. 2353 * 2354 * Context: Called when driver module loaded. 2355 */ 2356 2357 int 2358 _init(void) 2359 { 2360 int err; 2361 2362 /* establish driver name from module name */ 2363 sd_label = (char *)mod_modname(&modlinkage); 2364 2365 #ifndef XPV_HVM_DRIVER 2366 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2367 SD_MAXUNIT); 2368 if (err != 0) { 2369 return (err); 2370 } 2371 2372 #else /* XPV_HVM_DRIVER */ 2373 /* Remove the leading "hvm_" from the module name */ 2374 ASSERT(strncmp(sd_label, "hvm_", strlen("hvm_")) == 0); 2375 sd_label += strlen("hvm_"); 2376 2377 #endif /* XPV_HVM_DRIVER */ 2378 2379 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2380 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2381 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2382 2383 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2384 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2385 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2386 2387 /* 2388 * it's ok to init here even for fibre device 2389 */ 2390 sd_scsi_probe_cache_init(); 2391 2392 sd_scsi_target_lun_init(); 2393 2394 /* 2395 * Creating taskq before mod_install ensures that all callers (threads) 2396 * that enter the module after a successful mod_install encounter 2397 * a valid taskq. 2398 */ 2399 sd_taskq_create(); 2400 2401 err = mod_install(&modlinkage); 2402 if (err != 0) { 2403 /* delete taskq if install fails */ 2404 sd_taskq_delete(); 2405 2406 mutex_destroy(&sd_detach_mutex); 2407 mutex_destroy(&sd_log_mutex); 2408 mutex_destroy(&sd_label_mutex); 2409 2410 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2411 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2412 cv_destroy(&sd_tr.srq_inprocess_cv); 2413 2414 sd_scsi_probe_cache_fini(); 2415 2416 sd_scsi_target_lun_fini(); 2417 2418 #ifndef XPV_HVM_DRIVER 2419 ddi_soft_state_fini(&sd_state); 2420 #endif /* !XPV_HVM_DRIVER */ 2421 return (err); 2422 } 2423 2424 return (err); 2425 } 2426 2427 2428 /* 2429 * Function: _fini 2430 * 2431 * Description: This is the driver _fini(9E) entry point. 2432 * 2433 * Return Code: Returns the value from mod_remove(9F) 2434 * 2435 * Context: Called when driver module is unloaded. 
2436 */ 2437 2438 int 2439 _fini(void) 2440 { 2441 int err; 2442 2443 if ((err = mod_remove(&modlinkage)) != 0) { 2444 return (err); 2445 } 2446 2447 sd_taskq_delete(); 2448 2449 mutex_destroy(&sd_detach_mutex); 2450 mutex_destroy(&sd_log_mutex); 2451 mutex_destroy(&sd_label_mutex); 2452 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2453 2454 sd_scsi_probe_cache_fini(); 2455 2456 sd_scsi_target_lun_fini(); 2457 2458 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2459 cv_destroy(&sd_tr.srq_inprocess_cv); 2460 2461 #ifndef XPV_HVM_DRIVER 2462 ddi_soft_state_fini(&sd_state); 2463 #endif /* !XPV_HVM_DRIVER */ 2464 2465 return (err); 2466 } 2467 2468 2469 /* 2470 * Function: _info 2471 * 2472 * Description: This is the driver _info(9E) entry point. 2473 * 2474 * Arguments: modinfop - pointer to the driver modinfo structure 2475 * 2476 * Return Code: Returns the value from mod_info(9F). 2477 * 2478 * Context: Kernel thread context 2479 */ 2480 2481 int 2482 _info(struct modinfo *modinfop) 2483 { 2484 return (mod_info(&modlinkage, modinfop)); 2485 } 2486 2487 2488 /* 2489 * The following routines implement the driver message logging facility. 2490 * They provide component- and level- based debug output filtering. 2491 * Output may also be restricted to messages for a single instance by 2492 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set 2493 * to NULL, then messages for all instances are printed. 2494 * 2495 * These routines have been cloned from each other due to the language 2496 * constraints of macros and variable argument list processing. 2497 */ 2498 2499 2500 /* 2501 * Function: sd_log_err 2502 * 2503 * Description: This routine is called by the SD_ERROR macro for debug 2504 * logging of error conditions. 2505 * 2506 * Arguments: comp - driver component being logged 2507 * dev - pointer to driver info structure 2508 * fmt - error string and format to be logged 2509 */ 2510 2511 static void 2512 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...) 2513 { 2514 va_list ap; 2515 dev_info_t *dev; 2516 2517 ASSERT(un != NULL); 2518 dev = SD_DEVINFO(un); 2519 ASSERT(dev != NULL); 2520 2521 /* 2522 * Filter messages based on the global component and level masks. 2523 * Also print if un matches the value of sd_debug_un, or if 2524 * sd_debug_un is set to NULL. 2525 */ 2526 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) && 2527 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2528 mutex_enter(&sd_log_mutex); 2529 va_start(ap, fmt); 2530 (void) vsprintf(sd_log_buf, fmt, ap); 2531 va_end(ap); 2532 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2533 mutex_exit(&sd_log_mutex); 2534 } 2535 #ifdef SD_FAULT_INJECTION 2536 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2537 if (un->sd_injection_mask & comp) { 2538 mutex_enter(&sd_log_mutex); 2539 va_start(ap, fmt); 2540 (void) vsprintf(sd_log_buf, fmt, ap); 2541 va_end(ap); 2542 sd_injection_log(sd_log_buf, un); 2543 mutex_exit(&sd_log_mutex); 2544 } 2545 #endif 2546 } 2547 2548 2549 /* 2550 * Function: sd_log_info 2551 * 2552 * Description: This routine is called by the SD_INFO macro for debug 2553 * logging of general purpose informational conditions. 2554 * 2555 * Arguments: comp - driver component being logged 2556 * dev - pointer to driver info structure 2557 * fmt - info string and format to be logged 2558 */ 2559 2560 static void 2561 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...) 
2562 { 2563 va_list ap; 2564 dev_info_t *dev; 2565 2566 ASSERT(un != NULL); 2567 dev = SD_DEVINFO(un); 2568 ASSERT(dev != NULL); 2569 2570 /* 2571 * Filter messages based on the global component and level masks. 2572 * Also print if un matches the value of sd_debug_un, or if 2573 * sd_debug_un is set to NULL. 2574 */ 2575 if ((sd_component_mask & component) && 2576 (sd_level_mask & SD_LOGMASK_INFO) && 2577 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2578 mutex_enter(&sd_log_mutex); 2579 va_start(ap, fmt); 2580 (void) vsprintf(sd_log_buf, fmt, ap); 2581 va_end(ap); 2582 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2583 mutex_exit(&sd_log_mutex); 2584 } 2585 #ifdef SD_FAULT_INJECTION 2586 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2587 if (un->sd_injection_mask & component) { 2588 mutex_enter(&sd_log_mutex); 2589 va_start(ap, fmt); 2590 (void) vsprintf(sd_log_buf, fmt, ap); 2591 va_end(ap); 2592 sd_injection_log(sd_log_buf, un); 2593 mutex_exit(&sd_log_mutex); 2594 } 2595 #endif 2596 } 2597 2598 2599 /* 2600 * Function: sd_log_trace 2601 * 2602 * Description: This routine is called by the SD_TRACE macro for debug 2603 * logging of trace conditions (i.e. function entry/exit). 2604 * 2605 * Arguments: comp - driver component being logged 2606 * dev - pointer to driver info structure 2607 * fmt - trace string and format to be logged 2608 */ 2609 2610 static void 2611 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...) 2612 { 2613 va_list ap; 2614 dev_info_t *dev; 2615 2616 ASSERT(un != NULL); 2617 dev = SD_DEVINFO(un); 2618 ASSERT(dev != NULL); 2619 2620 /* 2621 * Filter messages based on the global component and level masks. 2622 * Also print if un matches the value of sd_debug_un, or if 2623 * sd_debug_un is set to NULL. 2624 */ 2625 if ((sd_component_mask & component) && 2626 (sd_level_mask & SD_LOGMASK_TRACE) && 2627 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2628 mutex_enter(&sd_log_mutex); 2629 va_start(ap, fmt); 2630 (void) vsprintf(sd_log_buf, fmt, ap); 2631 va_end(ap); 2632 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2633 mutex_exit(&sd_log_mutex); 2634 } 2635 #ifdef SD_FAULT_INJECTION 2636 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2637 if (un->sd_injection_mask & component) { 2638 mutex_enter(&sd_log_mutex); 2639 va_start(ap, fmt); 2640 (void) vsprintf(sd_log_buf, fmt, ap); 2641 va_end(ap); 2642 sd_injection_log(sd_log_buf, un); 2643 mutex_exit(&sd_log_mutex); 2644 } 2645 #endif 2646 } 2647 2648 2649 /* 2650 * Function: sdprobe 2651 * 2652 * Description: This is the driver probe(9e) entry point function. 2653 * 2654 * Arguments: devi - opaque device info handle 2655 * 2656 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2657 * DDI_PROBE_FAILURE: If the probe failed. 2658 * DDI_PROBE_PARTIAL: If the instance is not present now, 2659 * but may be present in the future. 2660 */ 2661 2662 static int 2663 sdprobe(dev_info_t *devi) 2664 { 2665 struct scsi_device *devp; 2666 int rval; 2667 #ifndef XPV_HVM_DRIVER 2668 int instance = ddi_get_instance(devi); 2669 #endif /* !XPV_HVM_DRIVER */ 2670 2671 /* 2672 * if it wasn't for pln, sdprobe could actually be nulldev 2673 * in the "__fibre" case. 2674 */ 2675 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2676 return (DDI_PROBE_DONTCARE); 2677 } 2678 2679 devp = ddi_get_driver_private(devi); 2680 2681 if (devp == NULL) { 2682 /* Ooops... nexus driver is mis-configured... 
*/ 2683 return (DDI_PROBE_FAILURE); 2684 } 2685 2686 #ifndef XPV_HVM_DRIVER 2687 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2688 return (DDI_PROBE_PARTIAL); 2689 } 2690 #endif /* !XPV_HVM_DRIVER */ 2691 2692 /* 2693 * Call the SCSA utility probe routine to see if we actually 2694 * have a target at this SCSI nexus. 2695 */ 2696 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2697 case SCSIPROBE_EXISTS: 2698 switch (devp->sd_inq->inq_dtype) { 2699 case DTYPE_DIRECT: 2700 rval = DDI_PROBE_SUCCESS; 2701 break; 2702 case DTYPE_RODIRECT: 2703 /* CDs etc. Can be removable media */ 2704 rval = DDI_PROBE_SUCCESS; 2705 break; 2706 case DTYPE_OPTICAL: 2707 /* 2708 * Rewritable optical driver HP115AA 2709 * Can also be removable media 2710 */ 2711 2712 /* 2713 * Do not attempt to bind to DTYPE_OPTICAL if 2714 * pre solaris 9 sparc sd behavior is required 2715 * 2716 * If first time through and sd_dtype_optical_bind 2717 * has not been set in /etc/system check properties 2718 */ 2719 2720 if (sd_dtype_optical_bind < 0) { 2721 sd_dtype_optical_bind = ddi_prop_get_int 2722 (DDI_DEV_T_ANY, devi, 0, 2723 "optical-device-bind", 1); 2724 } 2725 2726 if (sd_dtype_optical_bind == 0) { 2727 rval = DDI_PROBE_FAILURE; 2728 } else { 2729 rval = DDI_PROBE_SUCCESS; 2730 } 2731 break; 2732 2733 case DTYPE_NOTPRESENT: 2734 default: 2735 rval = DDI_PROBE_FAILURE; 2736 break; 2737 } 2738 break; 2739 default: 2740 rval = DDI_PROBE_PARTIAL; 2741 break; 2742 } 2743 2744 /* 2745 * This routine checks for resource allocation prior to freeing, 2746 * so it will take care of the "smart probing" case where a 2747 * scsi_probe() may or may not have been issued and will *not* 2748 * free previously-freed resources. 2749 */ 2750 scsi_unprobe(devp); 2751 return (rval); 2752 } 2753 2754 2755 /* 2756 * Function: sdinfo 2757 * 2758 * Description: This is the driver getinfo(9e) entry point function. 2759 * Given the device number, return the devinfo pointer from 2760 * the scsi_device structure or the instance number 2761 * associated with the dev_t. 2762 * 2763 * Arguments: dip - pointer to device info structure 2764 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2765 * DDI_INFO_DEVT2INSTANCE) 2766 * arg - driver dev_t 2767 * resultp - user buffer for request response 2768 * 2769 * Return Code: DDI_SUCCESS 2770 * DDI_FAILURE 2771 */ 2772 /* ARGSUSED */ 2773 static int 2774 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2775 { 2776 struct sd_lun *un; 2777 dev_t dev; 2778 int instance; 2779 int error; 2780 2781 switch (infocmd) { 2782 case DDI_INFO_DEVT2DEVINFO: 2783 dev = (dev_t)arg; 2784 instance = SDUNIT(dev); 2785 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2786 return (DDI_FAILURE); 2787 } 2788 *result = (void *) SD_DEVINFO(un); 2789 error = DDI_SUCCESS; 2790 break; 2791 case DDI_INFO_DEVT2INSTANCE: 2792 dev = (dev_t)arg; 2793 instance = SDUNIT(dev); 2794 *result = (void *)(uintptr_t)instance; 2795 error = DDI_SUCCESS; 2796 break; 2797 default: 2798 error = DDI_FAILURE; 2799 } 2800 return (error); 2801 } 2802 2803 /* 2804 * Function: sd_prop_op 2805 * 2806 * Description: This is the driver prop_op(9e) entry point function. 2807 * Return the number of blocks for the partition in question 2808 * or forward the request to the property facilities. 
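 *
 * Editor's note (hedged example): a request for a partition property
 * such as "Nblocks" is expected to be answered from the disk label via
 * cmlb_prop_op() below; if the instance has no soft state yet, the
 * request simply falls through to the generic ddi_prop_op(9F) path.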
2809 * 2810 * Arguments: dev - device number 2811 * dip - pointer to device info structure 2812 * prop_op - property operator 2813 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2814 * name - pointer to property name 2815 * valuep - pointer or address of the user buffer 2816 * lengthp - property length 2817 * 2818 * Return Code: DDI_PROP_SUCCESS 2819 * DDI_PROP_NOT_FOUND 2820 * DDI_PROP_UNDEFINED 2821 * DDI_PROP_NO_MEMORY 2822 * DDI_PROP_BUF_TOO_SMALL 2823 */ 2824 2825 static int 2826 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2827 char *name, caddr_t valuep, int *lengthp) 2828 { 2829 struct sd_lun *un; 2830 2831 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2832 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2833 name, valuep, lengthp)); 2834 2835 return (cmlb_prop_op(un->un_cmlbhandle, 2836 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2837 SDPART(dev), (void *)SD_PATH_DIRECT)); 2838 } 2839 2840 /* 2841 * The following functions are for smart probing: 2842 * sd_scsi_probe_cache_init() 2843 * sd_scsi_probe_cache_fini() 2844 * sd_scsi_clear_probe_cache() 2845 * sd_scsi_probe_with_cache() 2846 */ 2847 2848 /* 2849 * Function: sd_scsi_probe_cache_init 2850 * 2851 * Description: Initializes the probe response cache mutex and head pointer. 2852 * 2853 * Context: Kernel thread context 2854 */ 2855 2856 static void 2857 sd_scsi_probe_cache_init(void) 2858 { 2859 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2860 sd_scsi_probe_cache_head = NULL; 2861 } 2862 2863 2864 /* 2865 * Function: sd_scsi_probe_cache_fini 2866 * 2867 * Description: Frees all resources associated with the probe response cache. 2868 * 2869 * Context: Kernel thread context 2870 */ 2871 2872 static void 2873 sd_scsi_probe_cache_fini(void) 2874 { 2875 struct sd_scsi_probe_cache *cp; 2876 struct sd_scsi_probe_cache *ncp; 2877 2878 /* Clean up our smart probing linked list */ 2879 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2880 ncp = cp->next; 2881 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2882 } 2883 sd_scsi_probe_cache_head = NULL; 2884 mutex_destroy(&sd_scsi_probe_cache_mutex); 2885 } 2886 2887 2888 /* 2889 * Function: sd_scsi_clear_probe_cache 2890 * 2891 * Description: This routine clears the probe response cache. This is 2892 * done when open() returns ENXIO so that when deferred 2893 * attach is attempted (possibly after a device has been 2894 * turned on) we will retry the probe. Since we don't know 2895 * which target we failed to open, we just clear the 2896 * entire cache. 2897 * 2898 * Context: Kernel thread context 2899 */ 2900 2901 static void 2902 sd_scsi_clear_probe_cache(void) 2903 { 2904 struct sd_scsi_probe_cache *cp; 2905 int i; 2906 2907 mutex_enter(&sd_scsi_probe_cache_mutex); 2908 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2909 /* 2910 * Reset all entries to SCSIPROBE_EXISTS. This will 2911 * force probing to be performed the next time 2912 * sd_scsi_probe_with_cache is called. 2913 */ 2914 for (i = 0; i < NTARGETS_WIDE; i++) { 2915 cp->cache[i] = SCSIPROBE_EXISTS; 2916 } 2917 } 2918 mutex_exit(&sd_scsi_probe_cache_mutex); 2919 } 2920 2921 2922 /* 2923 * Function: sd_scsi_probe_with_cache 2924 * 2925 * Description: This routine implements support for a scsi device probe 2926 * with cache. The driver maintains a cache of the target 2927 * responses to scsi probes. 
If we get no response from a 2928 * target during a probe inquiry, we remember that, and we 2929 * avoid additional calls to scsi_probe on non-zero LUNs 2930 * on the same target until the cache is cleared. By doing 2931 * so we avoid the 1/4 sec selection timeout for nonzero 2932 * LUNs. lun0 of a target is always probed. 2933 * 2934 * Arguments: devp - Pointer to a scsi_device(9S) structure 2935 * waitfunc - indicates what the allocator routines should 2936 * do when resources are not available. This value 2937 * is passed on to scsi_probe() when that routine 2938 * is called. 2939 * 2940 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2941 * otherwise the value returned by scsi_probe(9F). 2942 * 2943 * Context: Kernel thread context 2944 */ 2945 2946 static int 2947 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2948 { 2949 struct sd_scsi_probe_cache *cp; 2950 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2951 int lun, tgt; 2952 2953 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2954 SCSI_ADDR_PROP_LUN, 0); 2955 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2956 SCSI_ADDR_PROP_TARGET, -1); 2957 2958 /* Make sure caching enabled and target in range */ 2959 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2960 /* do it the old way (no cache) */ 2961 return (scsi_probe(devp, waitfn)); 2962 } 2963 2964 mutex_enter(&sd_scsi_probe_cache_mutex); 2965 2966 /* Find the cache for this scsi bus instance */ 2967 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2968 if (cp->pdip == pdip) { 2969 break; 2970 } 2971 } 2972 2973 /* If we can't find a cache for this pdip, create one */ 2974 if (cp == NULL) { 2975 int i; 2976 2977 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2978 KM_SLEEP); 2979 cp->pdip = pdip; 2980 cp->next = sd_scsi_probe_cache_head; 2981 sd_scsi_probe_cache_head = cp; 2982 for (i = 0; i < NTARGETS_WIDE; i++) { 2983 cp->cache[i] = SCSIPROBE_EXISTS; 2984 } 2985 } 2986 2987 mutex_exit(&sd_scsi_probe_cache_mutex); 2988 2989 /* Recompute the cache for this target if LUN zero */ 2990 if (lun == 0) { 2991 cp->cache[tgt] = SCSIPROBE_EXISTS; 2992 } 2993 2994 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2995 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2996 return (SCSIPROBE_NORESP); 2997 } 2998 2999 /* Do the actual probe; save & return the result */ 3000 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 3001 } 3002 3003 3004 /* 3005 * Function: sd_scsi_target_lun_init 3006 * 3007 * Description: Initializes the attached lun chain mutex and head pointer. 
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_target_lun_init(void)
{
	mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
	sd_scsi_target_lun_head = NULL;
}


/*
 * Function: sd_scsi_target_lun_fini
 *
 * Description: Frees all resources associated with the attached lun
 *              chain.
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_target_lun_fini(void)
{
	struct sd_scsi_hba_tgt_lun	*cp;
	struct sd_scsi_hba_tgt_lun	*ncp;

	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
		ncp = cp->next;
		kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
	}
	sd_scsi_target_lun_head = NULL;
	mutex_destroy(&sd_scsi_target_lun_mutex);
}


/*
 * Function: sd_scsi_get_target_lun_count
 *
 * Description: This routine will check in the attached lun chain to see
 *              how many luns are attached on the required SCSI controller
 *              and target.  Currently, some capabilities such as tagged
 *              queuing are supported per target by the HBA, so all luns
 *              in a target have the same capabilities.  Based on this
 *              assumption, sd should only set these capabilities once per
 *              target.  This function is called when sd needs to decide
 *              how many luns are already attached on a target.
 *
 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
 *                  controller device.
 *            target - The target ID on the controller's SCSI bus.
 *
 * Return Code: The number of luns attached on the required target and
 *              controller.
 *              -1 if target ID is not in parallel SCSI scope or the given
 *              dip is not in the chain.
 *
 * Context: Kernel thread context
 */

static int
sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
{
	struct sd_scsi_hba_tgt_lun	*cp;

	if ((target < 0) || (target >= NTARGETS_WIDE)) {
		return (-1);
	}

	mutex_enter(&sd_scsi_target_lun_mutex);

	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == dip) {
			break;
		}
	}

	mutex_exit(&sd_scsi_target_lun_mutex);

	if (cp == NULL) {
		return (-1);
	}

	return (cp->nlun[target]);
}


/*
 * Function: sd_scsi_update_lun_on_target
 *
 * Description: This routine is used to update the attached lun chain when a
 *              lun is attached or detached on a target.
 *
 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
 *                  controller device.
 *            target - The target ID on the controller's SCSI bus.
 *            flag - Indicates whether the lun is attached or detached.
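 *
 * For example (editor's sketch): when the first lun on target 2 of a
 * controller attaches, a call such as
 *
 *	sd_scsi_update_lun_on_target(pdip, 2, SD_SCSI_LUN_ATTACH);
 *
 * creates the per-controller chain entry if needed and increments
 * nlun[2], so a later sd_scsi_get_target_lun_count(pdip, 2) returns 1.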
 *
 * Context: Kernel thread context
 */

static void
sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
{
	struct sd_scsi_hba_tgt_lun	*cp;

	mutex_enter(&sd_scsi_target_lun_mutex);

	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == dip) {
			break;
		}
	}

	if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
		cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
		    KM_SLEEP);
		cp->pdip = dip;
		cp->next = sd_scsi_target_lun_head;
		sd_scsi_target_lun_head = cp;
	}

	mutex_exit(&sd_scsi_target_lun_mutex);

	if (cp != NULL) {
		if (flag == SD_SCSI_LUN_ATTACH) {
			cp->nlun[target]++;
		} else {
			cp->nlun[target]--;
		}
	}
}


/*
 * Function: sd_spin_up_unit
 *
 * Description: Issues the following commands to spin up the device:
 *              START STOP UNIT and INQUIRY.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *                  structure for this target.
 *
 * Return Code: 0 - success
 *              EIO - failure
 *              EACCES - reservation conflict
 *
 * Context: Kernel thread context
 */

static int
sd_spin_up_unit(sd_ssc_t *ssc)
{
	size_t	resid		= 0;
	int	has_conflict	= FALSE;
	uchar_t	*bufaddr;
	int	status;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/*
	 * Send a throwaway START UNIT command.
	 *
	 * If we fail on this, we don't care presently what precisely
	 * is wrong.  EMC's arrays will also fail this with a check
	 * condition (0x2/0x4/0x3) if the device is "inactive," but
	 * we don't want to fail the attach because it may become
	 * "active" later.
	 */
	status = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START,
	    SD_PATH_DIRECT);

	if (status != 0) {
		if (status == EACCES)
			has_conflict = TRUE;
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	/*
	 * Send another INQUIRY command to the target.  This is necessary for
	 * non-removable media direct access devices because their INQUIRY data
	 * may not be fully qualified until they are spun up (perhaps via the
	 * START command above).  (Note: This seems to be needed for some
	 * legacy devices only.)  The INQUIRY command should succeed even if a
	 * Reservation Conflict is present.
	 */
	bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);

	if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid)
	    != 0) {
		kmem_free(bufaddr, SUN_INQSIZE);
		sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		return (EIO);
	}

	/*
	 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
	 * Note that this routine does not return a failure here even if the
	 * INQUIRY command did not return any data.  This is a legacy behavior.
	 */
	if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
		bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
	}

	kmem_free(bufaddr, SUN_INQSIZE);

	/* If we hit a reservation conflict above, tell the caller. */
	if (has_conflict == TRUE) {
		return (EACCES);
	}

	return (0);
}

#ifdef _LP64
/*
 * Function: sd_enable_descr_sense
 *
 * Description: This routine attempts to select descriptor sense format
 *              using the Control mode page.
Devices that support 64 bit 3231 * LBAs (for >2TB luns) should also implement descriptor 3232 * sense data so we will call this function whenever we see 3233 * a lun larger than 2TB. If for some reason the device 3234 * supports 64 bit LBAs but doesn't support descriptor sense 3235 * presumably the mode select will fail. Everything will 3236 * continue to work normally except that we will not get 3237 * complete sense data for commands that fail with an LBA 3238 * larger than 32 bits. 3239 * 3240 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3241 * structure for this target. 3242 * 3243 * Context: Kernel thread context only 3244 */ 3245 3246 static void 3247 sd_enable_descr_sense(sd_ssc_t *ssc) 3248 { 3249 uchar_t *header; 3250 struct mode_control_scsi3 *ctrl_bufp; 3251 size_t buflen; 3252 size_t bd_len; 3253 int status; 3254 struct sd_lun *un; 3255 3256 ASSERT(ssc != NULL); 3257 un = ssc->ssc_un; 3258 ASSERT(un != NULL); 3259 3260 /* 3261 * Read MODE SENSE page 0xA, Control Mode Page 3262 */ 3263 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3264 sizeof (struct mode_control_scsi3); 3265 header = kmem_zalloc(buflen, KM_SLEEP); 3266 3267 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 3268 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT); 3269 3270 if (status != 0) { 3271 SD_ERROR(SD_LOG_COMMON, un, 3272 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3273 goto eds_exit; 3274 } 3275 3276 /* 3277 * Determine size of Block Descriptors in order to locate 3278 * the mode page data. ATAPI devices return 0, SCSI devices 3279 * should return MODE_BLK_DESC_LENGTH. 3280 */ 3281 bd_len = ((struct mode_header *)header)->bdesc_length; 3282 3283 /* Clear the mode data length field for MODE SELECT */ 3284 ((struct mode_header *)header)->length = 0; 3285 3286 ctrl_bufp = (struct mode_control_scsi3 *) 3287 (header + MODE_HEADER_LENGTH + bd_len); 3288 3289 /* 3290 * If the page length is smaller than the expected value, 3291 * the target device doesn't support D_SENSE. Bail out here. 3292 */ 3293 if (ctrl_bufp->mode_page.length < 3294 sizeof (struct mode_control_scsi3) - 2) { 3295 SD_ERROR(SD_LOG_COMMON, un, 3296 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3297 goto eds_exit; 3298 } 3299 3300 /* 3301 * Clear PS bit for MODE SELECT 3302 */ 3303 ctrl_bufp->mode_page.ps = 0; 3304 3305 /* 3306 * Set D_SENSE to enable descriptor sense format. 
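	 * (D_SENSE is the descriptor-format sense data enable bit of the
	 * Control mode page fetched above; struct mode_control_scsi3
	 * exposes it as the d_sense field, so no manual bit masking is
	 * needed below.)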
3307 */ 3308 ctrl_bufp->d_sense = 1; 3309 3310 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3311 3312 /* 3313 * Use MODE SELECT to commit the change to the D_SENSE bit 3314 */ 3315 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 3316 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT); 3317 3318 if (status != 0) { 3319 SD_INFO(SD_LOG_COMMON, un, 3320 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3321 } else { 3322 kmem_free(header, buflen); 3323 return; 3324 } 3325 3326 eds_exit: 3327 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3328 kmem_free(header, buflen); 3329 } 3330 3331 /* 3332 * Function: sd_reenable_dsense_task 3333 * 3334 * Description: Re-enable descriptor sense after device or bus reset 3335 * 3336 * Context: Executes in a taskq() thread context 3337 */ 3338 static void 3339 sd_reenable_dsense_task(void *arg) 3340 { 3341 struct sd_lun *un = arg; 3342 sd_ssc_t *ssc; 3343 3344 ASSERT(un != NULL); 3345 3346 ssc = sd_ssc_init(un); 3347 sd_enable_descr_sense(ssc); 3348 sd_ssc_fini(ssc); 3349 } 3350 #endif /* _LP64 */ 3351 3352 /* 3353 * Function: sd_set_mmc_caps 3354 * 3355 * Description: This routine determines if the device is MMC compliant and if 3356 * the device supports CDDA via a mode sense of the CDVD 3357 * capabilities mode page. Also checks if the device is a 3358 * dvdram writable device. 3359 * 3360 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3361 * structure for this target. 3362 * 3363 * Context: Kernel thread context only 3364 */ 3365 3366 static void 3367 sd_set_mmc_caps(sd_ssc_t *ssc) 3368 { 3369 struct mode_header_grp2 *sense_mhp; 3370 uchar_t *sense_page; 3371 caddr_t buf; 3372 int bd_len; 3373 int status; 3374 struct uscsi_cmd com; 3375 int rtn; 3376 uchar_t *out_data_rw, *out_data_hd; 3377 uchar_t *rqbuf_rw, *rqbuf_hd; 3378 struct sd_lun *un; 3379 3380 ASSERT(ssc != NULL); 3381 un = ssc->ssc_un; 3382 ASSERT(un != NULL); 3383 3384 /* 3385 * The flags which will be set in this function are - mmc compliant, 3386 * dvdram writable device, cdda support. Initialize them to FALSE 3387 * and if a capability is detected - it will be set to TRUE. 3388 */ 3389 un->un_f_mmc_cap = FALSE; 3390 un->un_f_dvdram_writable_device = FALSE; 3391 un->un_f_cfg_cdda = FALSE; 3392 3393 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3394 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3395 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3396 3397 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3398 3399 if (status != 0) { 3400 /* command failed; just return */ 3401 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3402 return; 3403 } 3404 /* 3405 * If the mode sense request for the CDROM CAPABILITIES 3406 * page (0x2A) succeeds the device is assumed to be MMC. 3407 */ 3408 un->un_f_mmc_cap = TRUE; 3409 3410 /* Get to the page data */ 3411 sense_mhp = (struct mode_header_grp2 *)buf; 3412 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3413 sense_mhp->bdesc_length_lo; 3414 if (bd_len > MODE_BLK_DESC_LENGTH) { 3415 /* 3416 * We did not get back the expected block descriptor 3417 * length so we cannot determine if the device supports 3418 * CDDA. However, we still indicate the device is MMC 3419 * according to the successful response to the page 3420 * 0x2A mode sense request. 
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_set_mmc_caps: Mode Sense returned "
		    "invalid block descriptor length\n");
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/* See if read CDDA is supported */
	sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 +
	    bd_len);
	un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE;

	/* See if writing DVD RAM is supported. */
	un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE;
	if (un->un_f_dvdram_writable_device == TRUE) {
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/*
	 * If the device presents DVD or CD capabilities in the mode
	 * page, we can return here since an RRD will not have
	 * these capabilities.
	 */
	if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}
	kmem_free(buf, BUFLEN_MODE_CDROM_CAP);

	/*
	 * If un->un_f_dvdram_writable_device is still FALSE,
	 * check for a Removable Rigid Disk (RRD). An RRD
	 * device is identified by the features RANDOM_WRITABLE and
	 * HARDWARE_DEFECT_MANAGEMENT.
	 */
	out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
	    SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
	    RANDOM_WRITABLE, SD_PATH_STANDARD);

	sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	if (rtn != 0) {
		kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
		kmem_free(rqbuf_rw, SENSE_LENGTH);
		return;
	}

	out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
	    SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
	    HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD);

	sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	if (rtn == 0) {
		/*
		 * We have good information, check for random writable
		 * and hardware defect features.
		 */
		if ((out_data_rw[9] & RANDOM_WRITABLE) &&
		    (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) {
			un->un_f_dvdram_writable_device = TRUE;
		}
	}

	kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_rw, SENSE_LENGTH);
	kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_hd, SENSE_LENGTH);
}

/*
 * Function: sd_check_for_writable_cd
 *
 * Description: This routine determines if the media in the device is
 *		writable or not. It uses the GET CONFIGURATION command
 *		(0x46) to determine if the media is writable.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		structure for this target.
 *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct"
 *		chain and the normal command waitq, or
 *		SD_PATH_DIRECT_PRIORITY to use the USCSI
 *		"direct" chain and bypass the normal command
 *		waitq.
 *
 * Context: Never called at interrupt context.
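 *
 * A minimal usage sketch (illustrative only; the routine asserts that the
 * caller holds SD_MUTEX, and it records its result in
 * un->un_f_mmc_writable_media):
 *
 *	mutex_enter(SD_MUTEX(un));
 *	sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
 *	writable = un->un_f_mmc_writable_media;
 *	mutex_exit(SD_MUTEX(un));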
 */

static void
sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag)
{
	struct uscsi_cmd	com;
	uchar_t			*out_data;
	uchar_t			*rqbuf;
	int			rtn;
	uchar_t			*out_data_rw, *out_data_hd;
	uchar_t			*rqbuf_rw, *rqbuf_hd;
	struct mode_header_grp2 *sense_mhp;
	uchar_t			*sense_page;
	caddr_t			buf;
	int			bd_len;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/*
	 * Initialize un_f_mmc_writable_media to FALSE; it is only set to
	 * TRUE if the configuration info gathered below tells us the
	 * media is writable.
	 */
	un->un_f_mmc_writable_media = FALSE;
	mutex_exit(SD_MUTEX(un));

	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH,
	    out_data, SD_PROFILE_HEADER_LEN, path_flag);

	if (rtn != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	mutex_enter(SD_MUTEX(un));
	if (rtn == 0) {
		/*
		 * We have good information, check for writable DVD.
		 */
		if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
			un->un_f_mmc_writable_media = TRUE;
			kmem_free(out_data, SD_PROFILE_HEADER_LEN);
			kmem_free(rqbuf, SENSE_LENGTH);
			return;
		}
	}

	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
	kmem_free(rqbuf, SENSE_LENGTH);

	/*
	 * Determine if this is an RRD type device.
	 */
	mutex_exit(SD_MUTEX(un));
	buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
	status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag);

	sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	mutex_enter(SD_MUTEX(un));
	if (status != 0) {
		/* command failed; just return */
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/* Get to the page data */
	sense_mhp = (struct mode_header_grp2 *)buf;
	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		/*
		 * We did not get back the expected block descriptor length so
		 * we cannot check the mode page.
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_check_for_writable_cd: Mode Sense returned "
		    "invalid block descriptor length\n");
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/*
	 * If the device presents DVD or CD capabilities in the mode
	 * page, we can return here since an RRD device will not have
	 * these capabilities.
	 */
	sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
	if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}
	kmem_free(buf, BUFLEN_MODE_CDROM_CAP);

	/*
	 * If un->un_f_mmc_writable_media is still FALSE,
	 * check for RRD type media. An RRD device is identified
	 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
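	 *
	 * (Per MMC, bit 0 of byte 10 of each returned feature descriptor
	 * area is the "current" bit, so the checks below require each
	 * feature to be currently active for the mounted media, not
	 * merely supported by the drive.)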
	 */
	mutex_exit(SD_MUTEX(un));
	out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
	    SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
	    RANDOM_WRITABLE, path_flag);

	sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	if (rtn != 0) {
		kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
		kmem_free(rqbuf_rw, SENSE_LENGTH);
		mutex_enter(SD_MUTEX(un));
		return;
	}

	out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
	    SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
	    HARDWARE_DEFECT_MANAGEMENT, path_flag);

	sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	mutex_enter(SD_MUTEX(un));
	if (rtn == 0) {
		/*
		 * We have good information, check for random writable
		 * and hardware defect features as current.
		 */
		if ((out_data_rw[9] & RANDOM_WRITABLE) &&
		    (out_data_rw[10] & 0x1) &&
		    (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
		    (out_data_hd[10] & 0x1)) {
			un->un_f_mmc_writable_media = TRUE;
		}
	}

	kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_rw, SENSE_LENGTH);
	kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_hd, SENSE_LENGTH);
}

/*
 * Function: sd_read_unit_properties
 *
 * Description: The following implements a property lookup mechanism.
 *		Properties for particular disks (keyed on vendor, model
 *		and rev numbers) are sought in the sd.conf file via
 *		sd_process_sdconf_file(), and if not found there, are
 *		looked for in a list hardcoded in this driver via
 *		sd_process_sdconf_table(). Once located, the properties
 *		are used to update the driver unit structure.
 *
 * Arguments: un - driver soft state (unit) structure
 */

static void
sd_read_unit_properties(struct sd_lun *un)
{
	/*
	 * sd_process_sdconf_file returns SD_FAILURE if it cannot find
	 * the "sd-config-list" property (from the sd.conf file) or if
	 * there was not a match for the inquiry vid/pid. If this event
	 * occurs the static driver configuration table is searched for
	 * a match.
	 */
	ASSERT(un != NULL);
	if (sd_process_sdconf_file(un) == SD_FAILURE) {
		sd_process_sdconf_table(un);
	}

	/* check for LSI device */
	sd_is_lsi(un);
}


/*
 * Function: sd_process_sdconf_file
 *
 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the
 *		driver's config file (i.e., sd.conf) and update the driver
 *		soft state structure accordingly.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: SD_SUCCESS - The properties were successfully set according
 *		to the driver configuration file.
 *		SD_FAILURE - The driver config list was not obtained or
 *		there was no vid/pid match. This indicates that
 *		the static config table should be used.
 *
 * The config file has a property, "sd-config-list". Currently we support
 * two formats. For both formats, the value of this property is a list of
 * duplets:
 *
 *	sd-config-list=
 *		<duplet>,
 *		[,<duplet>]*;
 *
 * In the improved format,
 *
 *	<duplet>:= "<vid+pid>","<tunable-list>"
 *
 * and
 *
 *	<tunable-list>:= <tunable> [, <tunable> ]*;
 *	<tunable>:= <name> : <value>
 *
 * The <vid+pid> is the string that is returned by the target device on a
 * SCSI inquiry command, and the <tunable-list> contains one or more tunables
 * to apply to all target devices with the specified <vid+pid>.
 *
 * Each <tunable> is a "<name> : <value>" pair.
 *
 * For the old format, the structure of each duplet is as follows:
 *
 *	<duplet>:= "<vid+pid>","<data-property-name_list>"
 *
 * The first entry of the duplet is the device ID string (the concatenated
 * vid & pid; not to be confused with a device_id). This is defined in
 * the same way as in the sd_disk_table.
 *
 * The second part of the duplet is a string that identifies a
 * data-property-name-list. The data-property-name-list is defined as
 * follows:
 *
 *	<data-property-name-list>:=<data-property-name> [<data-property-name>]
 *
 * The syntax of <data-property-name> depends on the <version> field.
 *
 * If version = SD_CONF_VERSION_1 we have the following syntax:
 *
 *	<data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
 *
 * where the prop0 value will be used to set prop0 if bit0 is set in the
 * flags, prop1 if bit1 is set, etc., and N = SD_CONF_MAX_ITEMS - 1.
 */

static int
sd_process_sdconf_file(struct sd_lun *un)
{
	char	**config_list = NULL;
	uint_t	nelements;
	char	*vidptr;
	int	vidlen;
	char	*dnlist_ptr;
	char	*dataname_ptr;
	char	*dataname_lasts;
	int	*data_list = NULL;
	uint_t	data_list_len;
	int	rval = SD_FAILURE;
	int	i;

	ASSERT(un != NULL);

	/* Obtain the configuration list associated with the .conf file */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list,
	    &config_list, &nelements) != DDI_PROP_SUCCESS) {
		return (SD_FAILURE);
	}

	/*
	 * Compare vids in each duplet to the inquiry vid - if a match is
	 * made, get the data value and update the soft state structure
	 * accordingly.
	 *
	 * Each duplet should appear as a pair of strings; return SD_FAILURE
	 * otherwise.
	 */
	if (nelements & 1) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd-config-list should show as pairs of strings.\n");
		if (config_list)
			ddi_prop_free(config_list);
		return (SD_FAILURE);
	}

	for (i = 0; i < nelements; i += 2) {
		/*
		 * Note: The assumption here is that each vid entry is on
		 * a unique line from its associated duplet.
		 */
		vidptr = config_list[i];
		vidlen = (int)strlen(vidptr);
		if ((vidlen == 0) ||
		    (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) {
			continue;
		}

		/*
		 * dnlist contains 1 or more blank separated
		 * data-property-name entries
		 */
		dnlist_ptr = config_list[i + 1];

		if (strchr(dnlist_ptr, ':') != NULL) {
			/*
			 * Decode the improved format sd-config-list.
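			 * For example, a hypothetical sd.conf entry in this
			 * format (the vid is padded to 8 bytes and the pid
			 * to 16, as described above; the tunable names are
			 * those handled by sd_set_properties()):
			 *
			 *	sd-config-list =
			 *	    "ACME    SuperDisk       ",
			 *	    "retries-busy:6, disksort:false";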
3822 */ 3823 sd_nvpair_str_decode(un, dnlist_ptr); 3824 } else { 3825 /* 3826 * The old format sd-config-list, loop through all 3827 * data-property-name entries in the 3828 * data-property-name-list 3829 * setting the properties for each. 3830 */ 3831 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t", 3832 &dataname_lasts); dataname_ptr != NULL; 3833 dataname_ptr = sd_strtok_r(NULL, " \t", 3834 &dataname_lasts)) { 3835 int version; 3836 3837 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3838 "sd_process_sdconf_file: disk:%s, " 3839 "data:%s\n", vidptr, dataname_ptr); 3840 3841 /* Get the data list */ 3842 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 3843 SD_DEVINFO(un), 0, dataname_ptr, &data_list, 3844 &data_list_len) != DDI_PROP_SUCCESS) { 3845 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3846 "sd_process_sdconf_file: data " 3847 "property (%s) has no value\n", 3848 dataname_ptr); 3849 continue; 3850 } 3851 3852 version = data_list[0]; 3853 3854 if (version == SD_CONF_VERSION_1) { 3855 sd_tunables values; 3856 3857 /* Set the properties */ 3858 if (sd_chk_vers1_data(un, data_list[1], 3859 &data_list[2], data_list_len, 3860 dataname_ptr) == SD_SUCCESS) { 3861 sd_get_tunables_from_conf(un, 3862 data_list[1], &data_list[2], 3863 &values); 3864 sd_set_vers1_properties(un, 3865 data_list[1], &values); 3866 rval = SD_SUCCESS; 3867 } else { 3868 rval = SD_FAILURE; 3869 } 3870 } else { 3871 scsi_log(SD_DEVINFO(un), sd_label, 3872 CE_WARN, "data property %s version " 3873 "0x%x is invalid.", 3874 dataname_ptr, version); 3875 rval = SD_FAILURE; 3876 } 3877 if (data_list) 3878 ddi_prop_free(data_list); 3879 } 3880 } 3881 } 3882 3883 /* free up the memory allocated by ddi_prop_lookup_string_array(). */ 3884 if (config_list) { 3885 ddi_prop_free(config_list); 3886 } 3887 3888 return (rval); 3889 } 3890 3891 /* 3892 * Function: sd_nvpair_str_decode() 3893 * 3894 * Description: Parse the improved format sd-config-list to get 3895 * each entry of tunable, which includes a name-value pair. 3896 * Then call sd_set_properties() to set the property. 3897 * 3898 * Arguments: un - driver soft state (unit) structure 3899 * nvpair_str - the tunable list 3900 */ 3901 static void 3902 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str) 3903 { 3904 char *nv, *name, *value, *token; 3905 char *nv_lasts, *v_lasts, *x_lasts; 3906 3907 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL; 3908 nv = sd_strtok_r(NULL, ",", &nv_lasts)) { 3909 token = sd_strtok_r(nv, ":", &v_lasts); 3910 name = sd_strtok_r(token, " \t", &x_lasts); 3911 token = sd_strtok_r(NULL, ":", &v_lasts); 3912 value = sd_strtok_r(token, " \t", &x_lasts); 3913 if (name == NULL || value == NULL) { 3914 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3915 "sd_nvpair_str_decode: " 3916 "name or value is not valid!\n"); 3917 } else { 3918 sd_set_properties(un, name, value); 3919 } 3920 } 3921 } 3922 3923 /* 3924 * Function: sd_strtok_r() 3925 * 3926 * Description: This function uses strpbrk and strspn to break 3927 * string into tokens on sequentially subsequent calls. Return 3928 * NULL when no non-separator characters remain. The first 3929 * argument is NULL for subsequent calls. 
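 *
 * A small illustrative example (separator characters are consumed, but
 * other whitespace is preserved within tokens):
 *
 *	char buf[] = "a, b ,c";
 *	char *lasts, *tok;
 *
 *	for (tok = sd_strtok_r(buf, ",", &lasts); tok != NULL;
 *	    tok = sd_strtok_r(NULL, ",", &lasts))
 *		...
 *
 * yields the successive tokens "a", " b ", and "c".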
3930 */ 3931 static char * 3932 sd_strtok_r(char *string, const char *sepset, char **lasts) 3933 { 3934 char *q, *r; 3935 3936 /* First or subsequent call */ 3937 if (string == NULL) 3938 string = *lasts; 3939 3940 if (string == NULL) 3941 return (NULL); 3942 3943 /* Skip leading separators */ 3944 q = string + strspn(string, sepset); 3945 3946 if (*q == '\0') 3947 return (NULL); 3948 3949 if ((r = strpbrk(q, sepset)) == NULL) 3950 *lasts = NULL; 3951 else { 3952 *r = '\0'; 3953 *lasts = r + 1; 3954 } 3955 return (q); 3956 } 3957 3958 /* 3959 * Function: sd_set_properties() 3960 * 3961 * Description: Set device properties based on the improved 3962 * format sd-config-list. 3963 * 3964 * Arguments: un - driver soft state (unit) structure 3965 * name - supported tunable name 3966 * value - tunable value 3967 */ 3968 static void 3969 sd_set_properties(struct sd_lun *un, char *name, char *value) 3970 { 3971 char *endptr = NULL; 3972 long val = 0; 3973 3974 if (strcasecmp(name, "cache-nonvolatile") == 0) { 3975 if (strcasecmp(value, "true") == 0) { 3976 un->un_f_suppress_cache_flush = TRUE; 3977 } else if (strcasecmp(value, "false") == 0) { 3978 un->un_f_suppress_cache_flush = FALSE; 3979 } else { 3980 goto value_invalid; 3981 } 3982 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3983 "suppress_cache_flush flag set to %d\n", 3984 un->un_f_suppress_cache_flush); 3985 return; 3986 } 3987 3988 if (strcasecmp(name, "controller-type") == 0) { 3989 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3990 un->un_ctype = val; 3991 } else { 3992 goto value_invalid; 3993 } 3994 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3995 "ctype set to %d\n", un->un_ctype); 3996 return; 3997 } 3998 3999 if (strcasecmp(name, "delay-busy") == 0) { 4000 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4001 un->un_busy_timeout = drv_usectohz(val / 1000); 4002 } else { 4003 goto value_invalid; 4004 } 4005 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4006 "busy_timeout set to %d\n", un->un_busy_timeout); 4007 return; 4008 } 4009 4010 if (strcasecmp(name, "disksort") == 0) { 4011 if (strcasecmp(value, "true") == 0) { 4012 un->un_f_disksort_disabled = FALSE; 4013 } else if (strcasecmp(value, "false") == 0) { 4014 un->un_f_disksort_disabled = TRUE; 4015 } else { 4016 goto value_invalid; 4017 } 4018 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4019 "disksort disabled flag set to %d\n", 4020 un->un_f_disksort_disabled); 4021 return; 4022 } 4023 4024 if (strcasecmp(name, "timeout-releasereservation") == 0) { 4025 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4026 un->un_reserve_release_time = val; 4027 } else { 4028 goto value_invalid; 4029 } 4030 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4031 "reservation release timeout set to %d\n", 4032 un->un_reserve_release_time); 4033 return; 4034 } 4035 4036 if (strcasecmp(name, "reset-lun") == 0) { 4037 if (strcasecmp(value, "true") == 0) { 4038 un->un_f_lun_reset_enabled = TRUE; 4039 } else if (strcasecmp(value, "false") == 0) { 4040 un->un_f_lun_reset_enabled = FALSE; 4041 } else { 4042 goto value_invalid; 4043 } 4044 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4045 "lun reset enabled flag set to %d\n", 4046 un->un_f_lun_reset_enabled); 4047 return; 4048 } 4049 4050 if (strcasecmp(name, "retries-busy") == 0) { 4051 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4052 un->un_busy_retry_count = val; 4053 } else { 4054 goto value_invalid; 4055 } 4056 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4057 "busy retry 
count set to %d\n", un->un_busy_retry_count);
		return;
	}

	if (strcasecmp(name, "retries-timeout") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "timeout retry count set to %d\n", un->un_retry_count);
		return;
	}

	if (strcasecmp(name, "retries-notready") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_notready_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "notready retry count set to %d\n",
		    un->un_notready_retry_count);
		return;
	}

	if (strcasecmp(name, "retries-reset") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_reset_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "reset retry count set to %d\n",
		    un->un_reset_retry_count);
		return;
	}

	if (strcasecmp(name, "throttle-max") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_saved_throttle = un->un_throttle = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "throttle set to %d\n", un->un_throttle);
	}

	if (strcasecmp(name, "throttle-min") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_min_throttle = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "min throttle set to %d\n", un->un_min_throttle);
	}

	if (strcasecmp(name, "rmw-type") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_f_rmw_type = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "RMW type set to %d\n", un->un_f_rmw_type);
	}

	/*
	 * Validate the throttle values.
	 * If any of the numbers are invalid, set everything to defaults.
	 */
	if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle > un->un_throttle)) {
		un->un_saved_throttle = un->un_throttle = sd_max_throttle;
		un->un_min_throttle = sd_min_throttle;
	}
	return;

value_invalid:
	SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
	    "value of prop %s is invalid\n", name);
}

/*
 * Function: sd_get_tunables_from_conf()
 *
 * Description: This function reads the data list from the sd.conf file and
 *		pulls the values that can have numeric values as arguments,
 *		placing each value in the appropriate sd_tunables member.
 *		Since the order of the data list members varies across
 *		platforms, this function reads them from the data list in a
 *		platform-specific order and places them into the correct
 *		sd_tunables member, which is consistent across all platforms.
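 *
 *		For example (illustrative only; the actual bit assignments
 *		are those of the SD_CONF_BSET_* definitions): if
 *		SD_CONF_BSET_THROTTLE were bit 0 and set in flags, then
 *		data_list[0] would supply sdt_throttle. A member whose flag
 *		bit is clear is simply left at zero from the bzero() below.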
 */
static void
sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
    sd_tunables *values)
{
	int i;
	int mask;

	bzero(values, sizeof (sd_tunables));

	for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {

		mask = 1 << i;
		if (mask > flags) {
			break;
		}

		switch (mask & flags) {
		case 0:	/* This mask bit not set in flags */
			continue;
		case SD_CONF_BSET_THROTTLE:
			values->sdt_throttle = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: throttle = %d\n",
			    values->sdt_throttle);
			break;
		case SD_CONF_BSET_CTYPE:
			values->sdt_ctype = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: ctype = %d\n",
			    values->sdt_ctype);
			break;
		case SD_CONF_BSET_NRR_COUNT:
			values->sdt_not_rdy_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
			    values->sdt_not_rdy_retries);
			break;
		case SD_CONF_BSET_BSY_RETRY_COUNT:
			values->sdt_busy_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: busy_retries = %d\n",
			    values->sdt_busy_retries);
			break;
		case SD_CONF_BSET_RST_RETRIES:
			values->sdt_reset_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: reset_retries = %d\n",
			    values->sdt_reset_retries);
			break;
		case SD_CONF_BSET_RSV_REL_TIME:
			values->sdt_reserv_rel_time = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
			    values->sdt_reserv_rel_time);
			break;
		case SD_CONF_BSET_MIN_THROTTLE:
			values->sdt_min_throttle = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: min_throttle = %d\n",
			    values->sdt_min_throttle);
			break;
		case SD_CONF_BSET_DISKSORT_DISABLED:
			values->sdt_disk_sort_dis = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
			    values->sdt_disk_sort_dis);
			break;
		case SD_CONF_BSET_LUN_RESET_ENABLED:
			values->sdt_lun_reset_enable = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: "
			    "lun_reset_enable = %d\n",
			    values->sdt_lun_reset_enable);
			break;
		case SD_CONF_BSET_CACHE_IS_NV:
			values->sdt_suppress_cache_flush = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: "
			    "suppress_cache_flush = %d\n",
			    values->sdt_suppress_cache_flush);
			break;
		}
	}
}

/*
 * Function: sd_process_sdconf_table
 *
 * Description: Search the static configuration table for a match on the
 *		inquiry vid/pid and update the driver soft state structure
 *		according to the table property values for the device.
4245 * 4246 * The form of a configuration table entry is: 4247 * <vid+pid>,<flags>,<property-data> 4248 * "SEAGATE ST42400N",1,0x40000, 4249 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 4250 * 4251 * Arguments: un - driver soft state (unit) structure 4252 */ 4253 4254 static void 4255 sd_process_sdconf_table(struct sd_lun *un) 4256 { 4257 char *id = NULL; 4258 int table_index; 4259 int idlen; 4260 4261 ASSERT(un != NULL); 4262 for (table_index = 0; table_index < sd_disk_table_size; 4263 table_index++) { 4264 id = sd_disk_table[table_index].device_id; 4265 idlen = strlen(id); 4266 if (idlen == 0) { 4267 continue; 4268 } 4269 4270 /* 4271 * The static configuration table currently does not 4272 * implement version 10 properties. Additionally, 4273 * multiple data-property-name entries are not 4274 * implemented in the static configuration table. 4275 */ 4276 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4277 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4278 "sd_process_sdconf_table: disk %s\n", id); 4279 sd_set_vers1_properties(un, 4280 sd_disk_table[table_index].flags, 4281 sd_disk_table[table_index].properties); 4282 break; 4283 } 4284 } 4285 } 4286 4287 4288 /* 4289 * Function: sd_sdconf_id_match 4290 * 4291 * Description: This local function implements a case sensitive vid/pid 4292 * comparison as well as the boundary cases of wild card and 4293 * multiple blanks. 4294 * 4295 * Note: An implicit assumption made here is that the scsi 4296 * inquiry structure will always keep the vid, pid and 4297 * revision strings in consecutive sequence, so they can be 4298 * read as a single string. If this assumption is not the 4299 * case, a separate string, to be used for the check, needs 4300 * to be built with these strings concatenated. 4301 * 4302 * Arguments: un - driver soft state (unit) structure 4303 * id - table or config file vid/pid 4304 * idlen - length of the vid/pid (bytes) 4305 * 4306 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4307 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4308 */ 4309 4310 static int 4311 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 4312 { 4313 struct scsi_inquiry *sd_inq; 4314 int rval = SD_SUCCESS; 4315 4316 ASSERT(un != NULL); 4317 sd_inq = un->un_sd->sd_inq; 4318 ASSERT(id != NULL); 4319 4320 /* 4321 * We use the inq_vid as a pointer to a buffer containing the 4322 * vid and pid and use the entire vid/pid length of the table 4323 * entry for the comparison. This works because the inq_pid 4324 * data member follows inq_vid in the scsi_inquiry structure. 4325 */ 4326 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 4327 /* 4328 * The user id string is compared to the inquiry vid/pid 4329 * using a case insensitive comparison and ignoring 4330 * multiple spaces. 4331 */ 4332 rval = sd_blank_cmp(un, id, idlen); 4333 if (rval != SD_SUCCESS) { 4334 /* 4335 * User id strings that start and end with a "*" 4336 * are a special case. These do not have a 4337 * specific vendor, and the product string can 4338 * appear anywhere in the 16 byte PID portion of 4339 * the inquiry data. This is a simple strstr() 4340 * type search for the user id in the inquiry data. 
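 *
 *		For example, a (hypothetical) id of "*SuperDisk*" would
 *		match any device whose 16 byte inquiry PID field contains
 *		the substring "SuperDisk", regardless of the vendor field.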
4341 */ 4342 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4343 char *pidptr = &id[1]; 4344 int i; 4345 int j; 4346 int pidstrlen = idlen - 2; 4347 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4348 pidstrlen; 4349 4350 if (j < 0) { 4351 return (SD_FAILURE); 4352 } 4353 for (i = 0; i < j; i++) { 4354 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4355 pidptr, pidstrlen) == 0) { 4356 rval = SD_SUCCESS; 4357 break; 4358 } 4359 } 4360 } 4361 } 4362 } 4363 return (rval); 4364 } 4365 4366 4367 /* 4368 * Function: sd_blank_cmp 4369 * 4370 * Description: If the id string starts and ends with a space, treat 4371 * multiple consecutive spaces as equivalent to a single 4372 * space. For example, this causes a sd_disk_table entry 4373 * of " NEC CDROM " to match a device's id string of 4374 * "NEC CDROM". 4375 * 4376 * Note: The success exit condition for this routine is if 4377 * the pointer to the table entry is '\0' and the cnt of 4378 * the inquiry length is zero. This will happen if the inquiry 4379 * string returned by the device is padded with spaces to be 4380 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4381 * SCSI spec states that the inquiry string is to be padded with 4382 * spaces. 4383 * 4384 * Arguments: un - driver soft state (unit) structure 4385 * id - table or config file vid/pid 4386 * idlen - length of the vid/pid (bytes) 4387 * 4388 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4389 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4390 */ 4391 4392 static int 4393 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4394 { 4395 char *p1; 4396 char *p2; 4397 int cnt; 4398 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4399 sizeof (SD_INQUIRY(un)->inq_pid); 4400 4401 ASSERT(un != NULL); 4402 p2 = un->un_sd->sd_inq->inq_vid; 4403 ASSERT(id != NULL); 4404 p1 = id; 4405 4406 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4407 /* 4408 * Note: string p1 is terminated by a NUL but string p2 4409 * isn't. The end of p2 is determined by cnt. 4410 */ 4411 for (;;) { 4412 /* skip over any extra blanks in both strings */ 4413 while ((*p1 != '\0') && (*p1 == ' ')) { 4414 p1++; 4415 } 4416 while ((cnt != 0) && (*p2 == ' ')) { 4417 p2++; 4418 cnt--; 4419 } 4420 4421 /* compare the two strings */ 4422 if ((cnt == 0) || 4423 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4424 break; 4425 } 4426 while ((cnt > 0) && 4427 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4428 p1++; 4429 p2++; 4430 cnt--; 4431 } 4432 } 4433 } 4434 4435 /* return SD_SUCCESS if both strings match */ 4436 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE);
}


/*
 * Function: sd_chk_vers1_data
 *
 * Description: Verify the version 1 device properties provided by the
 *		user via the configuration file
 *
 * Arguments: un - driver soft state (unit) structure
 *	      flags - integer mask indicating properties to be set
 *	      prop_list - integer list of property values
 *	      list_len - number of elements in the list
 *
 * Return Code: SD_SUCCESS - Indicates the user provided data is valid
 *		SD_FAILURE - Indicates the user provided data is invalid
 */

static int
sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr)
{
	int i;
	int index = 0;

	ASSERT(un != NULL);

	/* Check for a NULL property name and list */
	if (dataname_ptr == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_chk_vers1_data: NULL data property name.");
		return (SD_FAILURE);
	}
	if (prop_list == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_chk_vers1_data: %s NULL data property list.",
		    dataname_ptr);
		return (SD_FAILURE);
	}

	/* Display a warning if undefined bits are set in the flags */
	if (flags & ~SD_CONF_BIT_MASK) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_chk_vers1_data: invalid bits 0x%x in data list %s. "
		    "Properties not set.",
		    (flags & ~SD_CONF_BIT_MASK), dataname_ptr);
		return (SD_FAILURE);
	}

	/*
	 * Verify the length of the list by identifying the highest bit set
	 * in the flags and validating that the property list has a length
	 * up to the index of this bit.
	 */
	for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
		if (flags & (1 << i)) {
			index = i + 1;
		}
	}
	if (list_len < (index + 2)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_chk_vers1_data: "
		    "Data property list %s size is incorrect. "
		    "Properties not set.", dataname_ptr);
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: "
		    "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS);
		return (SD_FAILURE);
	}
	return (SD_SUCCESS);
}


/*
 * Function: sd_set_vers1_properties
 *
 * Description: Set version 1 device properties based on a property list
 *		retrieved from the driver configuration file or static
 *		configuration table. Version 1 properties have the format:
 *
 *	<data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
 *
 *		where the prop0 value will be used to set prop0 if bit0
 *		is set in the flags
 *
 * Arguments: un - driver soft state (unit) structure
 *	      flags - integer mask indicating properties to be set
 *	      prop_list - integer list of property values
 */

static void
sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list)
{
	ASSERT(un != NULL);

	/*
	 * Set the flag to indicate cache is to be disabled. An attempt
	 * to disable the cache via sd_cache_control() will be made
	 * later during attach once the basic initialization is complete.
4537 */ 4538 if (flags & SD_CONF_BSET_NOCACHE) { 4539 un->un_f_opt_disable_cache = TRUE; 4540 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4541 "sd_set_vers1_properties: caching disabled flag set\n"); 4542 } 4543 4544 /* CD-specific configuration parameters */ 4545 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4546 un->un_f_cfg_playmsf_bcd = TRUE; 4547 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4548 "sd_set_vers1_properties: playmsf_bcd set\n"); 4549 } 4550 if (flags & SD_CONF_BSET_READSUB_BCD) { 4551 un->un_f_cfg_readsub_bcd = TRUE; 4552 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4553 "sd_set_vers1_properties: readsub_bcd set\n"); 4554 } 4555 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4556 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4557 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4558 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4559 } 4560 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4561 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4562 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4563 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4564 } 4565 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4566 un->un_f_cfg_no_read_header = TRUE; 4567 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4568 "sd_set_vers1_properties: no_read_header set\n"); 4569 } 4570 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4571 un->un_f_cfg_read_cd_xd4 = TRUE; 4572 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4573 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4574 } 4575 4576 /* Support for devices which do not have valid/unique serial numbers */ 4577 if (flags & SD_CONF_BSET_FAB_DEVID) { 4578 un->un_f_opt_fab_devid = TRUE; 4579 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4580 "sd_set_vers1_properties: fab_devid bit set\n"); 4581 } 4582 4583 /* Support for user throttle configuration */ 4584 if (flags & SD_CONF_BSET_THROTTLE) { 4585 ASSERT(prop_list != NULL); 4586 un->un_saved_throttle = un->un_throttle = 4587 prop_list->sdt_throttle; 4588 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4589 "sd_set_vers1_properties: throttle set to %d\n", 4590 prop_list->sdt_throttle); 4591 } 4592 4593 /* Set the per disk retry count according to the conf file or table. 
*/ 4594 if (flags & SD_CONF_BSET_NRR_COUNT) { 4595 ASSERT(prop_list != NULL); 4596 if (prop_list->sdt_not_rdy_retries) { 4597 un->un_notready_retry_count = 4598 prop_list->sdt_not_rdy_retries; 4599 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4600 "sd_set_vers1_properties: not ready retry count" 4601 " set to %d\n", un->un_notready_retry_count); 4602 } 4603 } 4604 4605 /* The controller type is reported for generic disk driver ioctls */ 4606 if (flags & SD_CONF_BSET_CTYPE) { 4607 ASSERT(prop_list != NULL); 4608 switch (prop_list->sdt_ctype) { 4609 case CTYPE_CDROM: 4610 un->un_ctype = prop_list->sdt_ctype; 4611 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4612 "sd_set_vers1_properties: ctype set to " 4613 "CTYPE_CDROM\n"); 4614 break; 4615 case CTYPE_CCS: 4616 un->un_ctype = prop_list->sdt_ctype; 4617 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4618 "sd_set_vers1_properties: ctype set to " 4619 "CTYPE_CCS\n"); 4620 break; 4621 case CTYPE_ROD: /* RW optical */ 4622 un->un_ctype = prop_list->sdt_ctype; 4623 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4624 "sd_set_vers1_properties: ctype set to " 4625 "CTYPE_ROD\n"); 4626 break; 4627 default: 4628 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4629 "sd_set_vers1_properties: Could not set " 4630 "invalid ctype value (%d)", 4631 prop_list->sdt_ctype); 4632 } 4633 } 4634 4635 /* Purple failover timeout */ 4636 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4637 ASSERT(prop_list != NULL); 4638 un->un_busy_retry_count = 4639 prop_list->sdt_busy_retries; 4640 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4641 "sd_set_vers1_properties: " 4642 "busy retry count set to %d\n", 4643 un->un_busy_retry_count); 4644 } 4645 4646 /* Purple reset retry count */ 4647 if (flags & SD_CONF_BSET_RST_RETRIES) { 4648 ASSERT(prop_list != NULL); 4649 un->un_reset_retry_count = 4650 prop_list->sdt_reset_retries; 4651 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4652 "sd_set_vers1_properties: " 4653 "reset retry count set to %d\n", 4654 un->un_reset_retry_count); 4655 } 4656 4657 /* Purple reservation release timeout */ 4658 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4659 ASSERT(prop_list != NULL); 4660 un->un_reserve_release_time = 4661 prop_list->sdt_reserv_rel_time; 4662 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4663 "sd_set_vers1_properties: " 4664 "reservation release timeout set to %d\n", 4665 un->un_reserve_release_time); 4666 } 4667 4668 /* 4669 * Driver flag telling the driver to verify that no commands are pending 4670 * for a device before issuing a Test Unit Ready. This is a workaround 4671 * for a firmware bug in some Seagate eliteI drives. 4672 */ 4673 if (flags & SD_CONF_BSET_TUR_CHECK) { 4674 un->un_f_cfg_tur_check = TRUE; 4675 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4676 "sd_set_vers1_properties: tur queue check set\n"); 4677 } 4678 4679 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4680 un->un_min_throttle = prop_list->sdt_min_throttle; 4681 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4682 "sd_set_vers1_properties: min throttle set to %d\n", 4683 un->un_min_throttle); 4684 } 4685 4686 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4687 un->un_f_disksort_disabled = 4688 (prop_list->sdt_disk_sort_dis != 0) ? 4689 TRUE : FALSE; 4690 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4691 "sd_set_vers1_properties: disksort disabled " 4692 "flag set to %d\n", 4693 prop_list->sdt_disk_sort_dis); 4694 } 4695 4696 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4697 un->un_f_lun_reset_enabled = 4698 (prop_list->sdt_lun_reset_enable != 0) ? 
4699 TRUE : FALSE; 4700 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4701 "sd_set_vers1_properties: lun reset enabled " 4702 "flag set to %d\n", 4703 prop_list->sdt_lun_reset_enable); 4704 } 4705 4706 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4707 un->un_f_suppress_cache_flush = 4708 (prop_list->sdt_suppress_cache_flush != 0) ? 4709 TRUE : FALSE; 4710 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4711 "sd_set_vers1_properties: suppress_cache_flush " 4712 "flag set to %d\n", 4713 prop_list->sdt_suppress_cache_flush); 4714 } 4715 4716 /* 4717 * Validate the throttle values. 4718 * If any of the numbers are invalid, set everything to defaults. 4719 */ 4720 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4721 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4722 (un->un_min_throttle > un->un_throttle)) { 4723 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4724 un->un_min_throttle = sd_min_throttle; 4725 } 4726 } 4727 4728 /* 4729 * Function: sd_is_lsi() 4730 * 4731 * Description: Check for lsi devices, step through the static device 4732 * table to match vid/pid. 4733 * 4734 * Args: un - ptr to sd_lun 4735 * 4736 * Notes: When creating new LSI property, need to add the new LSI property 4737 * to this function. 4738 */ 4739 static void 4740 sd_is_lsi(struct sd_lun *un) 4741 { 4742 char *id = NULL; 4743 int table_index; 4744 int idlen; 4745 void *prop; 4746 4747 ASSERT(un != NULL); 4748 for (table_index = 0; table_index < sd_disk_table_size; 4749 table_index++) { 4750 id = sd_disk_table[table_index].device_id; 4751 idlen = strlen(id); 4752 if (idlen == 0) { 4753 continue; 4754 } 4755 4756 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4757 prop = sd_disk_table[table_index].properties; 4758 if (prop == &lsi_properties || 4759 prop == &lsi_oem_properties || 4760 prop == &lsi_properties_scsi || 4761 prop == &symbios_properties) { 4762 un->un_f_cfg_is_lsi = TRUE; 4763 } 4764 break; 4765 } 4766 } 4767 } 4768 4769 /* 4770 * Function: sd_get_physical_geometry 4771 * 4772 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4773 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4774 * target, and use this information to initialize the physical 4775 * geometry cache specified by pgeom_p. 4776 * 4777 * MODE SENSE is an optional command, so failure in this case 4778 * does not necessarily denote an error. We want to use the 4779 * MODE SENSE commands to derive the physical geometry of the 4780 * device, but if either command fails, the logical geometry is 4781 * used as the fallback for disk label geometry in cmlb. 4782 * 4783 * This requires that un->un_blockcount and un->un_tgt_blocksize 4784 * have already been initialized for the current target and 4785 * that the current values be passed as args so that we don't 4786 * end up ever trying to use -1 as a valid value. This could 4787 * happen if either value is reset while we're not holding 4788 * the mutex. 4789 * 4790 * Arguments: un - driver soft state (unit) structure 4791 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4792 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4793 * to use the USCSI "direct" chain and bypass the normal 4794 * command waitq. 4795 * 4796 * Context: Kernel thread only (can sleep). 
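 *
 * Note: in addition to un and path_flag described above, the routine also
 * takes pgeom_p (the physical geometry cache to fill in) plus the current
 * capacity and lbasize values, which are passed explicitly for the reason
 * given in the Description.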
4797 */ 4798 4799 static int 4800 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4801 diskaddr_t capacity, int lbasize, int path_flag) 4802 { 4803 struct mode_format *page3p; 4804 struct mode_geometry *page4p; 4805 struct mode_header *headerp; 4806 int sector_size; 4807 int nsect; 4808 int nhead; 4809 int ncyl; 4810 int intrlv; 4811 int spc; 4812 diskaddr_t modesense_capacity; 4813 int rpm; 4814 int bd_len; 4815 int mode_header_length; 4816 uchar_t *p3bufp; 4817 uchar_t *p4bufp; 4818 int cdbsize; 4819 int ret = EIO; 4820 sd_ssc_t *ssc; 4821 int status; 4822 4823 ASSERT(un != NULL); 4824 4825 if (lbasize == 0) { 4826 if (ISCD(un)) { 4827 lbasize = 2048; 4828 } else { 4829 lbasize = un->un_sys_blocksize; 4830 } 4831 } 4832 pgeom_p->g_secsize = (unsigned short)lbasize; 4833 4834 /* 4835 * If the unit is a cd/dvd drive MODE SENSE page three 4836 * and MODE SENSE page four are reserved (see SBC spec 4837 * and MMC spec). To prevent soft errors just return 4838 * using the default LBA size. 4839 */ 4840 if (ISCD(un)) 4841 return (ret); 4842 4843 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4844 4845 /* 4846 * Retrieve MODE SENSE page 3 - Format Device Page 4847 */ 4848 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4849 ssc = sd_ssc_init(un); 4850 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp, 4851 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag); 4852 if (status != 0) { 4853 SD_ERROR(SD_LOG_COMMON, un, 4854 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4855 goto page3_exit; 4856 } 4857 4858 /* 4859 * Determine size of Block Descriptors in order to locate the mode 4860 * page data. ATAPI devices return 0, SCSI devices should return 4861 * MODE_BLK_DESC_LENGTH. 4862 */ 4863 headerp = (struct mode_header *)p3bufp; 4864 if (un->un_f_cfg_is_atapi == TRUE) { 4865 struct mode_header_grp2 *mhp = 4866 (struct mode_header_grp2 *)headerp; 4867 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4868 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4869 } else { 4870 mode_header_length = MODE_HEADER_LENGTH; 4871 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4872 } 4873 4874 if (bd_len > MODE_BLK_DESC_LENGTH) { 4875 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4876 "sd_get_physical_geometry: received unexpected bd_len " 4877 "of %d, page3\n", bd_len); 4878 status = EIO; 4879 goto page3_exit; 4880 } 4881 4882 page3p = (struct mode_format *) 4883 ((caddr_t)headerp + mode_header_length + bd_len); 4884 4885 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4886 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4887 "sd_get_physical_geometry: mode sense pg3 code mismatch " 4888 "%d\n", page3p->mode_page.code); 4889 status = EIO; 4890 goto page3_exit; 4891 } 4892 4893 /* 4894 * Use this physical geometry data only if BOTH MODE SENSE commands 4895 * complete successfully; otherwise, revert to the logical geometry. 4896 * So, we need to save everything in temporary variables. 
4897 */ 4898 sector_size = BE_16(page3p->data_bytes_sect); 4899 4900 /* 4901 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4902 */ 4903 if (sector_size == 0) { 4904 sector_size = un->un_sys_blocksize; 4905 } else { 4906 sector_size &= ~(un->un_sys_blocksize - 1); 4907 } 4908 4909 nsect = BE_16(page3p->sect_track); 4910 intrlv = BE_16(page3p->interleave); 4911 4912 SD_INFO(SD_LOG_COMMON, un, 4913 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4914 SD_INFO(SD_LOG_COMMON, un, 4915 " mode page: %d; nsect: %d; sector size: %d;\n", 4916 page3p->mode_page.code, nsect, sector_size); 4917 SD_INFO(SD_LOG_COMMON, un, 4918 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4919 BE_16(page3p->track_skew), 4920 BE_16(page3p->cylinder_skew)); 4921 4922 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 4923 4924 /* 4925 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4926 */ 4927 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4928 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp, 4929 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag); 4930 if (status != 0) { 4931 SD_ERROR(SD_LOG_COMMON, un, 4932 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4933 goto page4_exit; 4934 } 4935 4936 /* 4937 * Determine size of Block Descriptors in order to locate the mode 4938 * page data. ATAPI devices return 0, SCSI devices should return 4939 * MODE_BLK_DESC_LENGTH. 4940 */ 4941 headerp = (struct mode_header *)p4bufp; 4942 if (un->un_f_cfg_is_atapi == TRUE) { 4943 struct mode_header_grp2 *mhp = 4944 (struct mode_header_grp2 *)headerp; 4945 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4946 } else { 4947 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4948 } 4949 4950 if (bd_len > MODE_BLK_DESC_LENGTH) { 4951 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4952 "sd_get_physical_geometry: received unexpected bd_len of " 4953 "%d, page4\n", bd_len); 4954 status = EIO; 4955 goto page4_exit; 4956 } 4957 4958 page4p = (struct mode_geometry *) 4959 ((caddr_t)headerp + mode_header_length + bd_len); 4960 4961 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4962 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4963 "sd_get_physical_geometry: mode sense pg4 code mismatch " 4964 "%d\n", page4p->mode_page.code); 4965 status = EIO; 4966 goto page4_exit; 4967 } 4968 4969 /* 4970 * Stash the data now, after we know that both commands completed. 4971 */ 4972 4973 4974 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4975 spc = nhead * nsect; 4976 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4977 rpm = BE_16(page4p->rpm); 4978 4979 modesense_capacity = spc * ncyl; 4980 4981 SD_INFO(SD_LOG_COMMON, un, 4982 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4983 SD_INFO(SD_LOG_COMMON, un, 4984 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4985 SD_INFO(SD_LOG_COMMON, un, 4986 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4987 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4988 (void *)pgeom_p, capacity); 4989 4990 /* 4991 * Compensate if the drive's geometry is not rectangular, i.e., 4992 * the product of C * H * S returned by MODE SENSE >= that returned 4993 * by read capacity. This is an idiosyncrasy of the original x86 4994 * disk subsystem. 
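 *
 * A worked example of the adjustment below (illustrative numbers): with
 * nhead = 16, nsect = 63 (spc = 1008) and ncyl = 1000, MODE SENSE implies
 * 1,008,000 blocks. If READ CAPACITY reported 1,000,000 blocks, then
 * acyl = ceil((1,008,000 - 1,000,000) / 1008) = 8 alternate cylinders,
 * leaving ncyl = 1000 - 8 = 992.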
4995 */ 4996 if (modesense_capacity >= capacity) { 4997 SD_INFO(SD_LOG_COMMON, un, 4998 "sd_get_physical_geometry: adjusting acyl; " 4999 "old: %d; new: %d\n", pgeom_p->g_acyl, 5000 (modesense_capacity - capacity + spc - 1) / spc); 5001 if (sector_size != 0) { 5002 /* 1243403: NEC D38x7 drives don't support sec size */ 5003 pgeom_p->g_secsize = (unsigned short)sector_size; 5004 } 5005 pgeom_p->g_nsect = (unsigned short)nsect; 5006 pgeom_p->g_nhead = (unsigned short)nhead; 5007 pgeom_p->g_capacity = capacity; 5008 pgeom_p->g_acyl = 5009 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 5010 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 5011 } 5012 5013 pgeom_p->g_rpm = (unsigned short)rpm; 5014 pgeom_p->g_intrlv = (unsigned short)intrlv; 5015 ret = 0; 5016 5017 SD_INFO(SD_LOG_COMMON, un, 5018 "sd_get_physical_geometry: mode sense geometry:\n"); 5019 SD_INFO(SD_LOG_COMMON, un, 5020 " nsect: %d; sector size: %d; interlv: %d\n", 5021 nsect, sector_size, intrlv); 5022 SD_INFO(SD_LOG_COMMON, un, 5023 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 5024 nhead, ncyl, rpm, modesense_capacity); 5025 SD_INFO(SD_LOG_COMMON, un, 5026 "sd_get_physical_geometry: (cached)\n"); 5027 SD_INFO(SD_LOG_COMMON, un, 5028 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 5029 pgeom_p->g_ncyl, pgeom_p->g_acyl, 5030 pgeom_p->g_nhead, pgeom_p->g_nsect); 5031 SD_INFO(SD_LOG_COMMON, un, 5032 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 5033 pgeom_p->g_secsize, pgeom_p->g_capacity, 5034 pgeom_p->g_intrlv, pgeom_p->g_rpm); 5035 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 5036 5037 page4_exit: 5038 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 5039 5040 page3_exit: 5041 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 5042 5043 if (status != 0) { 5044 if (status == EIO) { 5045 /* 5046 * Some disks do not support mode sense(6), we 5047 * should ignore this kind of error(sense key is 5048 * 0x5 - illegal request). 5049 */ 5050 uint8_t *sensep; 5051 int senlen; 5052 5053 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 5054 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 5055 ssc->ssc_uscsi_cmd->uscsi_rqresid); 5056 5057 if (senlen > 0 && 5058 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 5059 sd_ssc_assessment(ssc, 5060 SD_FMT_IGNORE_COMPROMISE); 5061 } else { 5062 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 5063 } 5064 } else { 5065 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5066 } 5067 } 5068 sd_ssc_fini(ssc); 5069 return (ret); 5070 } 5071 5072 /* 5073 * Function: sd_get_virtual_geometry 5074 * 5075 * Description: Ask the controller to tell us about the target device. 5076 * 5077 * Arguments: un - pointer to softstate 5078 * capacity - disk capacity in #blocks 5079 * lbasize - disk block size in bytes 5080 * 5081 * Context: Kernel thread only 5082 */ 5083 5084 static int 5085 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 5086 diskaddr_t capacity, int lbasize) 5087 { 5088 uint_t geombuf; 5089 int spc; 5090 5091 ASSERT(un != NULL); 5092 5093 /* Set sector size, and total number of sectors */ 5094 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 5095 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 5096 5097 /* Let the HBA tell us its geometry */ 5098 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 5099 5100 /* A value of -1 indicates an undefined "geometry" property */ 5101 if (geombuf == (-1)) { 5102 return (EINVAL); 5103 } 5104 5105 /* Initialize the logical geometry cache. 
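 * The HBA's "geometry" capability packs nhead into the upper 16 bits of
 * the returned word and nsect into the lower 16 bits; e.g. a
 * (hypothetical) value of 0x00FF003F decodes to 255 heads and 63 sectors
 * per track, as the extraction below shows.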
*/
5106 	lgeom_p->g_nhead = (geombuf >> 16) & 0xffff;
5107 	lgeom_p->g_nsect = geombuf & 0xffff;
5108 	lgeom_p->g_secsize = un->un_sys_blocksize;
5109 
5110 	spc = lgeom_p->g_nhead * lgeom_p->g_nsect;
5111 
5112 	/*
5113 	 * Note: The driver originally converted the capacity value from
5114 	 * target blocks to system blocks. However, the capacity value passed
5115 	 * to this routine is already in terms of system blocks (this scaling
5116 	 * is done when the READ CAPACITY command is issued and processed).
5117 	 * This 'error' may have gone undetected because the usage of g_ncyl
5118 	 * (which is based upon g_capacity) is very limited within the driver.
5119 	 */
5120 	lgeom_p->g_capacity = capacity;
5121 
5122 	/*
5123 	 * Set ncyl to zero if the HBA returned a zero nhead or nsect value.
5124 	 * The HBA may return zero values if the device has been removed.
5125 	 */
5126 	if (spc == 0) {
5127 		lgeom_p->g_ncyl = 0;
5128 	} else {
5129 		lgeom_p->g_ncyl = lgeom_p->g_capacity / spc;
5130 	}
5131 	lgeom_p->g_acyl = 0;
5132 
5133 	SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n");
5134 	return (0);
5135 
5136 }
5137 /*
5138  * Function: sd_update_block_info
5139  *
5140  * Description: Record the new target block size and capacity in the
5141  *		soft state, marking each value as valid when nonzero.
5142  *
5143  * Arguments: un: unit struct.
5144  *		lbasize: new target sector size
5145  *		capacity: new target capacity, i.e. block count
5146  *
5147  * Context: Kernel thread context
5148  */
5149 
5150 static void
5151 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity)
5152 {
5153 	if (lbasize != 0) {
5154 		un->un_tgt_blocksize = lbasize;
5155 		un->un_f_tgt_blocksize_is_valid = TRUE;
5156 		if (!un->un_f_has_removable_media) {
5157 			un->un_sys_blocksize = lbasize;
5158 		}
5159 	}
5160 
5161 	if (capacity != 0) {
5162 		un->un_blockcount = capacity;
5163 		un->un_f_blockcount_is_valid = TRUE;
5164 	}
5165 }
5166 
5167 
5168 /*
5169  * Function: sd_register_devid
5170  *
5171  * Description: This routine will obtain the device id information from the
5172  *		target, obtain the serial number, and register the device
5173  *		id with the ddi framework.
5174  *
5175  * Arguments: ssc - access handle; ssc->ssc_un is the unit struct
5176  *		devi - the system's dev_info_t for the device.
5177  *		reservation_flag - indicates if a reservation conflict
5178  *		occurred during attach
5179  *
5180  * Context: Kernel Thread
5181  */
5182 static void
5183 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag)
5184 {
5185 	int		rval = 0;
5186 	uchar_t		*inq80 = NULL;
5187 	size_t		inq80_len = MAX_INQUIRY_SIZE;
5188 	size_t		inq80_resid = 0;
5189 	uchar_t		*inq83 = NULL;
5190 	size_t		inq83_len = MAX_INQUIRY_SIZE;
5191 	size_t		inq83_resid = 0;
5192 	int		dlen, len;
5193 	char		*sn;
5194 	struct sd_lun	*un;
5195 
5196 	ASSERT(ssc != NULL);
5197 	un = ssc->ssc_un;
5198 	ASSERT(un != NULL);
5199 	ASSERT(mutex_owned(SD_MUTEX(un)));
5200 	ASSERT((SD_DEVINFO(un)) == devi);
5201 
5202 
5203 	/*
5204 	 * We check the availability of the World Wide Name (0x83) and Unit
5205 	 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using
5206 	 * un_vpd_page_mask from them, we decide which way to get the WWN. If
5207 	 * 0x83 is available, that is the best choice. Our next choice is
5208 	 * 0x80. If neither is available, we munge the devid from the device
5209 	 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
5210 	 * to fabricate a devid for non-Sun qualified disks.
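	 *
	 * In short, the preference order is: VPD page 0x83 (device
	 * identification), then VPD page 0x80 (unit serial number), then
	 * a devid munged from the INQUIRY vid/pid/serial data for Sun
	 * qualified disks, and finally a DDI-fabricated devid.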
5211 */ 5212 if (sd_check_vpd_page_support(ssc) == 0) { 5213 /* collect page 80 data if available */ 5214 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 5215 5216 mutex_exit(SD_MUTEX(un)); 5217 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 5218 5219 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len, 5220 0x01, 0x80, &inq80_resid); 5221 5222 if (rval != 0) { 5223 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5224 kmem_free(inq80, inq80_len); 5225 inq80 = NULL; 5226 inq80_len = 0; 5227 } else if (ddi_prop_exists( 5228 DDI_DEV_T_NONE, SD_DEVINFO(un), 5229 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 5230 INQUIRY_SERIAL_NO) == 0) { 5231 /* 5232 * If we don't already have a serial number 5233 * property, do quick verify of data returned 5234 * and define property. 5235 */ 5236 dlen = inq80_len - inq80_resid; 5237 len = (size_t)inq80[3]; 5238 if ((dlen >= 4) && ((len + 4) <= dlen)) { 5239 /* 5240 * Ensure sn termination, skip leading 5241 * blanks, and create property 5242 * 'inquiry-serial-no'. 5243 */ 5244 sn = (char *)&inq80[4]; 5245 sn[len] = 0; 5246 while (*sn && (*sn == ' ')) 5247 sn++; 5248 if (*sn) { 5249 (void) ddi_prop_update_string( 5250 DDI_DEV_T_NONE, 5251 SD_DEVINFO(un), 5252 INQUIRY_SERIAL_NO, sn); 5253 } 5254 } 5255 } 5256 mutex_enter(SD_MUTEX(un)); 5257 } 5258 5259 /* collect page 83 data if available */ 5260 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 5261 mutex_exit(SD_MUTEX(un)); 5262 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 5263 5264 rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len, 5265 0x01, 0x83, &inq83_resid); 5266 5267 if (rval != 0) { 5268 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5269 kmem_free(inq83, inq83_len); 5270 inq83 = NULL; 5271 inq83_len = 0; 5272 } 5273 mutex_enter(SD_MUTEX(un)); 5274 } 5275 } 5276 5277 /* 5278 * If transport has already registered a devid for this target 5279 * then that takes precedence over the driver's determination 5280 * of the devid. 5281 * 5282 * NOTE: The reason this check is done here instead of at the beginning 5283 * of the function is to allow the code above to create the 5284 * 'inquiry-serial-no' property. 5285 */ 5286 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) { 5287 ASSERT(un->un_devid); 5288 un->un_f_devid_transport_defined = TRUE; 5289 goto cleanup; /* use devid registered by the transport */ 5290 } 5291 5292 /* 5293 * This is the case of antiquated Sun disk drives that have the 5294 * FAB_DEVID property set in the disk_table. These drives 5295 * manage the devid's by storing them in last 2 available sectors 5296 * on the drive and have them fabricated by the ddi layer by calling 5297 * ddi_devid_init and passing the DEVID_FAB flag. 5298 */ 5299 if (un->un_f_opt_fab_devid == TRUE) { 5300 /* 5301 * Depending on EINVAL isn't reliable, since a reserved disk 5302 * may result in invalid geometry, so check to make sure a 5303 * reservation conflict did not occur during attach. 5304 */ 5305 if ((sd_get_devid(ssc) == EINVAL) && 5306 (reservation_flag != SD_TARGET_IS_RESERVED)) { 5307 /* 5308 * The devid is invalid AND there is no reservation 5309 * conflict. Fabricate a new devid. 
5310 			 */
5311 			(void) sd_create_devid(ssc);
5312 		}
5313 
5314 		/* Register the devid if it exists */
5315 		if (un->un_devid != NULL) {
5316 			(void) ddi_devid_register(SD_DEVINFO(un),
5317 			    un->un_devid);
5318 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
5319 			    "sd_register_devid: Devid Fabricated\n");
5320 		}
5321 		goto cleanup;
5322 	}
5323 
5324 	/* encode best devid possible based on data available */
5325 	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
5326 	    (char *)ddi_driver_name(SD_DEVINFO(un)),
5327 	    (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
5328 	    inq80, inq80_len - inq80_resid, inq83, inq83_len -
5329 	    inq83_resid, &un->un_devid) == DDI_SUCCESS) {
5330 
5331 		/* devid successfully encoded, register devid */
5332 		(void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);
5333 
5334 	} else {
5335 		/*
5336 		 * Unable to encode a devid based on data available.
5337 		 * This is not a Sun qualified disk. Older Sun disk
5338 		 * drives that have the SD_FAB_DEVID property
5339 		 * set in the disk_table and non-Sun qualified
5340 		 * disks are treated in the same manner. These
5341 		 * drives manage the devids by storing them in
5342 		 * the last two available sectors on the drive and
5343 		 * have them fabricated by the ddi layer by
5344 		 * calling ddi_devid_init and passing the
5345 		 * DEVID_FAB flag.
5346 		 * Fabricate a devid only if no fabricated devid
5347 		 * already exists.
5348 		 */
5349 		if (sd_get_devid(ssc) == EINVAL) {
5350 			(void) sd_create_devid(ssc);
5351 		}
5352 		un->un_f_opt_fab_devid = TRUE;
5353 
5354 		/* Register the devid if it exists */
5355 		if (un->un_devid != NULL) {
5356 			(void) ddi_devid_register(SD_DEVINFO(un),
5357 			    un->un_devid);
5358 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
5359 			    "sd_register_devid: devid fabricated using "
5360 			    "ddi framework\n");
5361 		}
5362 	}
5363 
5364 cleanup:
5365 	/* clean up resources */
5366 	if (inq80 != NULL) {
5367 		kmem_free(inq80, inq80_len);
5368 	}
5369 	if (inq83 != NULL) {
5370 		kmem_free(inq83, inq83_len);
5371 	}
5372 }
5373 
5374 
5375 
5376 /*
5377  * Function: sd_get_devid
5378  *
5379  * Description: This routine will return 0 if a valid device id has been
5380  *		obtained from the target and stored in the soft state. If a
5381  *		valid device id has not been previously read and stored, a
5382  *		read attempt will be made.
5383  *
5384  * Arguments: ssc - access handle; ssc->ssc_un is the unit struct
5385  *
5386  * Return Code: 0 if we successfully get the device id
5387  *
5388  * Context: Kernel Thread
5389  */
5390 
5391 static int
5392 sd_get_devid(sd_ssc_t *ssc)
5393 {
5394 	struct dk_devid		*dkdevid;
5395 	ddi_devid_t		tmpid;
5396 	uint_t			*ip;
5397 	size_t			sz;
5398 	diskaddr_t		blk;
5399 	int			status;
5400 	int			chksum;
5401 	int			i;
5402 	size_t			buffer_size;
5403 	struct sd_lun		*un;
5404 
5405 	ASSERT(ssc != NULL);
5406 	un = ssc->ssc_un;
5407 	ASSERT(un != NULL);
5408 	ASSERT(mutex_owned(SD_MUTEX(un)));
5409 
5410 	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n",
5411 	    un);
5412 
5413 	if (un->un_devid != NULL) {
5414 		return (0);
5415 	}
5416 
5417 	mutex_exit(SD_MUTEX(un));
5418 	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
5419 	    (void *)SD_PATH_DIRECT) != 0) {
5420 		mutex_enter(SD_MUTEX(un));
5421 		return (EINVAL);
5422 	}
5423 
5424 	/*
5425 	 * Read and verify the device id, stored in the reserved cylinders
5426 	 * at the end of the disk. The backup label is on the odd sectors of
5427 	 * the last track of the last cylinder. The device id is on a track
5428 	 * of the next-to-last cylinder.
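	 *
	 * For reference, a rough sketch of the dk_devid sector verified
	 * below (field names as used in this routine; see the struct
	 * definition for the authoritative layout):
	 *
	 *	dkd_rev_hi/dkd_rev_lo	revision (DK_DEVID_REV_MSB/LSB)
	 *	dkd_devid		the ddi devid blob
	 *	last 32-bit word	checksum: the XOR of all preceding
	 *				32-bit words in the DEV_BSIZE block
	 *				(compared via DKD_GETCHKSUM below)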
5429 */ 5430 mutex_enter(SD_MUTEX(un)); 5431 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 5432 mutex_exit(SD_MUTEX(un)); 5433 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 5434 status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk, 5435 SD_PATH_DIRECT); 5436 5437 if (status != 0) { 5438 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5439 goto error; 5440 } 5441 5442 /* Validate the revision */ 5443 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 5444 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 5445 status = EINVAL; 5446 goto error; 5447 } 5448 5449 /* Calculate the checksum */ 5450 chksum = 0; 5451 ip = (uint_t *)dkdevid; 5452 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int)); 5453 i++) { 5454 chksum ^= ip[i]; 5455 } 5456 5457 /* Compare the checksums */ 5458 if (DKD_GETCHKSUM(dkdevid) != chksum) { 5459 status = EINVAL; 5460 goto error; 5461 } 5462 5463 /* Validate the device id */ 5464 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 5465 status = EINVAL; 5466 goto error; 5467 } 5468 5469 /* 5470 * Store the device id in the driver soft state 5471 */ 5472 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 5473 tmpid = kmem_alloc(sz, KM_SLEEP); 5474 5475 mutex_enter(SD_MUTEX(un)); 5476 5477 un->un_devid = tmpid; 5478 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 5479 5480 kmem_free(dkdevid, buffer_size); 5481 5482 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 5483 5484 return (status); 5485 error: 5486 mutex_enter(SD_MUTEX(un)); 5487 kmem_free(dkdevid, buffer_size); 5488 return (status); 5489 } 5490 5491 5492 /* 5493 * Function: sd_create_devid 5494 * 5495 * Description: This routine will fabricate the device id and write it 5496 * to the disk. 5497 * 5498 * Arguments: un - driver soft state (unit) structure 5499 * 5500 * Return Code: value of the fabricated device id 5501 * 5502 * Context: Kernel Thread 5503 */ 5504 5505 static ddi_devid_t 5506 sd_create_devid(sd_ssc_t *ssc) 5507 { 5508 struct sd_lun *un; 5509 5510 ASSERT(ssc != NULL); 5511 un = ssc->ssc_un; 5512 ASSERT(un != NULL); 5513 5514 /* Fabricate the devid */ 5515 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 5516 == DDI_FAILURE) { 5517 return (NULL); 5518 } 5519 5520 /* Write the devid to disk */ 5521 if (sd_write_deviceid(ssc) != 0) { 5522 ddi_devid_free(un->un_devid); 5523 un->un_devid = NULL; 5524 } 5525 5526 return (un->un_devid); 5527 } 5528 5529 5530 /* 5531 * Function: sd_write_deviceid 5532 * 5533 * Description: This routine will write the device id to the disk 5534 * reserved sector. 
5535 * 5536 * Arguments: un - driver soft state (unit) structure 5537 * 5538 * Return Code: EINVAL 5539 * value returned by sd_send_scsi_cmd 5540 * 5541 * Context: Kernel Thread 5542 */ 5543 5544 static int 5545 sd_write_deviceid(sd_ssc_t *ssc) 5546 { 5547 struct dk_devid *dkdevid; 5548 uchar_t *buf; 5549 diskaddr_t blk; 5550 uint_t *ip, chksum; 5551 int status; 5552 int i; 5553 struct sd_lun *un; 5554 5555 ASSERT(ssc != NULL); 5556 un = ssc->ssc_un; 5557 ASSERT(un != NULL); 5558 ASSERT(mutex_owned(SD_MUTEX(un))); 5559 5560 mutex_exit(SD_MUTEX(un)); 5561 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5562 (void *)SD_PATH_DIRECT) != 0) { 5563 mutex_enter(SD_MUTEX(un)); 5564 return (-1); 5565 } 5566 5567 5568 /* Allocate the buffer */ 5569 buf = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 5570 dkdevid = (struct dk_devid *)buf; 5571 5572 /* Fill in the revision */ 5573 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 5574 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 5575 5576 /* Copy in the device id */ 5577 mutex_enter(SD_MUTEX(un)); 5578 bcopy(un->un_devid, &dkdevid->dkd_devid, 5579 ddi_devid_sizeof(un->un_devid)); 5580 mutex_exit(SD_MUTEX(un)); 5581 5582 /* Calculate the checksum */ 5583 chksum = 0; 5584 ip = (uint_t *)dkdevid; 5585 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int)); 5586 i++) { 5587 chksum ^= ip[i]; 5588 } 5589 5590 /* Fill-in checksum */ 5591 DKD_FORMCHKSUM(chksum, dkdevid); 5592 5593 /* Write the reserved sector */ 5594 status = sd_send_scsi_WRITE(ssc, buf, un->un_sys_blocksize, blk, 5595 SD_PATH_DIRECT); 5596 if (status != 0) 5597 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5598 5599 kmem_free(buf, un->un_sys_blocksize); 5600 5601 mutex_enter(SD_MUTEX(un)); 5602 return (status); 5603 } 5604 5605 5606 /* 5607 * Function: sd_check_vpd_page_support 5608 * 5609 * Description: This routine sends an inquiry command with the EVPD bit set and 5610 * a page code of 0x00 to the device. It is used to determine which 5611 * vital product pages are available to find the devid. We are 5612 * looking for pages 0x83 or 0x80. If we return a negative 1, the 5613 * device does not support that command. 5614 * 5615 * Arguments: un - driver soft state (unit) structure 5616 * 5617 * Return Code: 0 - success 5618 * 1 - check condition 5619 * 5620 * Context: This routine can sleep. 5621 */ 5622 5623 static int 5624 sd_check_vpd_page_support(sd_ssc_t *ssc) 5625 { 5626 uchar_t *page_list = NULL; 5627 uchar_t page_length = 0xff; /* Use max possible length */ 5628 uchar_t evpd = 0x01; /* Set the EVPD bit */ 5629 uchar_t page_code = 0x00; /* Supported VPD Pages */ 5630 int rval = 0; 5631 int counter; 5632 struct sd_lun *un; 5633 5634 ASSERT(ssc != NULL); 5635 un = ssc->ssc_un; 5636 ASSERT(un != NULL); 5637 ASSERT(mutex_owned(SD_MUTEX(un))); 5638 5639 mutex_exit(SD_MUTEX(un)); 5640 5641 /* 5642 * We'll set the page length to the maximum to save figuring it out 5643 * with an additional call. 5644 */ 5645 page_list = kmem_zalloc(page_length, KM_SLEEP); 5646 5647 rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd, 5648 page_code, NULL); 5649 5650 if (rval != 0) 5651 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5652 5653 mutex_enter(SD_MUTEX(un)); 5654 5655 /* 5656 * Now we must validate that the device accepted the command, as some 5657 * drives do not support it. If the drive does support it, we will 5658 * return 0, and the supported pages will be in un_vpd_page_mask. If 5659 * not, we return -1. 
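	 *
	 * For illustration (a hedged sketch of a typical response, not
	 * taken from any specific drive): a device supporting pages 0x00,
	 * 0x80 and 0x83 returns a 4-byte header followed by the page codes
	 * in ascending order, e.g.
	 *
	 *	00 00 00 03 00 80 83
	 *
	 * where byte 3 is the page length (3) and the page codes start at
	 * byte 4, which is where the loop below begins scanning.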
5660 */ 5661 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5662 /* Loop to find one of the 2 pages we need */ 5663 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5664 5665 /* 5666 * Pages are returned in ascending order, and 0x83 is what we 5667 * are hoping for. 5668 */ 5669 while ((page_list[counter] <= 0x86) && 5670 (counter <= (page_list[VPD_PAGE_LENGTH] + 5671 VPD_HEAD_OFFSET))) { 5672 /* 5673 * Add 3 because page_list[3] is the number of 5674 * pages minus 3 5675 */ 5676 5677 switch (page_list[counter]) { 5678 case 0x00: 5679 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5680 break; 5681 case 0x80: 5682 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5683 break; 5684 case 0x81: 5685 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5686 break; 5687 case 0x82: 5688 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5689 break; 5690 case 0x83: 5691 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5692 break; 5693 case 0x86: 5694 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5695 break; 5696 } 5697 counter++; 5698 } 5699 5700 } else { 5701 rval = -1; 5702 5703 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5704 "sd_check_vpd_page_support: This drive does not implement " 5705 "VPD pages.\n"); 5706 } 5707 5708 kmem_free(page_list, page_length); 5709 5710 return (rval); 5711 } 5712 5713 5714 /* 5715 * Function: sd_setup_pm 5716 * 5717 * Description: Initialize Power Management on the device 5718 * 5719 * Context: Kernel Thread 5720 */ 5721 5722 static void 5723 sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi) 5724 { 5725 uint_t log_page_size; 5726 uchar_t *log_page_data; 5727 int rval = 0; 5728 struct sd_lun *un; 5729 5730 ASSERT(ssc != NULL); 5731 un = ssc->ssc_un; 5732 ASSERT(un != NULL); 5733 5734 /* 5735 * Since we are called from attach, holding a mutex for 5736 * un is unnecessary. Because some of the routines called 5737 * from here require SD_MUTEX to not be held, assert this 5738 * right up front. 5739 */ 5740 ASSERT(!mutex_owned(SD_MUTEX(un))); 5741 /* 5742 * Since the sd device does not have the 'reg' property, 5743 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5744 * The following code is to tell cpr that this device 5745 * DOES need to be suspended and resumed. 5746 */ 5747 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5748 "pm-hardware-state", "needs-suspend-resume"); 5749 5750 /* 5751 * This complies with the new power management framework 5752 * for certain desktop machines. Create the pm_components 5753 * property as a string array property. 5754 */ 5755 if (un->un_f_pm_supported) { 5756 /* 5757 * not all devices have a motor, try it first. 5758 * some devices may return ILLEGAL REQUEST, some 5759 * will hang 5760 * The following START_STOP_UNIT is used to check if target 5761 * device has a motor. 
5762 		 */
5763 		un->un_f_start_stop_supported = TRUE;
5764 		rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START,
5765 		    SD_PATH_DIRECT);
5766 
5767 		if (rval != 0) {
5768 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5769 			un->un_f_start_stop_supported = FALSE;
5770 		}
5771 
5772 		/*
5773 		 * Create the pm properties anyway; otherwise the parent
5774 		 * can't go to sleep.
5775 		 */
5776 		(void) sd_create_pm_components(devi, un);
5777 		un->un_f_pm_is_enabled = TRUE;
5778 		return;
5779 	}
5780 
5781 	if (!un->un_f_log_sense_supported) {
5782 		un->un_power_level = SD_SPINDLE_ON;
5783 		un->un_f_pm_is_enabled = FALSE;
5784 		return;
5785 	}
5786 
5787 	rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE);
5788 
5789 #ifdef SDDEBUG
5790 	if (sd_force_pm_supported) {
5791 		/* Force a successful result */
5792 		rval = 1;
5793 	}
5794 #endif
5795 
5796 	/*
5797 	 * If the start-stop cycle counter log page is not supported
5798 	 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0)
5799 	 * then we should not create the pm_components property.
5800 	 */
5801 	if (rval == -1) {
5802 		/*
5803 		 * Error.
5804 		 * Reading log sense failed, most likely this is
5805 		 * an older drive that does not support log sense.
5806 		 * If this fails auto-pm is not supported.
5807 		 */
5808 		un->un_power_level = SD_SPINDLE_ON;
5809 		un->un_f_pm_is_enabled = FALSE;
5810 
5811 	} else if (rval == 0) {
5812 		/*
5813 		 * Page not found.
5814 		 * The start stop cycle counter is implemented as page
5815 		 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For
5816 		 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE).
5817 		 */
5818 		if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) {
5819 			/*
5820 			 * Page found, use this one.
5821 			 */
5822 			un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
5823 			un->un_f_pm_is_enabled = TRUE;
5824 		} else {
5825 			/*
5826 			 * Error or page not found.
5827 			 * auto-pm is not supported for this device.
5828 			 */
5829 			un->un_power_level = SD_SPINDLE_ON;
5830 			un->un_f_pm_is_enabled = FALSE;
5831 		}
5832 	} else {
5833 		/*
5834 		 * Page found, use it.
5835 		 */
5836 		un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
5837 		un->un_f_pm_is_enabled = TRUE;
5838 	}
5839 
5840 
5841 	if (un->un_f_pm_is_enabled == TRUE) {
5842 		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
5843 		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
5844 
5845 		rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
5846 		    log_page_size, un->un_start_stop_cycle_page,
5847 		    0x01, 0, SD_PATH_DIRECT);
5848 
5849 		if (rval != 0) {
5850 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5851 		}
5852 
5853 #ifdef SDDEBUG
5854 		if (sd_force_pm_supported) {
5855 			/* Force a successful result */
5856 			rval = 0;
5857 		}
5858 #endif
5859 
5860 		/*
5861 		 * If the LOG SENSE for the start/stop cycle counter page
5862 		 * succeeds, then power management is supported and we can
5863 		 * enable auto-pm.
5864 		 */
5865 		if (rval == 0) {
5866 			(void) sd_create_pm_components(devi, un);
5867 		} else {
5868 			un->un_power_level = SD_SPINDLE_ON;
5869 			un->un_f_pm_is_enabled = FALSE;
5870 		}
5871 
5872 		kmem_free(log_page_data, log_page_size);
5873 	}
5874 }
5875 
5876 
5877 /*
5878  * Function: sd_create_pm_components
5879  *
5880  * Description: Initialize the pm-components property.
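 *		As an illustration, the property created is a string
 *		array mirroring the pm_comp array in the function body:
 *
 *			pm-components = "NAME=spindle-motor", "0=off", "1=on";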
5881 * 5882 * Context: Kernel thread context 5883 */ 5884 5885 static void 5886 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 5887 { 5888 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 5889 5890 ASSERT(!mutex_owned(SD_MUTEX(un))); 5891 5892 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 5893 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 5894 /* 5895 * When components are initially created they are idle, 5896 * power up any non-removables. 5897 * Note: the return value of pm_raise_power can't be used 5898 * for determining if PM should be enabled for this device. 5899 * Even if you check the return values and remove this 5900 * property created above, the PM framework will not honor the 5901 * change after the first call to pm_raise_power. Hence, 5902 * removal of that property does not help if pm_raise_power 5903 * fails. In the case of removable media, the start/stop 5904 * will fail if the media is not present. 5905 */ 5906 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5907 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5908 mutex_enter(SD_MUTEX(un)); 5909 un->un_power_level = SD_SPINDLE_ON; 5910 mutex_enter(&un->un_pm_mutex); 5911 /* Set to on and not busy. */ 5912 un->un_pm_count = 0; 5913 } else { 5914 mutex_enter(SD_MUTEX(un)); 5915 un->un_power_level = SD_SPINDLE_OFF; 5916 mutex_enter(&un->un_pm_mutex); 5917 /* Set to off. */ 5918 un->un_pm_count = -1; 5919 } 5920 mutex_exit(&un->un_pm_mutex); 5921 mutex_exit(SD_MUTEX(un)); 5922 } else { 5923 un->un_power_level = SD_SPINDLE_ON; 5924 un->un_f_pm_is_enabled = FALSE; 5925 } 5926 } 5927 5928 5929 /* 5930 * Function: sd_ddi_suspend 5931 * 5932 * Description: Performs system power-down operations. This includes 5933 * setting the drive state to indicate its suspended so 5934 * that no new commands will be accepted. Also, wait for 5935 * all commands that are in transport or queued to a timer 5936 * for retry to complete. All timeout threads are cancelled. 5937 * 5938 * Return Code: DDI_FAILURE or DDI_SUCCESS 5939 * 5940 * Context: Kernel thread context 5941 */ 5942 5943 static int 5944 sd_ddi_suspend(dev_info_t *devi) 5945 { 5946 struct sd_lun *un; 5947 clock_t wait_cmds_complete; 5948 5949 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5950 if (un == NULL) { 5951 return (DDI_FAILURE); 5952 } 5953 5954 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 5955 5956 mutex_enter(SD_MUTEX(un)); 5957 5958 /* Return success if the device is already suspended. */ 5959 if (un->un_state == SD_STATE_SUSPENDED) { 5960 mutex_exit(SD_MUTEX(un)); 5961 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5962 "device already suspended, exiting\n"); 5963 return (DDI_SUCCESS); 5964 } 5965 5966 /* Return failure if the device is being used by HA */ 5967 if (un->un_resvd_status & 5968 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 5969 mutex_exit(SD_MUTEX(un)); 5970 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5971 "device in use by HA, exiting\n"); 5972 return (DDI_FAILURE); 5973 } 5974 5975 /* 5976 * Return failure if the device is in a resource wait 5977 * or power changing state. 
5978 */ 5979 if ((un->un_state == SD_STATE_RWAIT) || 5980 (un->un_state == SD_STATE_PM_CHANGING)) { 5981 mutex_exit(SD_MUTEX(un)); 5982 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5983 "device in resource wait state, exiting\n"); 5984 return (DDI_FAILURE); 5985 } 5986 5987 5988 un->un_save_state = un->un_last_state; 5989 New_state(un, SD_STATE_SUSPENDED); 5990 5991 /* 5992 * Wait for all commands that are in transport or queued to a timer 5993 * for retry to complete. 5994 * 5995 * While waiting, no new commands will be accepted or sent because of 5996 * the new state we set above. 5997 * 5998 * Wait till current operation has completed. If we are in the resource 5999 * wait state (with an intr outstanding) then we need to wait till the 6000 * intr completes and starts the next cmd. We want to wait for 6001 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 6002 */ 6003 wait_cmds_complete = ddi_get_lbolt() + 6004 (sd_wait_cmds_complete * drv_usectohz(1000000)); 6005 6006 while (un->un_ncmds_in_transport != 0) { 6007 /* 6008 * Fail if commands do not finish in the specified time. 6009 */ 6010 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 6011 wait_cmds_complete) == -1) { 6012 /* 6013 * Undo the state changes made above. Everything 6014 * must go back to it's original value. 6015 */ 6016 Restore_state(un); 6017 un->un_last_state = un->un_save_state; 6018 /* Wake up any threads that might be waiting. */ 6019 cv_broadcast(&un->un_suspend_cv); 6020 mutex_exit(SD_MUTEX(un)); 6021 SD_ERROR(SD_LOG_IO_PM, un, 6022 "sd_ddi_suspend: failed due to outstanding cmds\n"); 6023 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 6024 return (DDI_FAILURE); 6025 } 6026 } 6027 6028 /* 6029 * Cancel SCSI watch thread and timeouts, if any are active 6030 */ 6031 6032 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 6033 opaque_t temp_token = un->un_swr_token; 6034 mutex_exit(SD_MUTEX(un)); 6035 scsi_watch_suspend(temp_token); 6036 mutex_enter(SD_MUTEX(un)); 6037 } 6038 6039 if (un->un_reset_throttle_timeid != NULL) { 6040 timeout_id_t temp_id = un->un_reset_throttle_timeid; 6041 un->un_reset_throttle_timeid = NULL; 6042 mutex_exit(SD_MUTEX(un)); 6043 (void) untimeout(temp_id); 6044 mutex_enter(SD_MUTEX(un)); 6045 } 6046 6047 if (un->un_dcvb_timeid != NULL) { 6048 timeout_id_t temp_id = un->un_dcvb_timeid; 6049 un->un_dcvb_timeid = NULL; 6050 mutex_exit(SD_MUTEX(un)); 6051 (void) untimeout(temp_id); 6052 mutex_enter(SD_MUTEX(un)); 6053 } 6054 6055 mutex_enter(&un->un_pm_mutex); 6056 if (un->un_pm_timeid != NULL) { 6057 timeout_id_t temp_id = un->un_pm_timeid; 6058 un->un_pm_timeid = NULL; 6059 mutex_exit(&un->un_pm_mutex); 6060 mutex_exit(SD_MUTEX(un)); 6061 (void) untimeout(temp_id); 6062 mutex_enter(SD_MUTEX(un)); 6063 } else { 6064 mutex_exit(&un->un_pm_mutex); 6065 } 6066 6067 if (un->un_rmw_msg_timeid != NULL) { 6068 timeout_id_t temp_id = un->un_rmw_msg_timeid; 6069 un->un_rmw_msg_timeid = NULL; 6070 mutex_exit(SD_MUTEX(un)); 6071 (void) untimeout(temp_id); 6072 mutex_enter(SD_MUTEX(un)); 6073 } 6074 6075 if (un->un_retry_timeid != NULL) { 6076 timeout_id_t temp_id = un->un_retry_timeid; 6077 un->un_retry_timeid = NULL; 6078 mutex_exit(SD_MUTEX(un)); 6079 (void) untimeout(temp_id); 6080 mutex_enter(SD_MUTEX(un)); 6081 6082 if (un->un_retry_bp != NULL) { 6083 un->un_retry_bp->av_forw = un->un_waitq_headp; 6084 un->un_waitq_headp = un->un_retry_bp; 6085 if (un->un_waitq_tailp == NULL) { 6086 un->un_waitq_tailp = un->un_retry_bp; 6087 } 6088 un->un_retry_bp = NULL; 6089 un->un_retry_statp 
= NULL;
6090 		}
6091 	}
6092 
6093 	if (un->un_direct_priority_timeid != NULL) {
6094 		timeout_id_t temp_id = un->un_direct_priority_timeid;
6095 		un->un_direct_priority_timeid = NULL;
6096 		mutex_exit(SD_MUTEX(un));
6097 		(void) untimeout(temp_id);
6098 		mutex_enter(SD_MUTEX(un));
6099 	}
6100 
6101 	if (un->un_f_is_fibre == TRUE) {
6102 		/*
6103 		 * Remove callbacks for insert and remove events
6104 		 */
6105 		if (un->un_insert_event != NULL) {
6106 			mutex_exit(SD_MUTEX(un));
6107 			(void) ddi_remove_event_handler(un->un_insert_cb_id);
6108 			mutex_enter(SD_MUTEX(un));
6109 			un->un_insert_event = NULL;
6110 		}
6111 
6112 		if (un->un_remove_event != NULL) {
6113 			mutex_exit(SD_MUTEX(un));
6114 			(void) ddi_remove_event_handler(un->un_remove_cb_id);
6115 			mutex_enter(SD_MUTEX(un));
6116 			un->un_remove_event = NULL;
6117 		}
6118 	}
6119 
6120 	mutex_exit(SD_MUTEX(un));
6121 
6122 	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");
6123 
6124 	return (DDI_SUCCESS);
6125 }
6126 
6127 
6128 /*
6129  * Function: sd_ddi_pm_suspend
6130  *
6131  * Description: Set the drive state to low power.
6132  *		Someone else is required to actually change the drive
6133  *		power level.
6134  *
6135  * Arguments: un - driver soft state (unit) structure
6136  *
6137  * Return Code: DDI_FAILURE or DDI_SUCCESS
6138  *
6139  * Context: Kernel thread context
6140  */
6141 
6142 static int
6143 sd_ddi_pm_suspend(struct sd_lun *un)
6144 {
6145 	ASSERT(un != NULL);
6146 	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n");
6147 
6148 	ASSERT(!mutex_owned(SD_MUTEX(un)));
6149 	mutex_enter(SD_MUTEX(un));
6150 
6151 	/*
6152 	 * Exit if power management is not enabled for this device, or if
6153 	 * the device is being used by HA.
6154 	 */
6155 	if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
6156 	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
6157 		mutex_exit(SD_MUTEX(un));
6158 		SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n");
6159 		return (DDI_SUCCESS);
6160 	}
6161 
6162 	SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n",
6163 	    un->un_ncmds_in_driver);
6164 
6165 	/*
6166 	 * See if the device is not busy, i.e.:
6167 	 * - we have no commands in the driver for this device
6168 	 * - not waiting for resources
6169 	 */
6170 	if ((un->un_ncmds_in_driver == 0) &&
6171 	    (un->un_state != SD_STATE_RWAIT)) {
6172 		/*
6173 		 * The device is not busy, so it is OK to go to low power state.
6174 		 * Indicate low power, but rely on someone else to actually
6175 		 * change it.
6176 		 */
6177 		mutex_enter(&un->un_pm_mutex);
6178 		un->un_pm_count = -1;
6179 		mutex_exit(&un->un_pm_mutex);
6180 		un->un_power_level = SD_SPINDLE_OFF;
6181 	}
6182 
6183 	mutex_exit(SD_MUTEX(un));
6184 
6185 	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n");
6186 
6187 	return (DDI_SUCCESS);
6188 }
6189 
6190 
6191 /*
6192  * Function: sd_ddi_resume
6193  *
6194  * Description: Performs system power-up operations.
6195  *
6196  * Return Code: DDI_SUCCESS
6197  *		DDI_FAILURE
6198  *
6199  * Context: Kernel thread context
6200  */
6201 
6202 static int
6203 sd_ddi_resume(dev_info_t *devi)
6204 {
6205 	struct sd_lun	*un;
6206 
6207 	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
6208 	if (un == NULL) {
6209 		return (DDI_FAILURE);
6210 	}
6211 
6212 	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");
6213 
6214 	mutex_enter(SD_MUTEX(un));
6215 	Restore_state(un);
6216 
6217 	/*
6218 	 * Restore the state which was saved to give
6219 	 * the right state in un_last_state
6220 	 */
6221 	un->un_last_state = un->un_save_state;
6222 	/*
6223 	 * Note: throttle comes back at full.
6224 * Also note: this MUST be done before calling pm_raise_power 6225 * otherwise the system can get hung in biowait. The scenario where 6226 * this'll happen is under cpr suspend. Writing of the system 6227 * state goes through sddump, which writes 0 to un_throttle. If 6228 * writing the system state then fails, example if the partition is 6229 * too small, then cpr attempts a resume. If throttle isn't restored 6230 * from the saved value until after calling pm_raise_power then 6231 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 6232 * in biowait. 6233 */ 6234 un->un_throttle = un->un_saved_throttle; 6235 6236 /* 6237 * The chance of failure is very rare as the only command done in power 6238 * entry point is START command when you transition from 0->1 or 6239 * unknown->1. Put it to SPINDLE ON state irrespective of the state at 6240 * which suspend was done. Ignore the return value as the resume should 6241 * not be failed. In the case of removable media the media need not be 6242 * inserted and hence there is a chance that raise power will fail with 6243 * media not present. 6244 */ 6245 if (un->un_f_attach_spinup) { 6246 mutex_exit(SD_MUTEX(un)); 6247 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 6248 mutex_enter(SD_MUTEX(un)); 6249 } 6250 6251 /* 6252 * Don't broadcast to the suspend cv and therefore possibly 6253 * start I/O until after power has been restored. 6254 */ 6255 cv_broadcast(&un->un_suspend_cv); 6256 cv_broadcast(&un->un_state_cv); 6257 6258 /* restart thread */ 6259 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 6260 scsi_watch_resume(un->un_swr_token); 6261 } 6262 6263 #if (defined(__fibre)) 6264 if (un->un_f_is_fibre == TRUE) { 6265 /* 6266 * Add callbacks for insert and remove events 6267 */ 6268 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 6269 sd_init_event_callbacks(un); 6270 } 6271 } 6272 #endif 6273 6274 /* 6275 * Transport any pending commands to the target. 6276 * 6277 * If this is a low-activity device commands in queue will have to wait 6278 * until new commands come in, which may take awhile. Also, we 6279 * specifically don't check un_ncmds_in_transport because we know that 6280 * there really are no commands in progress after the unit was 6281 * suspended and we could have reached the throttle level, been 6282 * suspended, and have no new commands coming in for awhile. Highly 6283 * unlikely, but so is the low-activity disk scenario. 6284 */ 6285 ddi_xbuf_dispatch(un->un_xbuf_attr); 6286 6287 sd_start_cmds(un, NULL); 6288 mutex_exit(SD_MUTEX(un)); 6289 6290 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 6291 6292 return (DDI_SUCCESS); 6293 } 6294 6295 6296 /* 6297 * Function: sd_ddi_pm_resume 6298 * 6299 * Description: Set the drive state to powered on. 6300 * Someone else is required to actually change the drive 6301 * power level. 6302 * 6303 * Arguments: un - driver soft state (unit) structure 6304 * 6305 * Return Code: DDI_SUCCESS 6306 * 6307 * Context: Kernel thread context 6308 */ 6309 6310 static int 6311 sd_ddi_pm_resume(struct sd_lun *un) 6312 { 6313 ASSERT(un != NULL); 6314 6315 ASSERT(!mutex_owned(SD_MUTEX(un))); 6316 mutex_enter(SD_MUTEX(un)); 6317 un->un_power_level = SD_SPINDLE_ON; 6318 6319 ASSERT(!mutex_owned(&un->un_pm_mutex)); 6320 mutex_enter(&un->un_pm_mutex); 6321 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 6322 un->un_pm_count++; 6323 ASSERT(un->un_pm_count == 0); 6324 /* 6325 * Note: no longer do the cv_broadcast on un_suspend_cv. 
The 6326 * un_suspend_cv is for a system resume, not a power management 6327 * device resume. (4297749) 6328 * cv_broadcast(&un->un_suspend_cv); 6329 */ 6330 } 6331 mutex_exit(&un->un_pm_mutex); 6332 mutex_exit(SD_MUTEX(un)); 6333 6334 return (DDI_SUCCESS); 6335 } 6336 6337 6338 /* 6339 * Function: sd_pm_idletimeout_handler 6340 * 6341 * Description: A timer routine that's active only while a device is busy. 6342 * The purpose is to extend slightly the pm framework's busy 6343 * view of the device to prevent busy/idle thrashing for 6344 * back-to-back commands. Do this by comparing the current time 6345 * to the time at which the last command completed and when the 6346 * difference is greater than sd_pm_idletime, call 6347 * pm_idle_component. In addition to indicating idle to the pm 6348 * framework, update the chain type to again use the internal pm 6349 * layers of the driver. 6350 * 6351 * Arguments: arg - driver soft state (unit) structure 6352 * 6353 * Context: Executes in a timeout(9F) thread context 6354 */ 6355 6356 static void 6357 sd_pm_idletimeout_handler(void *arg) 6358 { 6359 struct sd_lun *un = arg; 6360 6361 time_t now; 6362 6363 mutex_enter(&sd_detach_mutex); 6364 if (un->un_detach_count != 0) { 6365 /* Abort if the instance is detaching */ 6366 mutex_exit(&sd_detach_mutex); 6367 return; 6368 } 6369 mutex_exit(&sd_detach_mutex); 6370 6371 now = ddi_get_time(); 6372 /* 6373 * Grab both mutexes, in the proper order, since we're accessing 6374 * both PM and softstate variables. 6375 */ 6376 mutex_enter(SD_MUTEX(un)); 6377 mutex_enter(&un->un_pm_mutex); 6378 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 6379 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 6380 /* 6381 * Update the chain types. 6382 * This takes affect on the next new command received. 6383 */ 6384 if (un->un_f_non_devbsize_supported) { 6385 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 6386 } else { 6387 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 6388 } 6389 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 6390 6391 SD_TRACE(SD_LOG_IO_PM, un, 6392 "sd_pm_idletimeout_handler: idling device\n"); 6393 (void) pm_idle_component(SD_DEVINFO(un), 0); 6394 un->un_pm_idle_timeid = NULL; 6395 } else { 6396 un->un_pm_idle_timeid = 6397 timeout(sd_pm_idletimeout_handler, un, 6398 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 6399 } 6400 mutex_exit(&un->un_pm_mutex); 6401 mutex_exit(SD_MUTEX(un)); 6402 } 6403 6404 6405 /* 6406 * Function: sd_pm_timeout_handler 6407 * 6408 * Description: Callback to tell framework we are idle. 6409 * 6410 * Context: timeout(9f) thread context. 6411 */ 6412 6413 static void 6414 sd_pm_timeout_handler(void *arg) 6415 { 6416 struct sd_lun *un = arg; 6417 6418 (void) pm_idle_component(SD_DEVINFO(un), 0); 6419 mutex_enter(&un->un_pm_mutex); 6420 un->un_pm_timeid = NULL; 6421 mutex_exit(&un->un_pm_mutex); 6422 } 6423 6424 6425 /* 6426 * Function: sdpower 6427 * 6428 * Description: PM entry point. 
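 *		The pm framework invokes this entry point to move
 *		component 0 (the spindle motor) between SD_SPINDLE_OFF
 *		and SD_SPINDLE_ON; requests for any other component or
 *		level are rejected below.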
6429  *
6430  * Return Code: DDI_SUCCESS
6431  *		DDI_FAILURE
6432  *
6433  * Context: Kernel thread context
6434  */
6435 
6436 static int
6437 sdpower(dev_info_t *devi, int component, int level)
6438 {
6439 	struct sd_lun	*un;
6440 	int		instance;
6441 	int		rval = DDI_SUCCESS;
6442 	uint_t		i, log_page_size, maxcycles, ncycles;
6443 	uchar_t		*log_page_data;
6444 	int		log_sense_page;
6445 	int		medium_present;
6446 	time_t		intvlp;
6447 	dev_t		dev;
6448 	struct pm_trans_data	sd_pm_tran_data;
6449 	uchar_t		save_state;
6450 	int		sval;
6451 	uchar_t		state_before_pm;
6452 	int		got_semaphore_here;
6453 	sd_ssc_t	*ssc;
6454 
6455 	instance = ddi_get_instance(devi);
6456 
6457 	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
6458 	    (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) ||
6459 	    component != 0) {
6460 		return (DDI_FAILURE);
6461 	}
6462 
6463 	dev = sd_make_device(SD_DEVINFO(un));
6464 	ssc = sd_ssc_init(un);
6465 
6466 	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);
6467 
6468 	/*
6469 	 * Must synchronize power down with close.
6470 	 * Attempt to decrement/acquire the open/close semaphore,
6471 	 * but do NOT wait on it. If it's not greater than zero,
6472 	 * i.e. it can't be decremented without waiting, then
6473 	 * someone else, either open or close, already has it
6474 	 * and the try returns 0. Use that knowledge here to determine
6475 	 * if it's OK to change the device power level.
6476 	 * Also, only increment it on exit if it was decremented,
6477 	 * i.e. gotten, here.
6478 	 */
6479 	got_semaphore_here = sema_tryp(&un->un_semoclose);
6480 
6481 	mutex_enter(SD_MUTEX(un));
6482 
6483 	SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
6484 	    un->un_ncmds_in_driver);
6485 
6486 	/*
6487 	 * If un_ncmds_in_driver is non-zero it indicates commands are
6488 	 * already being processed in the driver, or if the semaphore was
6489 	 * not gotten here it indicates an open or close is being processed.
6490 	 * At the same time somebody is requesting to go to low power,
6491 	 * which can't happen; therefore we need to return failure.
6492 	 */
6493 	if ((level == SD_SPINDLE_OFF) &&
6494 	    ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
6495 		mutex_exit(SD_MUTEX(un));
6496 
6497 		if (got_semaphore_here != 0) {
6498 			sema_v(&un->un_semoclose);
6499 		}
6500 		SD_TRACE(SD_LOG_IO_PM, un,
6501 		    "sdpower: exit, device has queued cmds.\n");
6502 
6503 		goto sdpower_failed;
6504 	}
6505 
6506 	/*
6507 	 * If the state is OFFLINE the disk is effectively dead;
6508 	 * we would have to send commands to put the disk on or
6509 	 * off, and of course those would fail anyway, so just
6510 	 * return here.
6511 	 *
6512 	 * Power changes to an OFFLINE or SUSPENDED device are not allowed.
6513 	 */
6514 	if ((un->un_state == SD_STATE_OFFLINE) ||
6515 	    (un->un_state == SD_STATE_SUSPENDED)) {
6516 		mutex_exit(SD_MUTEX(un));
6517 
6518 		if (got_semaphore_here != 0) {
6519 			sema_v(&un->un_semoclose);
6520 		}
6521 		SD_TRACE(SD_LOG_IO_PM, un,
6522 		    "sdpower: exit, device is off-line.\n");
6523 
6524 		goto sdpower_failed;
6525 	}
6526 
6527 	/*
6528 	 * Change the device's state to indicate its power level
6529 	 * is being changed. Do this to prevent a power off in the
6530 	 * middle of commands, which is especially bad on devices
6531 	 * that are really powered off instead of just spun down.
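	 * The prior state is saved in state_before_pm; every exit path
	 * below restores it and broadcasts un_suspend_cv to wake any
	 * threads waiting on the power change.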
6532 	 */
6533 	state_before_pm = un->un_state;
6534 	un->un_state = SD_STATE_PM_CHANGING;
6535 
6536 	mutex_exit(SD_MUTEX(un));
6537 
6538 	/*
6539 	 * If the "pm-capable" property is set to TRUE by the HBA
6540 	 * driver, bypass the following check; otherwise, check the
6541 	 * log sense information for this device.
6542 	 */
6543 	if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) {
6544 		/*
6545 		 * Get the log sense information to understand whether
6546 		 * the power cycle counts have gone beyond the threshold.
6547 		 */
6548 		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
6549 		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
6550 
6551 		mutex_enter(SD_MUTEX(un));
6552 		log_sense_page = un->un_start_stop_cycle_page;
6553 		mutex_exit(SD_MUTEX(un));
6554 
6555 		rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
6556 		    log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
6557 
6558 		if (rval != 0) {
6559 			if (rval == EIO)
6560 				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
6561 			else
6562 				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6563 		}
6564 
6565 #ifdef SDDEBUG
6566 		if (sd_force_pm_supported) {
6567 			/* Force a successful result */
6568 			rval = 0;
6569 		}
6570 #endif
6571 		if (rval != 0) {
6572 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
6573 			    "Log Sense Failed\n");
6574 
6575 			kmem_free(log_page_data, log_page_size);
6576 			/* Cannot support power management on those drives */
6577 
6578 			if (got_semaphore_here != 0) {
6579 				sema_v(&un->un_semoclose);
6580 			}
6581 			/*
6582 			 * On exit put the state back to its original value
6583 			 * and broadcast to anyone waiting for the power
6584 			 * change completion.
6585 			 */
6586 			mutex_enter(SD_MUTEX(un));
6587 			un->un_state = state_before_pm;
6588 			cv_broadcast(&un->un_suspend_cv);
6589 			mutex_exit(SD_MUTEX(un));
6590 			SD_TRACE(SD_LOG_IO_PM, un,
6591 			    "sdpower: exit, Log Sense Failed.\n");
6592 
6593 			goto sdpower_failed;
6594 		}
6595 
6596 		/*
6597 		 * From the page data - Convert the essential information to
6598 		 * pm_trans_data
6599 		 */
6600 		maxcycles =
6601 		    (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
6602 		    (log_page_data[0x1E] << 8) | log_page_data[0x1F];
6603 
6604 		sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;
6605 
6606 		ncycles =
6607 		    (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
6608 		    (log_page_data[0x26] << 8) | log_page_data[0x27];
6609 
6610 		sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;
6611 
6612 		for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
6613 			sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
6614 			    log_page_data[8+i];
6615 		}
6616 
6617 		kmem_free(log_page_data, log_page_size);
6618 
6619 		/*
6620 		 * Call pm_trans_check routine to get the OK from
6621 		 * the global policy
6622 		 */
6623 
6624 		sd_pm_tran_data.format = DC_SCSI_FORMAT;
6625 		sd_pm_tran_data.un.scsi_cycles.flag = 0;
6626 
6627 		rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
6628 #ifdef SDDEBUG
6629 		if (sd_force_pm_supported) {
6630 			/* Force a successful result */
6631 			rval = 1;
6632 		}
6633 #endif
6634 		switch (rval) {
6635 		case 0:
6636 			/*
6637 			 * Not OK to power cycle, or an error in the parameters
6638 			 * passed. pm_trans_check has returned the advised time
6639 			 * (intvlp) to wait before a power cycle should be
6640 			 * considered. Based on that we are supposed to pretend
6641 			 * we are busy so that the pm framework will never call
6642 			 * our power entry point. Because of that, install a
6643 			 * timeout handler and wait for the recommended time to
6644 			 * elapse so that power management can be effective again.
6645 			 *
6646 			 * To effect this behavior, call pm_busy_component to
6647 			 * indicate to the framework this device is busy.
6648 			 * By not adjusting un_pm_count, the rest of PM in
6649 			 * the driver will function normally, independent of
6650 			 * this; but because the framework is told the device
6651 			 * is busy, it won't attempt powering down until it
6652 			 * gets a matching idle, which the timeout handler sends.
6653 			 * Note: sd_pm_entry can't be called here to do this
6654 			 * because sdpower may have been called as a result
6655 			 * of a call to pm_raise_power from within sd_pm_entry.
6656 			 *
6657 			 * If a timeout handler is already active then
6658 			 * don't install another.
6659 			 */
6660 			mutex_enter(&un->un_pm_mutex);
6661 			if (un->un_pm_timeid == NULL) {
6662 				un->un_pm_timeid =
6663 				    timeout(sd_pm_timeout_handler,
6664 				    un, intvlp * drv_usectohz(1000000));
6665 				mutex_exit(&un->un_pm_mutex);
6666 				(void) pm_busy_component(SD_DEVINFO(un), 0);
6667 			} else {
6668 				mutex_exit(&un->un_pm_mutex);
6669 			}
6670 			if (got_semaphore_here != 0) {
6671 				sema_v(&un->un_semoclose);
6672 			}
6673 			/*
6674 			 * On exit put the state back to its original value
6675 			 * and broadcast to anyone waiting for the power
6676 			 * change completion.
6677 			 */
6678 			mutex_enter(SD_MUTEX(un));
6679 			un->un_state = state_before_pm;
6680 			cv_broadcast(&un->un_suspend_cv);
6681 			mutex_exit(SD_MUTEX(un));
6682 
6683 			SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
6684 			    "trans check Failed, not ok to power cycle.\n");
6685 
6686 			goto sdpower_failed;
6687 		case -1:
6688 			if (got_semaphore_here != 0) {
6689 				sema_v(&un->un_semoclose);
6690 			}
6691 			/*
6692 			 * On exit put the state back to its original value
6693 			 * and broadcast to anyone waiting for the power
6694 			 * change completion.
6695 			 */
6696 			mutex_enter(SD_MUTEX(un));
6697 			un->un_state = state_before_pm;
6698 			cv_broadcast(&un->un_suspend_cv);
6699 			mutex_exit(SD_MUTEX(un));
6700 			SD_TRACE(SD_LOG_IO_PM, un,
6701 			    "sdpower: exit, trans check command Failed.\n");
6702 
6703 			goto sdpower_failed;
6704 		}
6705 	}
6706 
6707 	if (level == SD_SPINDLE_OFF) {
6708 		/*
6709 		 * Save the last state... if the STOP FAILS we need it
6710 		 * for restoring
6711 		 */
6712 		mutex_enter(SD_MUTEX(un));
6713 		save_state = un->un_last_state;
6714 		/*
6715 		 * There must not be any commands getting processed
6716 		 * in the driver when we get here. Power to the
6717 		 * device is potentially going off.
6718 		 */
6719 		ASSERT(un->un_ncmds_in_driver == 0);
6720 		mutex_exit(SD_MUTEX(un));
6721 
6722 		/*
6723 		 * For now suspend the device completely before the
6724 		 * spindle is turned off.
6725 		 */
6726 		if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
6727 			if (got_semaphore_here != 0) {
6728 				sema_v(&un->un_semoclose);
6729 			}
6730 			/*
6731 			 * On exit put the state back to its original value
6732 			 * and broadcast to anyone waiting for the power
6733 			 * change completion.
6734 			 */
6735 			mutex_enter(SD_MUTEX(un));
6736 			un->un_state = state_before_pm;
6737 			cv_broadcast(&un->un_suspend_cv);
6738 			mutex_exit(SD_MUTEX(un));
6739 			SD_TRACE(SD_LOG_IO_PM, un,
6740 			    "sdpower: exit, PM suspend Failed.\n");
6741 
6742 			goto sdpower_failed;
6743 		}
6744 	}
6745 
6746 	/*
6747 	 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
6748 	 * close, or strategy. Dump no longer uses this routine; it uses its
6749 	 * own code so it can be done in polled mode.
6750 	 */
6751 
6752 	medium_present = TRUE;
6753 
6754 	/*
6755 	 * When powering up, issue a TUR in case the device is at unit
6756 	 * attention. Don't do retries. Bypass the PM layer, otherwise
6757 	 * a deadlock on un_pm_busy_cv will occur.
6758 	 */
6759 	if (level == SD_SPINDLE_ON) {
6760 		sval = sd_send_scsi_TEST_UNIT_READY(ssc,
6761 		    SD_DONT_RETRY_TUR | SD_BYPASS_PM);
6762 		if (sval != 0)
6763 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6764 	}
6765 
6766 	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
6767 	    ((level == SD_SPINDLE_ON) ? "START" : "STOP"));
6768 
6769 	sval = sd_send_scsi_START_STOP_UNIT(ssc,
6770 	    ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP),
6771 	    SD_PATH_DIRECT);
6772 	if (sval != 0) {
6773 		if (sval == EIO)
6774 			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
6775 		else
6776 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6777 	}
6778 
6779 	/* Command failed, check for media present. */
6780 	if ((sval == ENXIO) && un->un_f_has_removable_media) {
6781 		medium_present = FALSE;
6782 	}
6783 
6784 	/*
6785 	 * The conditions of interest here are:
6786 	 * if a spindle off with media present fails,
6787 	 *	then restore the state and return an error.
6788 	 * else if a spindle on fails,
6789 	 *	then return an error (there's no state to restore).
6790 	 * In all other cases we setup for the new state
6791 	 * and return success.
6792 	 */
6793 	switch (level) {
6794 	case SD_SPINDLE_OFF:
6795 		if ((medium_present == TRUE) && (sval != 0)) {
6796 			/* The stop command from above failed */
6797 			rval = DDI_FAILURE;
6798 			/*
6799 			 * The stop command failed, and we have media
6800 			 * present. Put the level back by calling
6801 			 * sd_ddi_pm_resume() and set the state back to
6802 			 * its previous value.
6803 			 */
6804 			(void) sd_ddi_pm_resume(un);
6805 			mutex_enter(SD_MUTEX(un));
6806 			un->un_last_state = save_state;
6807 			mutex_exit(SD_MUTEX(un));
6808 			break;
6809 		}
6810 		/*
6811 		 * The stop command from above succeeded.
6812 		 */
6813 		if (un->un_f_monitor_media_state) {
6814 			/*
6815 			 * Terminate the watch thread in case of removable
6816 			 * media devices going into low power state. This is
6817 			 * as per the requirements of the pm framework;
6818 			 * otherwise commands will be generated for the device
6819 			 * (through the watch thread), even in low power state.
6820 			 */
6821 			mutex_enter(SD_MUTEX(un));
6822 			un->un_f_watcht_stopped = FALSE;
6823 			if (un->un_swr_token != NULL) {
6824 				opaque_t temp_token = un->un_swr_token;
6825 				un->un_f_watcht_stopped = TRUE;
6826 				un->un_swr_token = NULL;
6827 				mutex_exit(SD_MUTEX(un));
6828 				(void) scsi_watch_request_terminate(temp_token,
6829 				    SCSI_WATCH_TERMINATE_ALL_WAIT);
6830 			} else {
6831 				mutex_exit(SD_MUTEX(un));
6832 			}
6833 		}
6834 		break;
6835 
6836 	default:	/* The level requested is spindle on... */
6837 		/*
6838 		 * Legacy behavior: return success on a failed spinup
6839 		 * if there is no media in the drive.
6840 		 * Do this by looking at medium_present here.
6841 		 */
6842 		if ((sval != 0) && medium_present) {
6843 			/* The start command from above failed */
6844 			rval = DDI_FAILURE;
6845 			break;
6846 		}
6847 		/*
6848 		 * The start command from above succeeded.
6849 		 * Resume the devices now that we have
6850 		 * started the disks.
6851 		 */
6852 		(void) sd_ddi_pm_resume(un);
6853 
6854 		/*
6855 		 * Resume the watch thread since it was suspended
6856 		 * when the device went into low power mode.
6857 */ 6858 if (un->un_f_monitor_media_state) { 6859 mutex_enter(SD_MUTEX(un)); 6860 if (un->un_f_watcht_stopped == TRUE) { 6861 opaque_t temp_token; 6862 6863 un->un_f_watcht_stopped = FALSE; 6864 mutex_exit(SD_MUTEX(un)); 6865 temp_token = scsi_watch_request_submit( 6866 SD_SCSI_DEVP(un), 6867 sd_check_media_time, 6868 SENSE_LENGTH, sd_media_watch_cb, 6869 (caddr_t)dev); 6870 mutex_enter(SD_MUTEX(un)); 6871 un->un_swr_token = temp_token; 6872 } 6873 mutex_exit(SD_MUTEX(un)); 6874 } 6875 } 6876 if (got_semaphore_here != 0) { 6877 sema_v(&un->un_semoclose); 6878 } 6879 /* 6880 * On exit put the state back to it's original value 6881 * and broadcast to anyone waiting for the power 6882 * change completion. 6883 */ 6884 mutex_enter(SD_MUTEX(un)); 6885 un->un_state = state_before_pm; 6886 cv_broadcast(&un->un_suspend_cv); 6887 mutex_exit(SD_MUTEX(un)); 6888 6889 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6890 6891 sd_ssc_fini(ssc); 6892 return (rval); 6893 6894 sdpower_failed: 6895 6896 sd_ssc_fini(ssc); 6897 return (DDI_FAILURE); 6898 } 6899 6900 6901 6902 /* 6903 * Function: sdattach 6904 * 6905 * Description: Driver's attach(9e) entry point function. 6906 * 6907 * Arguments: devi - opaque device info handle 6908 * cmd - attach type 6909 * 6910 * Return Code: DDI_SUCCESS 6911 * DDI_FAILURE 6912 * 6913 * Context: Kernel thread context 6914 */ 6915 6916 static int 6917 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6918 { 6919 switch (cmd) { 6920 case DDI_ATTACH: 6921 return (sd_unit_attach(devi)); 6922 case DDI_RESUME: 6923 return (sd_ddi_resume(devi)); 6924 default: 6925 break; 6926 } 6927 return (DDI_FAILURE); 6928 } 6929 6930 6931 /* 6932 * Function: sddetach 6933 * 6934 * Description: Driver's detach(9E) entry point function. 6935 * 6936 * Arguments: devi - opaque device info handle 6937 * cmd - detach type 6938 * 6939 * Return Code: DDI_SUCCESS 6940 * DDI_FAILURE 6941 * 6942 * Context: Kernel thread context 6943 */ 6944 6945 static int 6946 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6947 { 6948 switch (cmd) { 6949 case DDI_DETACH: 6950 return (sd_unit_detach(devi)); 6951 case DDI_SUSPEND: 6952 return (sd_ddi_suspend(devi)); 6953 default: 6954 break; 6955 } 6956 return (DDI_FAILURE); 6957 } 6958 6959 6960 /* 6961 * Function: sd_sync_with_callback 6962 * 6963 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6964 * state while the callback routine is active. 6965 * 6966 * Arguments: un: softstate structure for the instance 6967 * 6968 * Context: Kernel thread context 6969 */ 6970 6971 static void 6972 sd_sync_with_callback(struct sd_lun *un) 6973 { 6974 ASSERT(un != NULL); 6975 6976 mutex_enter(SD_MUTEX(un)); 6977 6978 ASSERT(un->un_in_callback >= 0); 6979 6980 while (un->un_in_callback > 0) { 6981 mutex_exit(SD_MUTEX(un)); 6982 delay(2); 6983 mutex_enter(SD_MUTEX(un)); 6984 } 6985 6986 mutex_exit(SD_MUTEX(un)); 6987 } 6988 6989 /* 6990 * Function: sd_unit_attach 6991 * 6992 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6993 * the soft state structure for the device and performs 6994 * all necessary structure and device initializations. 6995 * 6996 * Arguments: devi: the system's dev_info_t for the device. 6997 * 6998 * Return Code: DDI_SUCCESS if attach is successful. 6999 * DDI_FAILURE if any part of the attach fails. 7000 * 7001 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 7002 * Kernel thread context only. Can sleep. 
7003 */ 7004 7005 static int 7006 sd_unit_attach(dev_info_t *devi) 7007 { 7008 struct scsi_device *devp; 7009 struct sd_lun *un; 7010 char *variantp; 7011 char name_str[48]; 7012 int reservation_flag = SD_TARGET_IS_UNRESERVED; 7013 int instance; 7014 int rval; 7015 int wc_enabled; 7016 int tgt; 7017 uint64_t capacity; 7018 uint_t lbasize = 0; 7019 dev_info_t *pdip = ddi_get_parent(devi); 7020 int offbyone = 0; 7021 int geom_label_valid = 0; 7022 sd_ssc_t *ssc; 7023 int status; 7024 struct sd_fm_internal *sfip = NULL; 7025 int max_xfer_size; 7026 7027 /* 7028 * Retrieve the target driver's private data area. This was set 7029 * up by the HBA. 7030 */ 7031 devp = ddi_get_driver_private(devi); 7032 7033 /* 7034 * Retrieve the target ID of the device. 7035 */ 7036 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7037 SCSI_ADDR_PROP_TARGET, -1); 7038 7039 /* 7040 * Since we have no idea what state things were left in by the last 7041 * user of the device, set up some 'default' settings, ie. turn 'em 7042 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 7043 * Do this before the scsi_probe, which sends an inquiry. 7044 * This is a fix for bug (4430280). 7045 * Of special importance is wide-xfer. The drive could have been left 7046 * in wide transfer mode by the last driver to communicate with it, 7047 * this includes us. If that's the case, and if the following is not 7048 * setup properly or we don't re-negotiate with the drive prior to 7049 * transferring data to/from the drive, it causes bus parity errors, 7050 * data overruns, and unexpected interrupts. This first occurred when 7051 * the fix for bug (4378686) was made. 7052 */ 7053 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 7054 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 7055 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 7056 7057 /* 7058 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 7059 * on a target. Setting it per lun instance actually sets the 7060 * capability of this target, which affects those luns already 7061 * attached on the same target. So during attach, we can only disable 7062 * this capability only when no other lun has been attached on this 7063 * target. By doing this, we assume a target has the same tagged-qing 7064 * capability for every lun. The condition can be removed when HBA 7065 * is changed to support per lun based tagged-qing capability. 7066 */ 7067 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7068 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 7069 } 7070 7071 /* 7072 * Use scsi_probe() to issue an INQUIRY command to the device. 7073 * This call will allocate and fill in the scsi_inquiry structure 7074 * and point the sd_inq member of the scsi_device structure to it. 7075 * If the attach succeeds, then this memory will not be de-allocated 7076 * (via scsi_unprobe()) until the instance is detached. 7077 */ 7078 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 7079 goto probe_failed; 7080 } 7081 7082 /* 7083 * Check the device type as specified in the inquiry data and 7084 * claim it if it is of a type that we support. 7085 */ 7086 switch (devp->sd_inq->inq_dtype) { 7087 case DTYPE_DIRECT: 7088 break; 7089 case DTYPE_RODIRECT: 7090 break; 7091 case DTYPE_OPTICAL: 7092 break; 7093 case DTYPE_NOTPRESENT: 7094 default: 7095 /* Unsupported device type; fail the attach. */ 7096 goto probe_failed; 7097 } 7098 7099 /* 7100 * Allocate the soft state structure for this unit. 
7101 * 7102 * We rely upon this memory being set to all zeroes by 7103 * ddi_soft_state_zalloc(). We assume that any member of the 7104 * soft state structure that is not explicitly initialized by 7105 * this routine will have a value of zero. 7106 */ 7107 instance = ddi_get_instance(devp->sd_dev); 7108 #ifndef XPV_HVM_DRIVER 7109 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 7110 goto probe_failed; 7111 } 7112 #endif /* !XPV_HVM_DRIVER */ 7113 7114 /* 7115 * Retrieve a pointer to the newly-allocated soft state. 7116 * 7117 * This should NEVER fail if the ddi_soft_state_zalloc() call above 7118 * was successful, unless something has gone horribly wrong and the 7119 * ddi's soft state internals are corrupt (in which case it is 7120 * probably better to halt here than just fail the attach....) 7121 */ 7122 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 7123 panic("sd_unit_attach: NULL soft state on instance:0x%x", 7124 instance); 7125 /*NOTREACHED*/ 7126 } 7127 7128 /* 7129 * Link the back ptr of the driver soft state to the scsi_device 7130 * struct for this lun. 7131 * Save a pointer to the softstate in the driver-private area of 7132 * the scsi_device struct. 7133 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 7134 * we first set un->un_sd below. 7135 */ 7136 un->un_sd = devp; 7137 devp->sd_private = (opaque_t)un; 7138 7139 /* 7140 * The following must be after devp is stored in the soft state struct. 7141 */ 7142 #ifdef SDDEBUG 7143 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7144 "%s_unit_attach: un:0x%p instance:%d\n", 7145 ddi_driver_name(devi), un, instance); 7146 #endif 7147 7148 /* 7149 * Set up the device type and node type (for the minor nodes). 7150 * By default we assume that the device can at least support the 7151 * Common Command Set. Call it a CD-ROM if it reports itself 7152 * as a RODIRECT device. 7153 */ 7154 switch (devp->sd_inq->inq_dtype) { 7155 case DTYPE_RODIRECT: 7156 un->un_node_type = DDI_NT_CD_CHAN; 7157 un->un_ctype = CTYPE_CDROM; 7158 break; 7159 case DTYPE_OPTICAL: 7160 un->un_node_type = DDI_NT_BLOCK_CHAN; 7161 un->un_ctype = CTYPE_ROD; 7162 break; 7163 default: 7164 un->un_node_type = DDI_NT_BLOCK_CHAN; 7165 un->un_ctype = CTYPE_CCS; 7166 break; 7167 } 7168 7169 /* 7170 * Try to read the interconnect type from the HBA. 7171 * 7172 * Note: This driver is currently compiled as two binaries, a parallel 7173 * scsi version (sd) and a fibre channel version (ssd). All functional 7174 * differences are determined at compile time. In the future a single 7175 * binary will be provided and the interconnect type will be used to 7176 * differentiate between fibre and parallel scsi behaviors. At that time 7177 * it will be necessary for all fibre channel HBAs to support this 7178 * property. 
7179 * 7180 * Set un_f_is_fibre to TRUE (default fibre). 7181 */ 7182 un->un_f_is_fibre = TRUE; 7183 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 7184 case INTERCONNECT_SSA: 7185 un->un_interconnect_type = SD_INTERCONNECT_SSA; 7186 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7187 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 7188 break; 7189 case INTERCONNECT_PARALLEL: 7190 un->un_f_is_fibre = FALSE; 7191 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7192 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7193 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 7194 break; 7195 case INTERCONNECT_SAS: 7196 un->un_f_is_fibre = FALSE; 7197 un->un_interconnect_type = SD_INTERCONNECT_SAS; 7198 un->un_node_type = DDI_NT_BLOCK_SAS; 7199 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7200 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un); 7201 break; 7202 case INTERCONNECT_SATA: 7203 un->un_f_is_fibre = FALSE; 7204 un->un_interconnect_type = SD_INTERCONNECT_SATA; 7205 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7206 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 7207 break; 7208 case INTERCONNECT_FIBRE: 7209 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 7210 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7211 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 7212 break; 7213 case INTERCONNECT_FABRIC: 7214 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 7215 un->un_node_type = DDI_NT_BLOCK_FABRIC; 7216 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7217 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 7218 break; 7219 default: 7220 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 7221 /* 7222 * The HBA does not support the "interconnect-type" property 7223 * (or did not provide a recognized type). 7224 * 7225 * Note: This will be obsoleted when a single fibre channel 7226 * and parallel scsi driver is delivered. In the meantime the 7227 * interconnect type will be set to the platform default. If that 7228 * type is not parallel SCSI, it means that we should be 7229 * assuming "ssd" semantics. However, here this also means that 7230 * the FC HBA is not supporting the "interconnect-type" property 7231 * like we expect it to, so log this occurrence. 7232 */ 7233 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 7234 if (!SD_IS_PARALLEL_SCSI(un)) { 7235 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7236 "sd_unit_attach: un:0x%p Assuming " 7237 "INTERCONNECT_FIBRE\n", un); 7238 } else { 7239 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7240 "sd_unit_attach: un:0x%p Assuming " 7241 "INTERCONNECT_PARALLEL\n", un); 7242 un->un_f_is_fibre = FALSE; 7243 } 7244 #else 7245 /* 7246 * Note: This branch will be used when a single fibre 7247 * channel and parallel scsi driver is delivered. The default 7248 * will be to assume that if a device does not support the 7249 * "interconnect-type" property it is a parallel SCSI HBA and 7250 * we will set the interconnect type for parallel scsi.
7251 */ 7252 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7253 un->un_f_is_fibre = FALSE; 7254 #endif 7255 break; 7256 } 7257 7258 if (un->un_f_is_fibre == TRUE) { 7259 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 7260 SCSI_VERSION_3) { 7261 switch (un->un_interconnect_type) { 7262 case SD_INTERCONNECT_FIBRE: 7263 case SD_INTERCONNECT_SSA: 7264 un->un_node_type = DDI_NT_BLOCK_WWN; 7265 break; 7266 default: 7267 break; 7268 } 7269 } 7270 } 7271 7272 /* 7273 * Initialize the Request Sense command for the target 7274 */ 7275 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 7276 goto alloc_rqs_failed; 7277 } 7278 7279 /* 7280 * Set un_retry_count to SD_RETRY_COUNT; this is OK for SPARC 7281 * with separate binaries for sd and ssd. 7282 * 7283 * x86 has one binary, so un_retry_count is set based on the 7284 * connection type. The hardcoded values will go away when SPARC 7285 * uses one binary for sd and ssd. These hardcoded values need to 7286 * match SD_RETRY_COUNT in sddef.h. 7287 * The value used is based on the interconnect type: 7288 * fibre = 3, parallel = 5. 7289 */ 7290 #if defined(__i386) || defined(__amd64) 7291 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 7292 #else 7293 un->un_retry_count = SD_RETRY_COUNT; 7294 #endif 7295 7296 /* 7297 * Set the per disk retry count to the default number of retries 7298 * for disks and CDROMs. This value can be overridden by the 7299 * disk property list or an entry in sd.conf. 7300 */ 7301 un->un_notready_retry_count = 7302 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 7303 : DISK_NOT_READY_RETRY_COUNT(un); 7304 7305 /* 7306 * Set the busy retry count to the default value of un_retry_count. 7307 * This can be overridden by entries in sd.conf or the device 7308 * config table. 7309 */ 7310 un->un_busy_retry_count = un->un_retry_count; 7311 7312 /* 7313 * Init the reset threshold for retries. This number determines 7314 * how many retries must be performed before a reset can be issued 7315 * (for certain error conditions). This can be overridden by entries 7316 * in sd.conf or the device config table. 7317 */ 7318 un->un_reset_retry_count = (un->un_retry_count / 2); 7319 7320 /* 7321 * Set the victim_retry_count to twice the default un_retry_count 7322 */ 7323 un->un_victim_retry_count = (2 * un->un_retry_count); 7324 7325 /* 7326 * Set the reservation release timeout to the default value of 7327 * 5 seconds. This can be overridden by entries in ssd.conf or the 7328 * device config table. 7329 */ 7330 un->un_reserve_release_time = 5; 7331 7332 /* 7333 * Set up the default maximum transfer size. Note that this may 7334 * get updated later in the attach, when setting up default wide 7335 * operations for disks. 7336 */ 7337 #if defined(__i386) || defined(__amd64) 7338 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 7339 un->un_partial_dma_supported = 1; 7340 #else 7341 un->un_max_xfer_size = (uint_t)maxphys; 7342 #endif 7343 7344 /* 7345 * Get "allow bus device reset" property (defaults to "enabled" if 7346 * the property was not defined). This is to disable bus resets for 7347 * certain kinds of error recovery. Note: In the future when a run-time 7348 * fibre check is available the soft state flag should default to 7349 * enabled.
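 *
 * For illustration, a driver.conf entry disabling it might look like
 * the following (hypothetical value; the property name is the one
 * read below):
 *
 *	allow-bus-device-reset=0;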
7350 */ 7351 if (un->un_f_is_fibre == TRUE) { 7352 un->un_f_allow_bus_device_reset = TRUE; 7353 } else { 7354 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7355 "allow-bus-device-reset", 1) != 0) { 7356 un->un_f_allow_bus_device_reset = TRUE; 7357 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7358 "sd_unit_attach: un:0x%p Bus device reset " 7359 "enabled\n", un); 7360 } else { 7361 un->un_f_allow_bus_device_reset = FALSE; 7362 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7363 "sd_unit_attach: un:0x%p Bus device reset " 7364 "disabled\n", un); 7365 } 7366 } 7367 7368 /* 7369 * Check if this is an ATAPI device. ATAPI devices use Group 1 7370 * Read/Write commands and Group 2 Mode Sense/Select commands. 7371 * 7372 * Note: The "obsolete" way of doing this is to check for the "atapi" 7373 * property. The new "variant" property with a value of "atapi" has been 7374 * introduced so that future 'variants' of standard SCSI behavior (like 7375 * atapi) could be specified by the underlying HBA drivers by supplying 7376 * a new value for the "variant" property, instead of having to define a 7377 * new property. 7378 */ 7379 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7380 un->un_f_cfg_is_atapi = TRUE; 7381 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7382 "sd_unit_attach: un:0x%p Atapi device\n", un); 7383 } 7384 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7385 &variantp) == DDI_PROP_SUCCESS) { 7386 if (strcmp(variantp, "atapi") == 0) { 7387 un->un_f_cfg_is_atapi = TRUE; 7388 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7389 "sd_unit_attach: un:0x%p Atapi device\n", un); 7390 } 7391 ddi_prop_free(variantp); 7392 } 7393 7394 un->un_cmd_timeout = SD_IO_TIME; 7395 7396 un->un_busy_timeout = SD_BSY_TIMEOUT; 7397 7398 /* Info on current states, statuses, etc. (Updated frequently) */ 7399 un->un_state = SD_STATE_NORMAL; 7400 un->un_last_state = SD_STATE_NORMAL; 7401 7402 /* Control & status info for command throttling */ 7403 un->un_throttle = sd_max_throttle; 7404 un->un_saved_throttle = sd_max_throttle; 7405 un->un_min_throttle = sd_min_throttle; 7406 7407 if (un->un_f_is_fibre == TRUE) { 7408 un->un_f_use_adaptive_throttle = TRUE; 7409 } else { 7410 un->un_f_use_adaptive_throttle = FALSE; 7411 } 7412 7413 /* Removable media support. */ 7414 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7415 un->un_mediastate = DKIO_NONE; 7416 un->un_specified_mediastate = DKIO_NONE; 7417 7418 /* CVs for suspend/resume (PM or DR) */ 7419 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7420 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7421 7422 /* Power management support. */ 7423 un->un_power_level = SD_SPINDLE_UNINIT; 7424 7425 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 7426 un->un_f_wcc_inprog = 0; 7427 7428 /* 7429 * The open/close semaphore is used to serialize threads executing 7430 * in the driver's open & close entry point routines for a given 7431 * instance. 7432 */ 7433 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7434 7435 /* 7436 * The conf file entry and softstate variable are forceful overrides, 7437 * meaning a non-zero value must be entered to change the default. 7438 */ 7439 un->un_f_disksort_disabled = FALSE; 7440 un->un_f_rmw_type = SD_RMW_TYPE_DEFAULT; 7441 7442 /* 7443 * Retrieve the properties from the static driver table or the driver 7444 * configuration file (.conf) for this unit and update the soft state 7445 * for the device as needed for the indicated properties.
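 *
 * For illustration only, a hypothetical per-device tuning entry in the
 * .conf file might look like:
 *
 *	sd-config-list = "VENDOR  PRODUCT", "retries-busy:3";
 *
 * (The exact vendor/product match and tuple syntax are defined by the
 * property parsing code, which is not shown here.)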
7446 * Note: the property configuration needs to occur here as some of the 7447 * following routines may have dependencies on soft state flags set 7448 * as part of the driver property configuration. 7449 */ 7450 sd_read_unit_properties(un); 7451 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7452 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 7453 7454 /* 7455 * A device is treated as hotpluggable only if it has the 7456 * "hotpluggable" property. Otherwise, it is 7457 * regarded as non-hotpluggable. 7458 */ 7459 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 7460 -1) != -1) { 7461 un->un_f_is_hotpluggable = TRUE; 7462 } 7463 7464 /* 7465 * Set the unit's attributes (flags) according to "hotpluggable" and 7466 * the RMB bit in the INQUIRY data. 7467 */ 7468 sd_set_unit_attributes(un, devi); 7469 7470 /* 7471 * By default, we mark the capacity, lbasize, and geometry 7472 * as invalid. Only if we successfully read a valid capacity 7473 * will we update the un_blockcount and un_tgt_blocksize with the 7474 * valid values (the geometry will be validated later). 7475 */ 7476 un->un_f_blockcount_is_valid = FALSE; 7477 un->un_f_tgt_blocksize_is_valid = FALSE; 7478 7479 /* 7480 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7481 * otherwise. 7482 */ 7483 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7484 un->un_blockcount = 0; 7485 7486 /* 7487 * Set up the per-instance info needed to determine the correct 7488 * CDBs and other info for issuing commands to the target. 7489 */ 7490 sd_init_cdb_limits(un); 7491 7492 /* 7493 * Set up the IO chains to use, based upon the target type. 7494 */ 7495 if (un->un_f_non_devbsize_supported) { 7496 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7497 } else { 7498 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7499 } 7500 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7501 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7502 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7503 7504 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7505 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7506 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7507 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7508 7509 7510 if (ISCD(un)) { 7511 un->un_additional_codes = sd_additional_codes; 7512 } else { 7513 un->un_additional_codes = NULL; 7514 } 7515 7516 /* 7517 * Create the kstats here so they can be available for attach-time 7518 * routines that send commands to the unit (either polled or via 7519 * sd_send_scsi_cmd). 7520 * 7521 * Note: This is a critical sequence that needs to be maintained: 7522 * 1) Instantiate the kstats here, before any routines using the 7523 * iopath (i.e. sd_send_scsi_cmd). 7524 * 2) Instantiate and initialize the partition stats 7525 * (sd_set_pstats). 7526 * 3) Initialize the error stats (sd_set_errstats), following 7527 * sd_validate_geometry(), sd_register_devid(), 7528 * and sd_cache_control().
7529 */ 7530 7531 un->un_stats = kstat_create(sd_label, instance, 7532 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7533 if (un->un_stats != NULL) { 7534 un->un_stats->ks_lock = SD_MUTEX(un); 7535 kstat_install(un->un_stats); 7536 } 7537 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7538 "sd_unit_attach: un:0x%p un_stats created\n", un); 7539 7540 sd_create_errstats(un, instance); 7541 if (un->un_errstats == NULL) { 7542 goto create_errstats_failed; 7543 } 7544 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7545 "sd_unit_attach: un:0x%p errstats created\n", un); 7546 7547 /* 7548 * The following if/else code was relocated here from below as part 7549 * of the fix for bug (4430280). However with the default setup added 7550 * on entry to this routine, it's no longer absolutely necessary for 7551 * this to be before the call to sd_spin_up_unit. 7552 */ 7553 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7554 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7555 (devp->sd_inq->inq_ansi == 5)) && 7556 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7557 7558 /* 7559 * If tagged queueing is supported by the target 7560 * and by the host adapter, then we will enable it. 7561 */ 7562 un->un_tagflags = 0; 7563 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7564 (un->un_f_arq_enabled == TRUE)) { 7565 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7566 1, 1) == 1) { 7567 un->un_tagflags = FLAG_STAG; 7568 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7569 "sd_unit_attach: un:0x%p tag queueing " 7570 "enabled\n", un); 7571 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7572 "untagged-qing", 0) == 1) { 7573 un->un_f_opt_queueing = TRUE; 7574 un->un_saved_throttle = un->un_throttle = 7575 min(un->un_throttle, 3); 7576 } else { 7577 un->un_f_opt_queueing = FALSE; 7578 un->un_saved_throttle = un->un_throttle = 1; 7579 } 7580 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7581 == 1) && (un->un_f_arq_enabled == TRUE)) { 7582 /* The Host Adapter supports internal queueing. */ 7583 un->un_f_opt_queueing = TRUE; 7584 un->un_saved_throttle = un->un_throttle = 7585 min(un->un_throttle, 3); 7586 } else { 7587 un->un_f_opt_queueing = FALSE; 7588 un->un_saved_throttle = un->un_throttle = 1; 7589 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7590 "sd_unit_attach: un:0x%p no tag queueing\n", un); 7591 } 7592 7593 /* 7594 * Enable large transfers for SATA/SAS drives 7595 */ 7596 if (SD_IS_SERIAL(un)) { 7597 un->un_max_xfer_size = 7598 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7599 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7600 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7601 "sd_unit_attach: un:0x%p max transfer " 7602 "size=0x%x\n", un, un->un_max_xfer_size); 7603 7604 } 7605 7606 /* Set up or tear down default wide operations for disks */ 7607 7608 /* 7609 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 7610 * and "ssd_max_xfer_size" to exist simultaneously on the same 7611 * system and be set to different values. In the future this 7612 * code may need to be updated when the ssd module is 7613 * obsoleted and removed from the system.
(4299588) 7614 */ 7615 if (SD_IS_PARALLEL_SCSI(un) && 7616 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 7617 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 7618 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7619 1, 1) == 1) { 7620 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7621 "sd_unit_attach: un:0x%p Wide Transfer " 7622 "enabled\n", un); 7623 } 7624 7625 /* 7626 * If tagged queuing has also been enabled, then 7627 * enable large xfers 7628 */ 7629 if (un->un_saved_throttle == sd_max_throttle) { 7630 un->un_max_xfer_size = 7631 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7632 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7633 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7634 "sd_unit_attach: un:0x%p max transfer " 7635 "size=0x%x\n", un, un->un_max_xfer_size); 7636 } 7637 } else { 7638 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7639 0, 1) == 1) { 7640 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7641 "sd_unit_attach: un:0x%p " 7642 "Wide Transfer disabled\n", un); 7643 } 7644 } 7645 } else { 7646 un->un_tagflags = FLAG_STAG; 7647 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7648 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7649 } 7650 7651 /* 7652 * If this target supports LUN reset, try to enable it. 7653 */ 7654 if (un->un_f_lun_reset_enabled) { 7655 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7656 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7657 "un:0x%p lun_reset capability set\n", un); 7658 } else { 7659 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7660 "un:0x%p lun-reset capability not set\n", un); 7661 } 7662 } 7663 7664 /* 7665 * Adjust the maximum transfer size. This is to fix 7666 * the problem of partial DMA support on SPARC. Some 7667 * HBA drivers, like aac, have a very small dma_attr_maxxfer 7668 * size, which requires partial DMA support on SPARC. 7669 * In the future the SPARC pci nexus driver may solve 7670 * the problem instead of this fix. 7671 */ 7672 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7673 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7674 /* We need DMA partial even on sparc to ensure sddump() works */ 7675 un->un_max_xfer_size = max_xfer_size; 7676 if (un->un_partial_dma_supported == 0) 7677 un->un_partial_dma_supported = 1; 7678 } 7679 if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 7680 DDI_PROP_DONTPASS, "buf_break", 0) == 1) { 7681 if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr, 7682 un->un_max_xfer_size) == 1) { 7683 un->un_buf_breakup_supported = 1; 7684 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7685 "un:0x%p Buf breakup enabled\n", un); 7686 } 7687 } 7688 7689 /* 7690 * Set PKT_DMA_PARTIAL flag. 7691 */ 7692 if (un->un_partial_dma_supported == 1) { 7693 un->un_pkt_flags = PKT_DMA_PARTIAL; 7694 } else { 7695 un->un_pkt_flags = 0; 7696 } 7697 7698 /* Initialize sd_ssc_t for internal uscsi commands */ 7699 ssc = sd_ssc_init(un); 7700 scsi_fm_init(devp); 7701 7702 /* 7703 * Allocate memory for the SCSI FMA structures. 7704 */ 7705 un->un_fm_private = 7706 kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP); 7707 sfip = (struct sd_fm_internal *)un->un_fm_private; 7708 sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd; 7709 sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo; 7710 sfip->fm_ssc.ssc_un = un; 7711 7712 if (ISCD(un) || 7713 un->un_f_has_removable_media || 7714 devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) { 7715 /* 7716 * We don't touch CD-ROMs or DDI_FM_NOT_CAPABLE devices; 7717 * their logging is unchanged.
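 * (SD_FM_LOG_NSUP, set just below, selects that legacy scsi_log
 * behavior.)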
7718 */ 7719 sfip->fm_log_level = SD_FM_LOG_NSUP; 7720 } else { 7721 /* 7722 * If we get here, it should be a non-CDROM, FM-capable 7723 * device, and it will not keep the old scsi_log behavior 7724 * in /var/adm/messages. However, the property 7725 * "fm-scsi-log" will control whether the FM telemetry will 7726 * be logged in /var/adm/messages. 7727 */ 7728 int fm_scsi_log; 7729 fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 7730 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0); 7731 7732 if (fm_scsi_log) 7733 sfip->fm_log_level = SD_FM_LOG_EREPORT; 7734 else 7735 sfip->fm_log_level = SD_FM_LOG_SILENT; 7736 } 7737 7738 /* 7739 * At this point in the attach, we have enough info in the 7740 * soft state to be able to issue commands to the target. 7741 * 7742 * All command paths used below MUST issue their commands as 7743 * SD_PATH_DIRECT. This is important as intermediate layers 7744 * are not all initialized yet (such as PM). 7745 */ 7746 7747 /* 7748 * Send a TEST UNIT READY command to the device. This should clear 7749 * any outstanding UNIT ATTENTION that may be present. 7750 * 7751 * Note: Don't check for success; just track whether there is a 7752 * reservation. This is a throwaway command to clear any unit attentions. 7753 * 7754 * Note: This MUST be the first command issued to the target during 7755 * attach to ensure power on UNIT ATTENTIONS are cleared. 7756 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7757 * with attempts at spinning up a device with no media. 7758 */ 7759 status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 7760 if (status != 0) { 7761 if (status == EACCES) 7762 reservation_flag = SD_TARGET_IS_RESERVED; 7763 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7764 } 7765 7766 /* 7767 * If the device is NOT a removable media device, attempt to spin 7768 * it up (using the START_STOP_UNIT command) and read its capacity 7769 * (using the READ CAPACITY command). Note, however, that either 7770 * of these could fail and in some cases we would continue with 7771 * the attach despite the failure (see below). 7772 */ 7773 if (un->un_f_descr_format_supported) { 7774 7775 switch (sd_spin_up_unit(ssc)) { 7776 case 0: 7777 /* 7778 * Spin-up was successful; now try to read the 7779 * capacity. If successful then save the results 7780 * and mark the capacity & lbasize as valid. 7781 */ 7782 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7783 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7784 7785 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 7786 &lbasize, SD_PATH_DIRECT); 7787 7788 switch (status) { 7789 case 0: { 7790 if (capacity > DK_MAX_BLOCKS) { 7791 #ifdef _LP64 7792 if ((capacity + 1) > 7793 SD_GROUP1_MAX_ADDRESS) { 7794 /* 7795 * Enable descriptor format 7796 * sense data so that we can 7797 * get 64 bit sense data 7798 * fields. 7799 */ 7800 sd_enable_descr_sense(ssc); 7801 } 7802 #else 7803 /* 32-bit kernels can't handle this */ 7804 scsi_log(SD_DEVINFO(un), 7805 sd_label, CE_WARN, 7806 "disk has %llu blocks, which " 7807 "is too large for a 32-bit " 7808 "kernel", capacity); 7809 7810 #if defined(__i386) || defined(__amd64) 7811 /* 7812 * A 1TB disk was treated as (1T - 512)B 7813 * in the past; since it might have a 7814 * valid VTOC and Solaris partitions, 7815 * we have to allow it to continue to 7816 * work.
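 * (Hence the "capacity - 1" in the check below: a capacity exactly
 * one block over DK_MAX_BLOCKS is still accepted on 32-bit x86;
 * anything larger fails the attach.)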
7817 */ 7818 if (capacity -1 > DK_MAX_BLOCKS) 7819 #endif 7820 goto spinup_failed; 7821 #endif 7822 } 7823 7824 /* 7825 * Here it is not necessary to check whether 7826 * the capacity of the device is bigger than 7827 * what the max HBA CDB can support, because 7828 * sd_send_scsi_READ_CAPACITY retrieves 7829 * the capacity via a USCSI command, which 7830 * is itself constrained by the max HBA CDB. 7831 * sd_send_scsi_READ_CAPACITY returns 7832 * EINVAL when a bigger CDB than the supported 7833 * CDB length is required; that is handled 7834 * below in "case EINVAL". 7835 */ 7836 7837 /* 7838 * The following relies on 7839 * sd_send_scsi_READ_CAPACITY never 7840 * returning 0 for capacity and/or lbasize. 7841 */ 7842 sd_update_block_info(un, lbasize, capacity); 7843 7844 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7845 "sd_unit_attach: un:0x%p capacity = %ld " 7846 "blocks; lbasize= %ld.\n", un, 7847 un->un_blockcount, un->un_tgt_blocksize); 7848 7849 break; 7850 } 7851 case EINVAL: 7852 /* 7853 * In the case where the max-cdb-length property 7854 * is smaller than the required CDB length for 7855 * a SCSI device, a target driver can fail to 7856 * attach to that device. 7857 */ 7858 scsi_log(SD_DEVINFO(un), 7859 sd_label, CE_WARN, 7860 "disk capacity is too large " 7861 "for current cdb length"); 7862 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7863 7864 goto spinup_failed; 7865 case EACCES: 7866 /* 7867 * Should never get here if the spin-up 7868 * succeeded, but handle it anyway. 7869 * From here, just continue with the attach... 7870 */ 7871 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7872 "sd_unit_attach: un:0x%p " 7873 "sd_send_scsi_READ_CAPACITY " 7874 "returned reservation conflict\n", un); 7875 reservation_flag = SD_TARGET_IS_RESERVED; 7876 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7877 break; 7878 default: 7879 /* 7880 * Likewise, should never get here if the 7881 * spin-up succeeded. Just continue with 7882 * the attach... 7883 */ 7884 if (status == EIO) 7885 sd_ssc_assessment(ssc, 7886 SD_FMT_STATUS_CHECK); 7887 else 7888 sd_ssc_assessment(ssc, 7889 SD_FMT_IGNORE); 7890 break; 7891 } 7892 break; 7893 case EACCES: 7894 /* 7895 * Device is reserved by another host. In this case 7896 * we could not spin it up or read the capacity, but 7897 * we continue with the attach anyway. 7898 */ 7899 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7900 "sd_unit_attach: un:0x%p spin-up reservation " 7901 "conflict.\n", un); 7902 reservation_flag = SD_TARGET_IS_RESERVED; 7903 break; 7904 default: 7905 /* Fail the attach if the spin-up failed.
*/ 7906 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7907 "sd_unit_attach: un:0x%p spin-up failed.", un); 7908 goto spinup_failed; 7909 } 7910 7911 } 7912 7913 /* 7914 * Check to see if this is an MMC drive 7915 */ 7916 if (ISCD(un)) { 7917 sd_set_mmc_caps(ssc); 7918 } 7919 7920 7921 /* 7922 * Add a zero-length attribute to tell the world we support 7923 * kernel ioctls (for layered drivers) 7924 */ 7925 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7926 DDI_KERNEL_IOCTL, NULL, 0); 7927 7928 /* 7929 * Add a boolean property to tell the world we support 7930 * the B_FAILFAST flag (for layered drivers) 7931 */ 7932 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7933 "ddi-failfast-supported", NULL, 0); 7934 7935 /* 7936 * Initialize power management 7937 */ 7938 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7939 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7940 sd_setup_pm(ssc, devi); 7941 if (un->un_f_pm_is_enabled == FALSE) { 7942 /* 7943 * For performance, point to a jump table that does 7944 * not include pm. 7945 * The direct and priority chains don't change with PM. 7946 * 7947 * Note: this is currently done based on individual device 7948 * capabilities. When an interface for determining system 7949 * power enabled state becomes available, or when additional 7950 * layers are added to the command chain, these values will 7951 * have to be re-evaluated for correctness. 7952 */ 7953 if (un->un_f_non_devbsize_supported) { 7954 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7955 } else { 7956 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7957 } 7958 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7959 } 7960 7961 /* 7962 * This property is set to 0 by HA software to avoid retries 7963 * on a reserved disk. (The preferred property name is 7964 * "retry-on-reservation-conflict") (1189689) 7965 * 7966 * Note: The use of a global here can have unintended consequences. A 7967 * per-instance variable is preferable to match the capabilities of 7968 * different underlying HBAs (4402600) 7969 */ 7970 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7971 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7972 sd_retry_on_reservation_conflict); 7973 if (sd_retry_on_reservation_conflict != 0) { 7974 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7975 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7976 sd_retry_on_reservation_conflict); 7977 } 7978 7979 /* Set up options for QFULL handling. */ 7980 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7981 "qfull-retries", -1)) != -1) { 7982 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7983 rval, 1); 7984 } 7985 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7986 "qfull-retry-interval", -1)) != -1) { 7987 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7988 rval, 1); 7989 } 7990 7991 /* 7992 * This just prints a message that announces the existence of the 7993 * device. The message is always printed in the system logfile, but 7994 * only appears on the console if the system is booted with the 7995 * -v (verbose) argument.
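 * The announcement typically looks something like the following
 * (illustrative only; the exact format comes from the DDI):
 *
 *	sd0 at <hba>0: target 0 lun 0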
7996 */ 7997 ddi_report_dev(devi); 7998 7999 un->un_mediastate = DKIO_NONE; 8000 8001 cmlb_alloc_handle(&un->un_cmlbhandle); 8002 8003 #if defined(__i386) || defined(__amd64) 8004 /* 8005 * On x86, compensate for off-by-1 legacy error 8006 */ 8007 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 8008 (lbasize == un->un_sys_blocksize)) 8009 offbyone = CMLB_OFF_BY_ONE; 8010 #endif 8011 8012 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 8013 VOID2BOOLEAN(un->un_f_has_removable_media != 0), 8014 VOID2BOOLEAN(un->un_f_is_hotpluggable != 0), 8015 un->un_node_type, offbyone, un->un_cmlbhandle, 8016 (void *)SD_PATH_DIRECT) != 0) { 8017 goto cmlb_attach_failed; 8018 } 8019 8020 8021 /* 8022 * Read and validate the device's geometry (i.e., disk label). 8023 * A new unformatted drive will not have a valid geometry, but 8024 * the driver needs to successfully attach to this device so 8025 * the drive can be formatted via ioctls. 8026 */ 8027 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 8028 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 8029 8030 mutex_enter(SD_MUTEX(un)); 8031 8032 /* 8033 * Read and initialize the devid for the unit. 8034 */ 8035 if (un->un_f_devid_supported) { 8036 sd_register_devid(ssc, devi, reservation_flag); 8037 } 8038 mutex_exit(SD_MUTEX(un)); 8039 8040 #if (defined(__fibre)) 8041 /* 8042 * Register callbacks for fibre only. You can't do this solely 8043 * on the basis of the devid_type because this is HBA-specific. 8044 * We need to query our HBA capabilities to find out whether to 8045 * register or not. 8046 */ 8047 if (un->un_f_is_fibre) { 8048 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 8049 sd_init_event_callbacks(un); 8050 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8051 "sd_unit_attach: un:0x%p event callbacks inserted", 8052 un); 8053 } 8054 } 8055 #endif 8056 8057 if (un->un_f_opt_disable_cache == TRUE) { 8058 /* 8059 * Disable both read cache and write cache. This is 8060 * the historic behavior of the keywords in the config file. 8061 */ 8062 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 8063 0) { 8064 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8065 "sd_unit_attach: un:0x%p Could not disable " 8066 "caching", un); 8067 goto devid_failed; 8068 } 8069 } 8070 8071 /* 8072 * Check the value of the WCE bit now and 8073 * set un_f_write_cache_enabled accordingly. 8074 */ 8075 (void) sd_get_write_cache_enabled(ssc, &wc_enabled); 8076 mutex_enter(SD_MUTEX(un)); 8077 un->un_f_write_cache_enabled = (wc_enabled != 0); 8078 mutex_exit(SD_MUTEX(un)); 8079 8080 if (un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR && 8081 un->un_tgt_blocksize != DEV_BSIZE) { 8082 if (!(un->un_wm_cache)) { 8083 (void) snprintf(name_str, sizeof (name_str), 8084 "%s%d_cache", 8085 ddi_driver_name(SD_DEVINFO(un)), 8086 ddi_get_instance(SD_DEVINFO(un))); 8087 un->un_wm_cache = kmem_cache_create( 8088 name_str, sizeof (struct sd_w_map), 8089 8, sd_wm_cache_constructor, 8090 sd_wm_cache_destructor, NULL, 8091 (void *)un, NULL, 0); 8092 if (!(un->un_wm_cache)) { 8093 goto wm_cache_failed; 8094 } 8095 } 8096 } 8097 8098 /* 8099 * Check the value of the NV_SUP bit and set 8100 * un_f_suppress_cache_flush accordingly. 8101 */ 8102 sd_get_nv_sup(ssc); 8103 8104 /* 8105 * Find out what type of reservation this disk supports. 8106 */ 8107 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL); 8108 8109 switch (status) { 8110 case 0: 8111 /* 8112 * SCSI-3 reservations are supported.
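 * (i.e. the device accepted the PERSISTENT RESERVE IN / READ KEYS
 * request issued above.)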
8113 */ 8114 un->un_reservation_type = SD_SCSI3_RESERVATION; 8115 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8116 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 8117 break; 8118 case ENOTSUP: 8119 /* 8120 * The PERSISTENT RESERVE IN command would not be recognized by 8121 * a SCSI-2 device, so assume the reservation type is SCSI-2. 8122 */ 8123 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8124 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 8125 un->un_reservation_type = SD_SCSI2_RESERVATION; 8126 8127 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8128 break; 8129 default: 8130 /* 8131 * default to SCSI-3 reservations 8132 */ 8133 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8134 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 8135 un->un_reservation_type = SD_SCSI3_RESERVATION; 8136 8137 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8138 break; 8139 } 8140 8141 /* 8142 * Set the pstat and error stat values here, so data obtained during the 8143 * previous attach-time routines is available. 8144 * 8145 * Note: This is a critical sequence that needs to be maintained: 8146 * 1) Instantiate the kstats before any routines using the iopath 8147 * (i.e. sd_send_scsi_cmd). 8148 * 2) Initialize the error stats (sd_set_errstats) and partition 8149 * stats (sd_set_pstats) here, following 8150 * cmlb_validate_geometry(), sd_register_devid(), and 8151 * sd_cache_control(). 8152 */ 8153 8154 if (un->un_f_pkstats_enabled && geom_label_valid) { 8155 sd_set_pstats(un); 8156 SD_TRACE(SD_LOG_IO_PARTITION, un, 8157 "sd_unit_attach: un:0x%p pstats created and set\n", un); 8158 } 8159 8160 sd_set_errstats(un); 8161 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8162 "sd_unit_attach: un:0x%p errstats set\n", un); 8163 8164 8165 /* 8166 * After successfully attaching an instance, we record the information 8167 * of how many luns have been attached on the relevant target and 8168 * controller for parallel SCSI. This information is used when sd tries 8169 * to set the tagged queuing capability in the HBA. 8170 */ 8171 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8172 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 8173 } 8174 8175 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8176 "sd_unit_attach: un:0x%p exit success\n", un); 8177 8178 /* Uninitialize sd_ssc_t pointer */ 8179 sd_ssc_fini(ssc); 8180 8181 return (DDI_SUCCESS); 8182 8183 /* 8184 * An error occurred during the attach; clean up & return failure. 8185 */ 8186 wm_cache_failed: 8187 devid_failed: 8188 8189 setup_pm_failed: 8190 ddi_remove_minor_node(devi, NULL); 8191 8192 cmlb_attach_failed: 8193 /* 8194 * Cleanup from the scsi_ifsetcap() calls (437868) 8195 */ 8196 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8197 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8198 8199 /* 8200 * Refer to the comments of setting tagged-qing in the beginning of 8201 * sd_unit_attach. We can only disable tagged queuing when there is 8202 * no lun attached on the target. 8203 */ 8204 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 8205 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8206 } 8207 8208 if (un->un_f_is_fibre == FALSE) { 8209 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8210 } 8211 8212 spinup_failed: 8213 8214 /* Uninitialize sd_ssc_t pointer */ 8215 sd_ssc_fini(ssc); 8216 8217 mutex_enter(SD_MUTEX(un)); 8218 8219 /* Deallocate SCSI FMA memory spaces */ 8220 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8221 8222 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd.
restart */ 8223 if (un->un_direct_priority_timeid != NULL) { 8224 timeout_id_t temp_id = un->un_direct_priority_timeid; 8225 un->un_direct_priority_timeid = NULL; 8226 mutex_exit(SD_MUTEX(un)); 8227 (void) untimeout(temp_id); 8228 mutex_enter(SD_MUTEX(un)); 8229 } 8230 8231 /* Cancel any pending start/stop timeouts */ 8232 if (un->un_startstop_timeid != NULL) { 8233 timeout_id_t temp_id = un->un_startstop_timeid; 8234 un->un_startstop_timeid = NULL; 8235 mutex_exit(SD_MUTEX(un)); 8236 (void) untimeout(temp_id); 8237 mutex_enter(SD_MUTEX(un)); 8238 } 8239 8240 /* Cancel any pending reset-throttle timeouts */ 8241 if (un->un_reset_throttle_timeid != NULL) { 8242 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8243 un->un_reset_throttle_timeid = NULL; 8244 mutex_exit(SD_MUTEX(un)); 8245 (void) untimeout(temp_id); 8246 mutex_enter(SD_MUTEX(un)); 8247 } 8248 8249 /* Cancel rmw warning message timeouts */ 8250 if (un->un_rmw_msg_timeid != NULL) { 8251 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8252 un->un_rmw_msg_timeid = NULL; 8253 mutex_exit(SD_MUTEX(un)); 8254 (void) untimeout(temp_id); 8255 mutex_enter(SD_MUTEX(un)); 8256 } 8257 8258 /* Cancel any pending retry timeouts */ 8259 if (un->un_retry_timeid != NULL) { 8260 timeout_id_t temp_id = un->un_retry_timeid; 8261 un->un_retry_timeid = NULL; 8262 mutex_exit(SD_MUTEX(un)); 8263 (void) untimeout(temp_id); 8264 mutex_enter(SD_MUTEX(un)); 8265 } 8266 8267 /* Cancel any pending delayed cv broadcast timeouts */ 8268 if (un->un_dcvb_timeid != NULL) { 8269 timeout_id_t temp_id = un->un_dcvb_timeid; 8270 un->un_dcvb_timeid = NULL; 8271 mutex_exit(SD_MUTEX(un)); 8272 (void) untimeout(temp_id); 8273 mutex_enter(SD_MUTEX(un)); 8274 } 8275 8276 mutex_exit(SD_MUTEX(un)); 8277 8278 /* There should not be any in-progress I/O so ASSERT this check */ 8279 ASSERT(un->un_ncmds_in_transport == 0); 8280 ASSERT(un->un_ncmds_in_driver == 0); 8281 8282 /* Do not free the softstate if the callback routine is active */ 8283 sd_sync_with_callback(un); 8284 8285 /* 8286 * Partition stats apparently are not used with removables. These would 8287 * not have been created during attach, so no need to clean them up... 8288 */ 8289 if (un->un_errstats != NULL) { 8290 kstat_delete(un->un_errstats); 8291 un->un_errstats = NULL; 8292 } 8293 8294 create_errstats_failed: 8295 8296 if (un->un_stats != NULL) { 8297 kstat_delete(un->un_stats); 8298 un->un_stats = NULL; 8299 } 8300 8301 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8302 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8303 8304 ddi_prop_remove_all(devi); 8305 sema_destroy(&un->un_semoclose); 8306 cv_destroy(&un->un_state_cv); 8307 8308 getrbuf_failed: 8309 8310 sd_free_rqs(un); 8311 8312 alloc_rqs_failed: 8313 8314 devp->sd_private = NULL; 8315 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8316 8317 get_softstate_failed: 8318 /* 8319 * Note: the man pages are unclear as to whether or not doing a 8320 * ddi_soft_state_free(sd_state, instance) is the right way to 8321 * clean up after the ddi_soft_state_zalloc() if the subsequent 8322 * ddi_get_soft_state() fails. The implication seems to be 8323 * that the get_soft_state cannot fail if the zalloc succeeds. 8324 */ 8325 #ifndef XPV_HVM_DRIVER 8326 ddi_soft_state_free(sd_state, instance); 8327 #endif /* !XPV_HVM_DRIVER */ 8328 8329 probe_failed: 8330 scsi_unprobe(devp); 8331 8332 return (DDI_FAILURE); 8333 } 8334 8335 8336 /* 8337 * Function: sd_unit_detach 8338 * 8339 * Description: Performs DDI_DETACH processing for sddetach(). 
8340 * 8341 * Return Code: DDI_SUCCESS 8342 * DDI_FAILURE 8343 * 8344 * Context: Kernel thread context 8345 */ 8346 8347 static int 8348 sd_unit_detach(dev_info_t *devi) 8349 { 8350 struct scsi_device *devp; 8351 struct sd_lun *un; 8352 int i; 8353 int tgt; 8354 dev_t dev; 8355 dev_info_t *pdip = ddi_get_parent(devi); 8356 #ifndef XPV_HVM_DRIVER 8357 int instance = ddi_get_instance(devi); 8358 #endif /* !XPV_HVM_DRIVER */ 8359 8360 mutex_enter(&sd_detach_mutex); 8361 8362 /* 8363 * Fail the detach for any of the following: 8364 * - Unable to get the sd_lun struct for the instance 8365 * - A layered driver has an outstanding open on the instance 8366 * - Another thread is already detaching this instance 8367 * - Another thread is currently performing an open 8368 */ 8369 devp = ddi_get_driver_private(devi); 8370 if ((devp == NULL) || 8371 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8372 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8373 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8374 mutex_exit(&sd_detach_mutex); 8375 return (DDI_FAILURE); 8376 } 8377 8378 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8379 8380 /* 8381 * Mark this instance as currently in a detach, to inhibit any 8382 * opens from a layered driver. 8383 */ 8384 un->un_detach_count++; 8385 mutex_exit(&sd_detach_mutex); 8386 8387 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 8388 SCSI_ADDR_PROP_TARGET, -1); 8389 8390 dev = sd_make_device(SD_DEVINFO(un)); 8391 8392 #ifndef lint 8393 _NOTE(COMPETING_THREADS_NOW); 8394 #endif 8395 8396 mutex_enter(SD_MUTEX(un)); 8397 8398 /* 8399 * Fail the detach if there are any outstanding layered 8400 * opens on this device. 8401 */ 8402 for (i = 0; i < NDKMAP; i++) { 8403 if (un->un_ocmap.lyropen[i] != 0) { 8404 goto err_notclosed; 8405 } 8406 } 8407 8408 /* 8409 * Verify there are NO outstanding commands issued to this device. 8410 * i.e., un_ncmds_in_transport == 0. 8411 * It's possible to have outstanding commands through the physio 8412 * code path, even though everything's closed. 8413 */ 8414 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8415 (un->un_direct_priority_timeid != NULL) || 8416 (un->un_state == SD_STATE_RWAIT)) { 8417 mutex_exit(SD_MUTEX(un)); 8418 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8419 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8420 goto err_stillbusy; 8421 } 8422 8423 /* 8424 * If we have the device reserved, release the reservation. 8425 */ 8426 if ((un->un_resvd_status & SD_RESERVE) && 8427 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8428 mutex_exit(SD_MUTEX(un)); 8429 /* 8430 * Note: sd_reserve_release sends a command to the device 8431 * via the sd_ioctlcmd() path, and can sleep. 8432 */ 8433 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8434 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8435 "sd_dr_detach: Cannot release reservation \n"); 8436 } 8437 } else { 8438 mutex_exit(SD_MUTEX(un)); 8439 } 8440 8441 /* 8442 * Untimeout any reserve recover, throttle reset, restart unit 8443 * and delayed broadcast timeout threads. Protect the timeout pointer 8444 * from getting nulled by their callback functions.
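 * Each cancellation below follows the same pattern: save the timeout
 * id, NULL the soft state field while holding SD_MUTEX, drop the
 * mutex, call untimeout(), then re-acquire the mutex.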
8445 */ 8446 mutex_enter(SD_MUTEX(un)); 8447 if (un->un_resvd_timeid != NULL) { 8448 timeout_id_t temp_id = un->un_resvd_timeid; 8449 un->un_resvd_timeid = NULL; 8450 mutex_exit(SD_MUTEX(un)); 8451 (void) untimeout(temp_id); 8452 mutex_enter(SD_MUTEX(un)); 8453 } 8454 8455 if (un->un_reset_throttle_timeid != NULL) { 8456 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8457 un->un_reset_throttle_timeid = NULL; 8458 mutex_exit(SD_MUTEX(un)); 8459 (void) untimeout(temp_id); 8460 mutex_enter(SD_MUTEX(un)); 8461 } 8462 8463 if (un->un_startstop_timeid != NULL) { 8464 timeout_id_t temp_id = un->un_startstop_timeid; 8465 un->un_startstop_timeid = NULL; 8466 mutex_exit(SD_MUTEX(un)); 8467 (void) untimeout(temp_id); 8468 mutex_enter(SD_MUTEX(un)); 8469 } 8470 8471 if (un->un_rmw_msg_timeid != NULL) { 8472 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8473 un->un_rmw_msg_timeid = NULL; 8474 mutex_exit(SD_MUTEX(un)); 8475 (void) untimeout(temp_id); 8476 mutex_enter(SD_MUTEX(un)); 8477 } 8478 8479 if (un->un_dcvb_timeid != NULL) { 8480 timeout_id_t temp_id = un->un_dcvb_timeid; 8481 un->un_dcvb_timeid = NULL; 8482 mutex_exit(SD_MUTEX(un)); 8483 (void) untimeout(temp_id); 8484 } else { 8485 mutex_exit(SD_MUTEX(un)); 8486 } 8487 8488 /* Remove any pending reservation reclaim requests for this device */ 8489 sd_rmv_resv_reclaim_req(dev); 8490 8491 mutex_enter(SD_MUTEX(un)); 8492 8493 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8494 if (un->un_direct_priority_timeid != NULL) { 8495 timeout_id_t temp_id = un->un_direct_priority_timeid; 8496 un->un_direct_priority_timeid = NULL; 8497 mutex_exit(SD_MUTEX(un)); 8498 (void) untimeout(temp_id); 8499 mutex_enter(SD_MUTEX(un)); 8500 } 8501 8502 /* Cancel any active multi-host disk watch thread requests */ 8503 if (un->un_mhd_token != NULL) { 8504 mutex_exit(SD_MUTEX(un)); 8505 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8506 if (scsi_watch_request_terminate(un->un_mhd_token, 8507 SCSI_WATCH_TERMINATE_NOWAIT)) { 8508 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8509 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8510 /* 8511 * Note: We are returning here after having removed 8512 * some driver timeouts above. This is consistent with 8513 * the legacy implementation but perhaps the watch 8514 * terminate call should be made with the wait flag set. 8515 */ 8516 goto err_stillbusy; 8517 } 8518 mutex_enter(SD_MUTEX(un)); 8519 un->un_mhd_token = NULL; 8520 } 8521 8522 if (un->un_swr_token != NULL) { 8523 mutex_exit(SD_MUTEX(un)); 8524 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8525 if (scsi_watch_request_terminate(un->un_swr_token, 8526 SCSI_WATCH_TERMINATE_NOWAIT)) { 8527 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8528 "sd_dr_detach: Cannot cancel swr watch request\n"); 8529 /* 8530 * Note: We are returning here after having removed 8531 * some driver timeouts above. This is consistent with 8532 * the legacy implementation but perhaps the watch 8533 * terminate call should be made with the wait flag set. 8534 */ 8535 goto err_stillbusy; 8536 } 8537 mutex_enter(SD_MUTEX(un)); 8538 un->un_swr_token = NULL; 8539 } 8540 8541 mutex_exit(SD_MUTEX(un)); 8542 8543 /* 8544 * Clear any scsi_reset_notifies. We clear the reset notifies 8545 * even if we have not registered one. 8546 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX!
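 * (Which is why SD_MUTEX must not be held across this call.)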
8547 */ 8548 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 8549 sd_mhd_reset_notify_cb, (caddr_t)un); 8550 8551 /* 8552 * protect the timeout pointers from getting nulled by 8553 * their callback functions during the cancellation process. 8554 * In such a scenario untimeout can be invoked with a null value. 8555 */ 8556 _NOTE(NO_COMPETING_THREADS_NOW); 8557 8558 mutex_enter(&un->un_pm_mutex); 8559 if (un->un_pm_idle_timeid != NULL) { 8560 timeout_id_t temp_id = un->un_pm_idle_timeid; 8561 un->un_pm_idle_timeid = NULL; 8562 mutex_exit(&un->un_pm_mutex); 8563 8564 /* 8565 * Timeout is active; cancel it. 8566 * Note that it'll never be active on a device 8567 * that does not support PM, therefore we don't 8568 * have to check before calling pm_idle_component. 8569 */ 8570 (void) untimeout(temp_id); 8571 (void) pm_idle_component(SD_DEVINFO(un), 0); 8572 mutex_enter(&un->un_pm_mutex); 8573 } 8574 8575 /* 8576 * Check whether there is already a timeout scheduled for power 8577 * management. If yes, then don't lower the power here; that's 8578 * the timeout handler's job. 8579 */ 8580 if (un->un_pm_timeid != NULL) { 8581 timeout_id_t temp_id = un->un_pm_timeid; 8582 un->un_pm_timeid = NULL; 8583 mutex_exit(&un->un_pm_mutex); 8584 /* 8585 * Timeout is active; cancel it. 8586 * Note that it'll never be active on a device 8587 * that does not support PM, therefore we don't 8588 * have to check before calling pm_idle_component. 8589 */ 8590 (void) untimeout(temp_id); 8591 (void) pm_idle_component(SD_DEVINFO(un), 0); 8592 8593 } else { 8594 mutex_exit(&un->un_pm_mutex); 8595 if ((un->un_f_pm_is_enabled == TRUE) && 8596 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 8597 DDI_SUCCESS)) { 8598 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8599 "sd_dr_detach: Lower power request failed, ignoring.\n"); 8600 /* 8601 * Fix for bug: 4297749, item # 13 8602 * The above test now includes a check to see if PM is 8603 * supported by this device before calling 8604 * pm_lower_power(). 8605 * Note, the following is not dead code. The call to 8606 * pm_lower_power above will generate a call back into 8607 * our sdpower routine which might result in a timeout 8608 * handler getting activated. Therefore the following 8609 * code is valid and necessary. 8610 */ 8611 mutex_enter(&un->un_pm_mutex); 8612 if (un->un_pm_timeid != NULL) { 8613 timeout_id_t temp_id = un->un_pm_timeid; 8614 un->un_pm_timeid = NULL; 8615 mutex_exit(&un->un_pm_mutex); 8616 (void) untimeout(temp_id); 8617 (void) pm_idle_component(SD_DEVINFO(un), 0); 8618 } else { 8619 mutex_exit(&un->un_pm_mutex); 8620 } 8621 } 8622 } 8623 8624 /* 8625 * Cleanup from the scsi_ifsetcap() calls (437868) 8626 * Relocated here from above to be after the call to 8627 * pm_lower_power, which was getting errors. 8628 */ 8629 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8630 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8631 8632 /* 8633 * Currently, tagged queuing is supported per target by the HBA. 8634 * Setting this per lun instance actually sets the capability of this 8635 * target in the HBA, which affects those luns already attached on the 8636 * same target. So during detach, we can disable this capability 8637 * only when this is the only lun left on this target. By doing 8638 * this, we assume a target has the same tagged queuing capability 8639 * for every lun. The condition can be removed when the HBA is changed 8640 * to support per-lun tagged queuing capability.
8641 */ 8642 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 8643 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8644 } 8645 8646 if (un->un_f_is_fibre == FALSE) { 8647 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8648 } 8649 8650 /* 8651 * Remove any event callbacks, fibre only 8652 */ 8653 if (un->un_f_is_fibre == TRUE) { 8654 if ((un->un_insert_event != NULL) && 8655 (ddi_remove_event_handler(un->un_insert_cb_id) != 8656 DDI_SUCCESS)) { 8657 /* 8658 * Note: We are returning here after having done 8659 * substantial cleanup above. This is consistent 8660 * with the legacy implementation but this may not 8661 * be the right thing to do. 8662 */ 8663 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8664 "sd_dr_detach: Cannot cancel insert event\n"); 8665 goto err_remove_event; 8666 } 8667 un->un_insert_event = NULL; 8668 8669 if ((un->un_remove_event != NULL) && 8670 (ddi_remove_event_handler(un->un_remove_cb_id) != 8671 DDI_SUCCESS)) { 8672 /* 8673 * Note: We are returning here after having done 8674 * substantial cleanup above. This is consistent 8675 * with the legacy implementation but this may not 8676 * be the right thing to do. 8677 */ 8678 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8679 "sd_dr_detach: Cannot cancel remove event\n"); 8680 goto err_remove_event; 8681 } 8682 un->un_remove_event = NULL; 8683 } 8684 8685 /* Do not free the softstate if the callback routine is active */ 8686 sd_sync_with_callback(un); 8687 8688 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 8689 cmlb_free_handle(&un->un_cmlbhandle); 8690 8691 /* 8692 * Hold the detach mutex here, to make sure that no other thread can 8693 * ever access a (partially) freed soft state structure. 8694 */ 8695 mutex_enter(&sd_detach_mutex); 8696 8697 /* 8698 * Clean up the soft state struct. 8699 * Cleanup is done in reverse order of allocs/inits. 8700 * At this point there should be no competing threads anymore. 8701 */ 8702 8703 scsi_fm_fini(devp); 8704 8705 /* 8706 * Deallocate memory for SCSI FMA. 8707 */ 8708 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8709 8710 /* 8711 * Unregister and free the device id if it was not registered 8712 * by the transport. 8713 */ 8714 if (un->un_f_devid_transport_defined == FALSE) 8715 ddi_devid_unregister(devi); 8716 8717 /* 8718 * Free the devid structure if it was allocated earlier (by 8719 * ddi_devid_init() or ddi_devid_get()). 8720 */ 8721 if (un->un_devid) { 8722 ddi_devid_free(un->un_devid); 8723 un->un_devid = NULL; 8724 } 8725 8726 /* 8727 * Destroy wmap cache if it exists. 8728 */ 8729 if (un->un_wm_cache != NULL) { 8730 kmem_cache_destroy(un->un_wm_cache); 8731 un->un_wm_cache = NULL; 8732 } 8733 8734 /* 8735 * kstat cleanup is done in detach for all device types (4363169). 8736 * We do not want to fail detach if the device kstats are not deleted 8737 * since there is confusion about the devo_refcnt for the device. 8738 * We just delete the kstats and let detach complete successfully.
8739 */ 8740 if (un->un_stats != NULL) { 8741 kstat_delete(un->un_stats); 8742 un->un_stats = NULL; 8743 } 8744 if (un->un_errstats != NULL) { 8745 kstat_delete(un->un_errstats); 8746 un->un_errstats = NULL; 8747 } 8748 8749 /* Remove partition stats */ 8750 if (un->un_f_pkstats_enabled) { 8751 for (i = 0; i < NSDMAP; i++) { 8752 if (un->un_pstats[i] != NULL) { 8753 kstat_delete(un->un_pstats[i]); 8754 un->un_pstats[i] = NULL; 8755 } 8756 } 8757 } 8758 8759 /* Remove xbuf registration */ 8760 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8761 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8762 8763 /* Remove driver properties */ 8764 ddi_prop_remove_all(devi); 8765 8766 mutex_destroy(&un->un_pm_mutex); 8767 cv_destroy(&un->un_pm_busy_cv); 8768 8769 cv_destroy(&un->un_wcc_cv); 8770 8771 /* Open/close semaphore */ 8772 sema_destroy(&un->un_semoclose); 8773 8774 /* Removable media condvar. */ 8775 cv_destroy(&un->un_state_cv); 8776 8777 /* Suspend/resume condvar. */ 8778 cv_destroy(&un->un_suspend_cv); 8779 cv_destroy(&un->un_disk_busy_cv); 8780 8781 sd_free_rqs(un); 8782 8783 /* Free up soft state */ 8784 devp->sd_private = NULL; 8785 8786 bzero(un, sizeof (struct sd_lun)); 8787 #ifndef XPV_HVM_DRIVER 8788 ddi_soft_state_free(sd_state, instance); 8789 #endif /* !XPV_HVM_DRIVER */ 8790 8791 mutex_exit(&sd_detach_mutex); 8792 8793 /* This frees up the INQUIRY data associated with the device. */ 8794 scsi_unprobe(devp); 8795 8796 /* 8797 * After successfully detaching an instance, we update the information 8798 * of how many luns have been attached on the relevant target and 8799 * controller for parallel SCSI. This information is used when sd tries 8800 * to set the tagged queuing capability in the HBA. 8801 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8802 * check if the device is parallel SCSI. However, we don't need to 8803 * check here because we've already checked during attach. No device 8804 * that is not parallel SCSI is in the chain. 8805 */ 8806 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8807 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8808 } 8809 8810 return (DDI_SUCCESS); 8811 8812 err_notclosed: 8813 mutex_exit(SD_MUTEX(un)); 8814 8815 err_stillbusy: 8816 _NOTE(NO_COMPETING_THREADS_NOW); 8817 8818 err_remove_event: 8819 mutex_enter(&sd_detach_mutex); 8820 un->un_detach_count--; 8821 mutex_exit(&sd_detach_mutex); 8822 8823 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 8824 return (DDI_FAILURE); 8825 } 8826 8827 8828 /* 8829 * Function: sd_create_errstats 8830 * 8831 * Description: This routine instantiates the device error stats. 8832 * 8833 * Note: During attach the stats are instantiated first so they are 8834 * available for attach-time routines that utilize the driver 8835 * iopath to send commands to the device. The stats are initialized 8836 * separately so data obtained during some attach-time routines is 8837 * available.
(4362483) 8838 * 8839 * Arguments: un - driver soft state (unit) structure 8840 * instance - driver instance 8841 * 8842 * Context: Kernel thread context 8843 */ 8844 8845 static void 8846 sd_create_errstats(struct sd_lun *un, int instance) 8847 { 8848 struct sd_errstats *stp; 8849 char kstatmodule_err[KSTAT_STRLEN]; 8850 char kstatname[KSTAT_STRLEN]; 8851 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8852 8853 ASSERT(un != NULL); 8854 8855 if (un->un_errstats != NULL) { 8856 return; 8857 } 8858 8859 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8860 "%serr", sd_label); 8861 (void) snprintf(kstatname, sizeof (kstatname), 8862 "%s%d,err", sd_label, instance); 8863 8864 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8865 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8866 8867 if (un->un_errstats == NULL) { 8868 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8869 "sd_create_errstats: Failed kstat_create\n"); 8870 return; 8871 } 8872 8873 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8874 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8875 KSTAT_DATA_UINT32); 8876 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8877 KSTAT_DATA_UINT32); 8878 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8879 KSTAT_DATA_UINT32); 8880 kstat_named_init(&stp->sd_vid, "Vendor", 8881 KSTAT_DATA_CHAR); 8882 kstat_named_init(&stp->sd_pid, "Product", 8883 KSTAT_DATA_CHAR); 8884 kstat_named_init(&stp->sd_revision, "Revision", 8885 KSTAT_DATA_CHAR); 8886 kstat_named_init(&stp->sd_serial, "Serial No", 8887 KSTAT_DATA_CHAR); 8888 kstat_named_init(&stp->sd_capacity, "Size", 8889 KSTAT_DATA_ULONGLONG); 8890 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8891 KSTAT_DATA_UINT32); 8892 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8893 KSTAT_DATA_UINT32); 8894 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8895 KSTAT_DATA_UINT32); 8896 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8897 KSTAT_DATA_UINT32); 8898 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8899 KSTAT_DATA_UINT32); 8900 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8901 KSTAT_DATA_UINT32); 8902 8903 un->un_errstats->ks_private = un; 8904 un->un_errstats->ks_update = nulldev; 8905 8906 kstat_install(un->un_errstats); 8907 } 8908 8909 8910 /* 8911 * Function: sd_set_errstats 8912 * 8913 * Description: This routine sets the value of the vendor id, product id, 8914 * revision, serial number, and capacity device error stats. 8915 * 8916 * Note: During attach the stats are instantiated first so they are 8917 * available for attach-time routines that utilize the driver 8918 * iopath to send commands to the device. The stats are initialized 8919 * separately so data obtained during some attach-time routines is 8920 * available. 
(4362483)
8921  *
8922  * Arguments: un - driver soft state (unit) structure
8923  *
8924  * Context: Kernel thread context
8925  */
8926 
8927 static void
8928 sd_set_errstats(struct sd_lun *un)
8929 {
8930 	struct sd_errstats *stp;
8931 
8932 	ASSERT(un != NULL);
8933 	ASSERT(un->un_errstats != NULL);
8934 	stp = (struct sd_errstats *)un->un_errstats->ks_data;
8935 	ASSERT(stp != NULL);
8936 	(void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
8937 	(void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
8938 	(void) strncpy(stp->sd_revision.value.c,
8939 	    un->un_sd->sd_inq->inq_revision, 4);
8940 
8941 	/*
8942 	 * All the errstats are persistent across detach/attach,
8943 	 * so reset them all here in case a disk drive has been
8944 	 * hot-replaced; skip the reset for Sun-qualified drives
8945 	 * whose identity has not changed.
8946 	 */
8947 	if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
8948 	    (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
8949 	    sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
8950 		stp->sd_softerrs.value.ui32 = 0;
8951 		stp->sd_harderrs.value.ui32 = 0;
8952 		stp->sd_transerrs.value.ui32 = 0;
8953 		stp->sd_rq_media_err.value.ui32 = 0;
8954 		stp->sd_rq_ntrdy_err.value.ui32 = 0;
8955 		stp->sd_rq_nodev_err.value.ui32 = 0;
8956 		stp->sd_rq_recov_err.value.ui32 = 0;
8957 		stp->sd_rq_illrq_err.value.ui32 = 0;
8958 		stp->sd_rq_pfa_err.value.ui32 = 0;
8959 	}
8960 
8961 	/*
8962 	 * Set the "Serial No" kstat for Sun-qualified drives (indicated by
8963 	 * "SUN" in bytes 25-27 of the inquiry data, i.e. bytes 9-11 of the
8964 	 * pid) (4376302).
8965 	 */
8966 	if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
8967 		bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
8968 		    sizeof (SD_INQUIRY(un)->inq_serial));
8969 	}
8970 
8971 	if (un->un_f_blockcount_is_valid != TRUE) {
8972 		/*
8973 		 * Set capacity error stat to 0 for no media. This ensures
8974 		 * a valid capacity is displayed in response to 'iostat -E'
8975 		 * when no media is present in the device.
8976 		 */
8977 		stp->sd_capacity.value.ui64 = 0;
8978 	} else {
8979 		/*
8980 		 * Multiply un_blockcount by un->un_sys_blocksize to get
8981 		 * capacity.
8982 		 *
8983 		 * Note: for non-512 blocksize devices "un_blockcount" has been
8984 		 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
8985 		 * (un_tgt_blocksize / un->un_sys_blocksize).
8986 		 */
8987 		stp->sd_capacity.value.ui64 = (uint64_t)
8988 		    ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
8989 	}
8990 }
8991 
8992 
8993 /*
8994  * Function: sd_set_pstats
8995  *
8996  * Description: This routine instantiates and initializes the partition
8997  *		stats for each partition with more than zero blocks.
8998  *		(4363169)
8999  *
9000  * Arguments: un - driver soft state (unit) structure
9001  *
9002  * Context: Kernel thread context
9003  */
9004 
9005 static void
9006 sd_set_pstats(struct sd_lun *un)
9007 {
9008 	char kstatname[KSTAT_STRLEN];
9009 	int instance;
9010 	int i;
9011 	diskaddr_t nblks = 0;
9012 	char *partname = NULL;
9013 
9014 	ASSERT(un != NULL);
9015 
9016 	instance = ddi_get_instance(SD_DEVINFO(un));
9017 
9018 	/* Note:x86: is this a VTOC8/VTOC16 difference?
	 */
9019 	for (i = 0; i < NSDMAP; i++) {
9020 
9021 		if (cmlb_partinfo(un->un_cmlbhandle, i,
9022 		    &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
9023 			continue;
9024 		mutex_enter(SD_MUTEX(un));
9025 
9026 		if ((un->un_pstats[i] == NULL) &&
9027 		    (nblks != 0)) {
9028 
9029 			(void) snprintf(kstatname, sizeof (kstatname),
9030 			    "%s%d,%s", sd_label, instance,
9031 			    partname);
9032 
9033 			un->un_pstats[i] = kstat_create(sd_label,
9034 			    instance, kstatname, "partition", KSTAT_TYPE_IO,
9035 			    1, KSTAT_FLAG_PERSISTENT);
9036 			if (un->un_pstats[i] != NULL) {
9037 				un->un_pstats[i]->ks_lock = SD_MUTEX(un);
9038 				kstat_install(un->un_pstats[i]);
9039 			}
9040 		}
9041 		mutex_exit(SD_MUTEX(un));
9042 	}
9043 }
9044 
9045 
9046 #if (defined(__fibre))
9047 /*
9048  * Function: sd_init_event_callbacks
9049  *
9050  * Description: This routine initializes the insertion and removal event
9051  *		callbacks. (fibre only)
9052  *
9053  * Arguments: un - driver soft state (unit) structure
9054  *
9055  * Context: Kernel thread context
9056  */
9057 
9058 static void
9059 sd_init_event_callbacks(struct sd_lun *un)
9060 {
9061 	ASSERT(un != NULL);
9062 
9063 	if ((un->un_insert_event == NULL) &&
9064 	    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
9065 	    &un->un_insert_event) == DDI_SUCCESS)) {
9066 		/*
9067 		 * Add the callback for an insertion event
9068 		 */
9069 		(void) ddi_add_event_handler(SD_DEVINFO(un),
9070 		    un->un_insert_event, sd_event_callback, (void *)un,
9071 		    &(un->un_insert_cb_id));
9072 	}
9073 
9074 	if ((un->un_remove_event == NULL) &&
9075 	    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
9076 	    &un->un_remove_event) == DDI_SUCCESS)) {
9077 		/*
9078 		 * Add the callback for a removal event
9079 		 */
9080 		(void) ddi_add_event_handler(SD_DEVINFO(un),
9081 		    un->un_remove_event, sd_event_callback, (void *)un,
9082 		    &(un->un_remove_cb_id));
9083 	}
9084 }
9085 
9086 
9087 /*
9088  * Function: sd_event_callback
9089  *
9090  * Description: This routine handles insert/remove events (photon). The
9091  *		state is changed to OFFLINE, which can be used to suppress
9092  *		error messages. (fibre only)
9093  *
9094  * Arguments: un - driver soft state (unit) structure
9095  *
9096  * Context: Callout thread context
9097  */
9098 /* ARGSUSED */
9099 static void
9100 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
9101     void *bus_impldata)
9102 {
9103 	struct sd_lun *un = (struct sd_lun *)arg;
9104 
9105 	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
9106 	if (event == un->un_insert_event) {
9107 		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
9108 		mutex_enter(SD_MUTEX(un));
9109 		if (un->un_state == SD_STATE_OFFLINE) {
9110 			if (un->un_last_state != SD_STATE_SUSPENDED) {
9111 				un->un_state = un->un_last_state;
9112 			} else {
9113 				/*
9114 				 * We have gone through SUSPEND/RESUME while
9115 				 * we were offline. Restore the last state.
9116 				 */
9117 				un->un_state = un->un_save_state;
9118 			}
9119 		}
9120 		mutex_exit(SD_MUTEX(un));
9121 
9122 		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
9123 	} else if (event == un->un_remove_event) {
9124 		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
9125 		mutex_enter(SD_MUTEX(un));
9126 		/*
9127 		 * We need to handle an event callback that occurs during
9128 		 * the suspend operation, since we don't prevent it.
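		 * In particular, if a removal arrives while the device is
		 * SUSPENDED, only un_last_state is updated below, so the
		 * resume path later restores OFFLINE instead of clobbering
		 * the suspend/resume handling.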
9129 		 */
9130 		if (un->un_state != SD_STATE_OFFLINE) {
9131 			if (un->un_state != SD_STATE_SUSPENDED) {
9132 				New_state(un, SD_STATE_OFFLINE);
9133 			} else {
9134 				un->un_last_state = SD_STATE_OFFLINE;
9135 			}
9136 		}
9137 		mutex_exit(SD_MUTEX(un));
9138 	} else {
9139 		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
9140 		    "!Unknown event\n");
9141 	}
9142 
9143 }
9144 #endif
9145 
9146 /*
9147  * Function: sd_cache_control()
9148  *
9149  * Description: This routine is the driver entry point for setting
9150  *		read and write caching by modifying the WCE (write cache
9151  *		enable) and RCD (read cache disable) bits of mode
9152  *		page 8 (MODEPAGE_CACHING).
9153  *
9154  * Arguments: ssc - ssc contains pointer to driver soft state (unit)
9155  *		structure for this target.
9156  *		rcd_flag - flag for controlling the read cache
9157  *		wce_flag - flag for controlling the write cache
9158  *
9159  * Return Code: EIO
9160  *		code returned by sd_send_scsi_MODE_SENSE and
9161  *		sd_send_scsi_MODE_SELECT
9162  *
9163  * Context: Kernel Thread
9164  */
9165 
9166 static int
9167 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag)
9168 {
9169 	struct mode_caching *mode_caching_page;
9170 	uchar_t *header;
9171 	size_t buflen;
9172 	int hdrlen;
9173 	int bd_len;
9174 	int rval = 0;
9175 	struct mode_header_grp2 *mhp;
9176 	struct sd_lun *un;
9177 	int status;
9178 
9179 	ASSERT(ssc != NULL);
9180 	un = ssc->ssc_un;
9181 	ASSERT(un != NULL);
9182 
9183 	/*
9184 	 * Do a test unit ready, otherwise a mode sense may not work if this
9185 	 * is the first command sent to the device after boot.
9186 	 */
9187 	status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
9188 	if (status != 0)
9189 		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9190 
9191 	if (un->un_f_cfg_is_atapi == TRUE) {
9192 		hdrlen = MODE_HEADER_LENGTH_GRP2;
9193 	} else {
9194 		hdrlen = MODE_HEADER_LENGTH;
9195 	}
9196 
9197 	/*
9198 	 * Allocate memory for the retrieved mode page and its headers. Set
9199 	 * a pointer to the page itself. Use mode_cache_scsi3 to ensure
9200 	 * we get all of the mode sense data; otherwise the mode select
9201 	 * will fail. mode_cache_scsi3 is a superset of mode_caching.
9202 	 */
9203 	buflen = hdrlen + MODE_BLK_DESC_LENGTH +
9204 	    sizeof (struct mode_cache_scsi3);
9205 
9206 	header = kmem_zalloc(buflen, KM_SLEEP);
9207 
9208 	/* Get the information from the device. */
9209 	if (un->un_f_cfg_is_atapi == TRUE) {
9210 		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen,
9211 		    MODEPAGE_CACHING, SD_PATH_DIRECT);
9212 	} else {
9213 		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
9214 		    MODEPAGE_CACHING, SD_PATH_DIRECT);
9215 	}
9216 
9217 	if (rval != 0) {
9218 		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
9219 		    "sd_cache_control: Mode Sense Failed\n");
9220 		goto mode_sense_failed;
9221 	}
9222 
9223 	/*
9224 	 * Determine size of Block Descriptors in order to locate
9225 	 * the mode page data. ATAPI devices return 0, SCSI devices
9226 	 * should return MODE_BLK_DESC_LENGTH.
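	 * For a typical SCSI disk, for example, bd_len comes back as 8
	 * (MODE_BLK_DESC_LENGTH), so the caching page is located at
	 * header + hdrlen + 8; anything larger is rejected below as
	 * invalid sense data.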
9227 */ 9228 if (un->un_f_cfg_is_atapi == TRUE) { 9229 mhp = (struct mode_header_grp2 *)header; 9230 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9231 } else { 9232 bd_len = ((struct mode_header *)header)->bdesc_length; 9233 } 9234 9235 if (bd_len > MODE_BLK_DESC_LENGTH) { 9236 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0, 9237 "sd_cache_control: Mode Sense returned invalid block " 9238 "descriptor length\n"); 9239 rval = EIO; 9240 goto mode_sense_failed; 9241 } 9242 9243 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9244 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9245 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 9246 "sd_cache_control: Mode Sense caching page code mismatch " 9247 "%d\n", mode_caching_page->mode_page.code); 9248 rval = EIO; 9249 goto mode_sense_failed; 9250 } 9251 9252 /* Check the relevant bits on successful mode sense. */ 9253 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 9254 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 9255 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 9256 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 9257 9258 size_t sbuflen; 9259 uchar_t save_pg; 9260 9261 /* 9262 * Construct select buffer length based on the 9263 * length of the sense data returned. 9264 */ 9265 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 9266 sizeof (struct mode_page) + 9267 (int)mode_caching_page->mode_page.length; 9268 9269 /* 9270 * Set the caching bits as requested. 9271 */ 9272 if (rcd_flag == SD_CACHE_ENABLE) 9273 mode_caching_page->rcd = 0; 9274 else if (rcd_flag == SD_CACHE_DISABLE) 9275 mode_caching_page->rcd = 1; 9276 9277 if (wce_flag == SD_CACHE_ENABLE) 9278 mode_caching_page->wce = 1; 9279 else if (wce_flag == SD_CACHE_DISABLE) 9280 mode_caching_page->wce = 0; 9281 9282 /* 9283 * Save the page if the mode sense says the 9284 * drive supports it. 9285 */ 9286 save_pg = mode_caching_page->mode_page.ps ? 9287 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 9288 9289 /* Clear reserved bits before mode select. */ 9290 mode_caching_page->mode_page.ps = 0; 9291 9292 /* 9293 * Clear out mode header for mode select. 9294 * The rest of the retrieved page will be reused. 9295 */ 9296 bzero(header, hdrlen); 9297 9298 if (un->un_f_cfg_is_atapi == TRUE) { 9299 mhp = (struct mode_header_grp2 *)header; 9300 mhp->bdesc_length_hi = bd_len >> 8; 9301 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 9302 } else { 9303 ((struct mode_header *)header)->bdesc_length = bd_len; 9304 } 9305 9306 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9307 9308 /* Issue mode select to change the cache settings */ 9309 if (un->un_f_cfg_is_atapi == TRUE) { 9310 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, header, 9311 sbuflen, save_pg, SD_PATH_DIRECT); 9312 } else { 9313 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 9314 sbuflen, save_pg, SD_PATH_DIRECT); 9315 } 9316 9317 } 9318 9319 9320 mode_sense_failed: 9321 9322 kmem_free(header, buflen); 9323 9324 if (rval != 0) { 9325 if (rval == EIO) 9326 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9327 else 9328 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9329 } 9330 return (rval); 9331 } 9332 9333 9334 /* 9335 * Function: sd_get_write_cache_enabled() 9336 * 9337 * Description: This routine is the driver entry point for determining if 9338 * write caching is enabled. It examines the WCE (write cache 9339 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
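 *		A hypothetical caller might use it as follows (sketch only;
 *		the names are illustrative, and the locking/ordering notes
 *		below still apply):
 *
 *			int wce;
 *			if (sd_get_write_cache_enabled(ssc, &wce) == 0)
 *				un->un_f_write_cache_enabled = (wce != 0);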
9340 * 9341 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 9342 * structure for this target. 9343 * is_enabled - pointer to int where write cache enabled state 9344 * is returned (non-zero -> write cache enabled) 9345 * 9346 * 9347 * Return Code: EIO 9348 * code returned by sd_send_scsi_MODE_SENSE 9349 * 9350 * Context: Kernel Thread 9351 * 9352 * NOTE: If ioctl is added to disable write cache, this sequence should 9353 * be followed so that no locking is required for accesses to 9354 * un->un_f_write_cache_enabled: 9355 * do mode select to clear wce 9356 * do synchronize cache to flush cache 9357 * set un->un_f_write_cache_enabled = FALSE 9358 * 9359 * Conversely, an ioctl to enable the write cache should be done 9360 * in this order: 9361 * set un->un_f_write_cache_enabled = TRUE 9362 * do mode select to set wce 9363 */ 9364 9365 static int 9366 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled) 9367 { 9368 struct mode_caching *mode_caching_page; 9369 uchar_t *header; 9370 size_t buflen; 9371 int hdrlen; 9372 int bd_len; 9373 int rval = 0; 9374 struct sd_lun *un; 9375 int status; 9376 9377 ASSERT(ssc != NULL); 9378 un = ssc->ssc_un; 9379 ASSERT(un != NULL); 9380 ASSERT(is_enabled != NULL); 9381 9382 /* in case of error, flag as enabled */ 9383 *is_enabled = TRUE; 9384 9385 /* 9386 * Do a test unit ready, otherwise a mode sense may not work if this 9387 * is the first command sent to the device after boot. 9388 */ 9389 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 9390 9391 if (status != 0) 9392 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9393 9394 if (un->un_f_cfg_is_atapi == TRUE) { 9395 hdrlen = MODE_HEADER_LENGTH_GRP2; 9396 } else { 9397 hdrlen = MODE_HEADER_LENGTH; 9398 } 9399 9400 /* 9401 * Allocate memory for the retrieved mode page and its headers. Set 9402 * a pointer to the page itself. 9403 */ 9404 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 9405 header = kmem_zalloc(buflen, KM_SLEEP); 9406 9407 /* Get the information from the device. */ 9408 if (un->un_f_cfg_is_atapi == TRUE) { 9409 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 9410 MODEPAGE_CACHING, SD_PATH_DIRECT); 9411 } else { 9412 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 9413 MODEPAGE_CACHING, SD_PATH_DIRECT); 9414 } 9415 9416 if (rval != 0) { 9417 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9418 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 9419 goto mode_sense_failed; 9420 } 9421 9422 /* 9423 * Determine size of Block Descriptors in order to locate 9424 * the mode page data. ATAPI devices return 0, SCSI devices 9425 * should return MODE_BLK_DESC_LENGTH. 
9426 	 */
9427 	if (un->un_f_cfg_is_atapi == TRUE) {
9428 		struct mode_header_grp2 *mhp;
9429 		mhp = (struct mode_header_grp2 *)header;
9430 		bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
9431 	} else {
9432 		bd_len = ((struct mode_header *)header)->bdesc_length;
9433 	}
9434 
9435 	if (bd_len > MODE_BLK_DESC_LENGTH) {
9436 		/* FMA should make upset complain here */
9437 		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
9438 		    "sd_get_write_cache_enabled: Mode Sense returned invalid "
9439 		    "block descriptor length\n");
9440 		rval = EIO;
9441 		goto mode_sense_failed;
9442 	}
9443 
9444 	mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
9445 	if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
9446 		/* FMA could make upset complain here */
9447 		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
9448 		    "sd_get_write_cache_enabled: Mode Sense caching page "
9449 		    "code mismatch %d\n", mode_caching_page->mode_page.code);
9450 		rval = EIO;
9451 		goto mode_sense_failed;
9452 	}
9453 	*is_enabled = mode_caching_page->wce;
9454 
9455 mode_sense_failed:
9456 	if (rval == 0) {
9457 		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
9458 	} else if (rval == EIO) {
9459 		/*
9460 		 * Some disks do not support mode sense(6); we should
9461 		 * ignore this kind of error (sense key is
9462 		 * 0x5 - ILLEGAL REQUEST).
9463 		 */
9464 		uint8_t *sensep;
9465 		int senlen;
9466 
9467 		sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
9468 		senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
9469 		    ssc->ssc_uscsi_cmd->uscsi_rqresid);
9470 
9471 		if (senlen > 0 &&
9472 		    scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
9473 			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
9474 		} else {
9475 			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
9476 		}
9477 	} else {
9478 		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9479 	}
9480 	kmem_free(header, buflen);
9481 	return (rval);
9482 }
9483 
9484 /*
9485  * Function: sd_get_nv_sup()
9486  *
9487  * Description: This routine is the driver entry point for
9488  *		determining whether non-volatile cache is supported. This
9489  *		determination process works as follows:
9490  *
9491  *		1. sd first queries sd.conf to see whether the
9492  *		suppress_cache_flush bit is set for this device.
9493  *
9494  *		2. If it is not set there, sd queries the internal disk table.
9495  *
9496  *		3. If either sd.conf or the internal disk table specifies
9497  *		that cache flushes be suppressed, we don't bother checking
9498  *		the NV_SUP bit.
9499  *
9500  *		If the SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries
9501  *		the optional INQUIRY VPD page 0x86. If the device
9502  *		supports VPD page 0x86, sd examines the NV_SUP
9503  *		(non-volatile cache support) bit in the INQUIRY VPD page
9504  *		0x86:
9505  *		o If the NV_SUP bit is set, sd assumes the device has a
9506  *		non-volatile cache and sets
9507  *		un_f_sync_nv_supported to TRUE.
9508  *		o Otherwise the cache is not non-volatile, and
9509  *		un_f_sync_nv_supported is set to FALSE.
9510  *
9511  * Arguments: ssc - ssc contains pointer to driver soft state (unit) structure
9512  *
9513  * Return Code: none
9514  *
9515  * Context: Kernel Thread
9516  */
9517 
9518 static void
9519 sd_get_nv_sup(sd_ssc_t *ssc)
9520 {
9521 	int rval = 0;
9522 	uchar_t *inq86 = NULL;
9523 	size_t inq86_len = MAX_INQUIRY_SIZE;
9524 	size_t inq86_resid = 0;
9525 	struct dk_callback *dkc;
9526 	struct sd_lun *un;
9527 
9528 	ASSERT(ssc != NULL);
9529 	un = ssc->ssc_un;
9530 	ASSERT(un != NULL);
9531 
9532 	mutex_enter(SD_MUTEX(un));
9533 
9534 	/*
9535 	 * Be conservative on the device's support of
9536 	 * the SYNC_NV bit: un_f_sync_nv_supported is
9537 	 * initialized to be false.
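	 * (In the Extended INQUIRY Data VPD page, NV_SUP is a single bit
	 * in byte 6 of the page; that is the SD_VPD_NV_SUP test applied
	 * to inq86[6] below.)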
9538 	 */
9539 	un->un_f_sync_nv_supported = FALSE;
9540 
9541 	/*
9542 	 * If either sd.conf or the internal disk table
9543 	 * specifies that cache flushes be suppressed,
9544 	 * we don't bother checking the NV_SUP bit.
9545 	 */
9546 	if (un->un_f_suppress_cache_flush == TRUE) {
9547 		mutex_exit(SD_MUTEX(un));
9548 		return;
9549 	}
9550 
9551 	if (sd_check_vpd_page_support(ssc) == 0 &&
9552 	    un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
9553 		mutex_exit(SD_MUTEX(un));
9554 		/* collect page 86 data if available */
9555 		inq86 = kmem_zalloc(inq86_len, KM_SLEEP);
9556 
9557 		rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len,
9558 		    0x01, 0x86, &inq86_resid);
9559 
9560 		if (rval == 0 && (inq86_len - inq86_resid > 6)) {
9561 			SD_TRACE(SD_LOG_COMMON, un,
9562 			    "sd_get_nv_sup: \
9563 successfully get VPD page: %x \
9564 PAGE LENGTH: %x BYTE 6: %x\n",
9565 			    inq86[1], inq86[3], inq86[6]);
9566 
9567 			mutex_enter(SD_MUTEX(un));
9568 			/*
9569 			 * Check the value of the NV_SUP bit:
9570 			 * un_f_sync_nv_supported is set to TRUE
9571 			 * only if the device reports NV_SUP as 1.
9572 			 */
9573 			if (inq86[6] & SD_VPD_NV_SUP) {
9574 				un->un_f_sync_nv_supported = TRUE;
9575 			}
9576 			mutex_exit(SD_MUTEX(un));
9577 		} else if (rval != 0) {
9578 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9579 		}
9580 
9581 		kmem_free(inq86, inq86_len);
9582 	} else {
9583 		mutex_exit(SD_MUTEX(un));
9584 	}
9585 
9586 	/*
9587 	 * Send a SYNC CACHE command to check whether the
9588 	 * SYNC_NV bit is supported; by this point
9589 	 * un_f_sync_nv_supported must hold the correct value.
9590 	 */
9591 	mutex_enter(SD_MUTEX(un));
9592 	if (un->un_f_sync_nv_supported) {
9593 		mutex_exit(SD_MUTEX(un));
9594 		dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
9595 		dkc->dkc_flag = FLUSH_VOLATILE;
9596 		(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
9597 
9598 		/*
9599 		 * Send a TEST UNIT READY command to the device. This should
9600 		 * clear any outstanding UNIT ATTENTION that may be present.
9601 		 */
9602 		rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
9603 		if (rval != 0)
9604 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9605 
9606 		kmem_free(dkc, sizeof (struct dk_callback));
9607 	} else {
9608 		mutex_exit(SD_MUTEX(un));
9609 	}
9610 
9611 	SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \
9612 un_f_suppress_cache_flush is set to %d\n",
9613 	    un->un_f_suppress_cache_flush);
9614 }
9615 
9616 /*
9617  * Function: sd_make_device
9618  *
9619  * Description: Utility routine to return the Solaris device number from
9620  *		the data in the device's dev_info structure.
9621  *
9622  * Return Code: The Solaris device number
9623  *
9624  * Context: Any
9625  */
9626 
9627 static dev_t
9628 sd_make_device(dev_info_t *devi)
9629 {
9630 	return (makedevice(ddi_driver_major(devi),
9631 	    ddi_get_instance(devi) << SDUNIT_SHIFT));
9632 }
9633 
9634 
9635 /*
9636  * Function: sd_pm_entry
9637  *
9638  * Description: Called at the start of a new command to manage power
9639  *		and busy status of a device. This includes determining whether
9640  *		the current power state of the device is sufficient for
9641  *		performing the command or whether it must be changed.
9642  *		The PM framework is notified appropriately.
9643  *		Only with a return status of DDI_SUCCESS will the
9644  *		component be marked busy to the framework.
9645  *
9646  *		All callers of sd_pm_entry must check the return status
9647  *		and only call sd_pm_exit if it was DDI_SUCCESS. A status
9648  *		of DDI_FAILURE indicates the device failed to power up.
9649  *		In this case un_pm_count has been adjusted so the result
9650  *		on exit is still powered down, i.e.
count is less than 0. 9651 * Calling sd_pm_exit with this count value hits an ASSERT. 9652 * 9653 * Return Code: DDI_SUCCESS or DDI_FAILURE 9654 * 9655 * Context: Kernel thread context. 9656 */ 9657 9658 static int 9659 sd_pm_entry(struct sd_lun *un) 9660 { 9661 int return_status = DDI_SUCCESS; 9662 9663 ASSERT(!mutex_owned(SD_MUTEX(un))); 9664 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9665 9666 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 9667 9668 if (un->un_f_pm_is_enabled == FALSE) { 9669 SD_TRACE(SD_LOG_IO_PM, un, 9670 "sd_pm_entry: exiting, PM not enabled\n"); 9671 return (return_status); 9672 } 9673 9674 /* 9675 * Just increment a counter if PM is enabled. On the transition from 9676 * 0 ==> 1, mark the device as busy. The iodone side will decrement 9677 * the count with each IO and mark the device as idle when the count 9678 * hits 0. 9679 * 9680 * If the count is less than 0 the device is powered down. If a powered 9681 * down device is successfully powered up then the count must be 9682 * incremented to reflect the power up. Note that it'll get incremented 9683 * a second time to become busy. 9684 * 9685 * Because the following has the potential to change the device state 9686 * and must release the un_pm_mutex to do so, only one thread can be 9687 * allowed through at a time. 9688 */ 9689 9690 mutex_enter(&un->un_pm_mutex); 9691 while (un->un_pm_busy == TRUE) { 9692 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 9693 } 9694 un->un_pm_busy = TRUE; 9695 9696 if (un->un_pm_count < 1) { 9697 9698 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 9699 9700 /* 9701 * Indicate we are now busy so the framework won't attempt to 9702 * power down the device. This call will only fail if either 9703 * we passed a bad component number or the device has no 9704 * components. Neither of these should ever happen. 9705 */ 9706 mutex_exit(&un->un_pm_mutex); 9707 return_status = pm_busy_component(SD_DEVINFO(un), 0); 9708 ASSERT(return_status == DDI_SUCCESS); 9709 9710 mutex_enter(&un->un_pm_mutex); 9711 9712 if (un->un_pm_count < 0) { 9713 mutex_exit(&un->un_pm_mutex); 9714 9715 SD_TRACE(SD_LOG_IO_PM, un, 9716 "sd_pm_entry: power up component\n"); 9717 9718 /* 9719 * pm_raise_power will cause sdpower to be called 9720 * which brings the device power level to the 9721 * desired state, ON in this case. If successful, 9722 * un_pm_count and un_power_level will be updated 9723 * appropriately. 9724 */ 9725 return_status = pm_raise_power(SD_DEVINFO(un), 0, 9726 SD_SPINDLE_ON); 9727 9728 mutex_enter(&un->un_pm_mutex); 9729 9730 if (return_status != DDI_SUCCESS) { 9731 /* 9732 * Power up failed. 9733 * Idle the device and adjust the count 9734 * so the result on exit is that we're 9735 * still powered down, ie. count is less than 0. 9736 */ 9737 SD_TRACE(SD_LOG_IO_PM, un, 9738 "sd_pm_entry: power up failed," 9739 " idle the component\n"); 9740 9741 (void) pm_idle_component(SD_DEVINFO(un), 0); 9742 un->un_pm_count--; 9743 } else { 9744 /* 9745 * Device is powered up, verify the 9746 * count is non-negative. 9747 * This is debug only. 9748 */ 9749 ASSERT(un->un_pm_count == 0); 9750 } 9751 } 9752 9753 if (return_status == DDI_SUCCESS) { 9754 /* 9755 * For performance, now that the device has been tagged 9756 * as busy, and it's known to be powered up, update the 9757 * chain types to use jump tables that do not include 9758 * pm. This significantly lowers the overhead and 9759 * therefore improves performance. 
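			 * pm_busy_component() has already been called above,
			 * so it is safe to skip the PM checks in the I/O
			 * chains until the idle timeout armed below (a
			 * 300 ms timeout(9F)) eventually idles the device.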
9760 */ 9761 9762 mutex_exit(&un->un_pm_mutex); 9763 mutex_enter(SD_MUTEX(un)); 9764 SD_TRACE(SD_LOG_IO_PM, un, 9765 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9766 un->un_uscsi_chain_type); 9767 9768 if (un->un_f_non_devbsize_supported) { 9769 un->un_buf_chain_type = 9770 SD_CHAIN_INFO_RMMEDIA_NO_PM; 9771 } else { 9772 un->un_buf_chain_type = 9773 SD_CHAIN_INFO_DISK_NO_PM; 9774 } 9775 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 9776 9777 SD_TRACE(SD_LOG_IO_PM, un, 9778 " changed uscsi_chain_type to %d\n", 9779 un->un_uscsi_chain_type); 9780 mutex_exit(SD_MUTEX(un)); 9781 mutex_enter(&un->un_pm_mutex); 9782 9783 if (un->un_pm_idle_timeid == NULL) { 9784 /* 300 ms. */ 9785 un->un_pm_idle_timeid = 9786 timeout(sd_pm_idletimeout_handler, un, 9787 (drv_usectohz((clock_t)300000))); 9788 /* 9789 * Include an extra call to busy which keeps the 9790 * device busy with-respect-to the PM layer 9791 * until the timer fires, at which time it'll 9792 * get the extra idle call. 9793 */ 9794 (void) pm_busy_component(SD_DEVINFO(un), 0); 9795 } 9796 } 9797 } 9798 un->un_pm_busy = FALSE; 9799 /* Next... */ 9800 cv_signal(&un->un_pm_busy_cv); 9801 9802 un->un_pm_count++; 9803 9804 SD_TRACE(SD_LOG_IO_PM, un, 9805 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 9806 9807 mutex_exit(&un->un_pm_mutex); 9808 9809 return (return_status); 9810 } 9811 9812 9813 /* 9814 * Function: sd_pm_exit 9815 * 9816 * Description: Called at the completion of a command to manage busy 9817 * status for the device. If the device becomes idle the 9818 * PM framework is notified. 9819 * 9820 * Context: Kernel thread context 9821 */ 9822 9823 static void 9824 sd_pm_exit(struct sd_lun *un) 9825 { 9826 ASSERT(!mutex_owned(SD_MUTEX(un))); 9827 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9828 9829 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 9830 9831 /* 9832 * After attach the following flag is only read, so don't 9833 * take the penalty of acquiring a mutex for it. 9834 */ 9835 if (un->un_f_pm_is_enabled == TRUE) { 9836 9837 mutex_enter(&un->un_pm_mutex); 9838 un->un_pm_count--; 9839 9840 SD_TRACE(SD_LOG_IO_PM, un, 9841 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 9842 9843 ASSERT(un->un_pm_count >= 0); 9844 if (un->un_pm_count == 0) { 9845 mutex_exit(&un->un_pm_mutex); 9846 9847 SD_TRACE(SD_LOG_IO_PM, un, 9848 "sd_pm_exit: idle component\n"); 9849 9850 (void) pm_idle_component(SD_DEVINFO(un), 0); 9851 9852 } else { 9853 mutex_exit(&un->un_pm_mutex); 9854 } 9855 } 9856 9857 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9858 } 9859 9860 9861 /* 9862 * Function: sdopen 9863 * 9864 * Description: Driver's open(9e) entry point function. 
9865  *
9866  * Arguments: dev_p - pointer to device number
9867  *		flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
9868  *		otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
9869  *		cred_p - user credential pointer
9870  *
9871  * Return Code: EINVAL
9872  *		ENXIO
9873  *		EIO
9874  *		EROFS
9875  *		EBUSY
9876  *
9877  * Context: Kernel thread context
9878  */
9879 /* ARGSUSED */
9880 static int
9881 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
9882 {
9883 	struct sd_lun *un;
9884 	int nodelay;
9885 	int part;
9886 	uint64_t partmask;
9887 	int instance;
9888 	dev_t dev;
9889 	int rval = EIO;
9890 	diskaddr_t nblks = 0;
9891 	diskaddr_t label_cap;
9892 
9893 	/* Validate the open type */
9894 	if (otyp >= OTYPCNT) {
9895 		return (EINVAL);
9896 	}
9897 
9898 	dev = *dev_p;
9899 	instance = SDUNIT(dev);
9900 	mutex_enter(&sd_detach_mutex);
9901 
9902 	/*
9903 	 * Fail the open if there is no softstate for the instance, or
9904 	 * if another thread somewhere is trying to detach the instance.
9905 	 */
9906 	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
9907 	    (un->un_detach_count != 0)) {
9908 		mutex_exit(&sd_detach_mutex);
9909 		/*
9910 		 * The probe cache only needs to be cleared when open (9e) fails
9911 		 * with ENXIO (4238046).
9912 		 */
9913 		/*
9914 		 * Unconditionally clearing the probe cache is fine with
9915 		 * separate sd/ssd binaries;
9916 		 * on x86, where parallel SCSI and fibre can live in one
9917 		 * binary, it could be an issue.
9918 		 */
9919 		sd_scsi_clear_probe_cache();
9920 		return (ENXIO);
9921 	}
9922 
9923 	/*
9924 	 * The un_layer_count is to prevent another thread in specfs from
9925 	 * trying to detach the instance, which can happen when we are
9926 	 * called from a higher-layer driver instead of through specfs.
9927 	 * This will not be needed when DDI provides a layered driver
9928 	 * interface that allows specfs to know that an instance is in
9929 	 * use by a layered driver & should not be detached.
9930 	 *
9931 	 * Note: the semantics for layered driver opens are exactly one
9932 	 * close for every open.
9933 	 */
9934 	if (otyp == OTYP_LYR) {
9935 		un->un_layer_count++;
9936 	}
9937 
9938 	/*
9939 	 * Keep a count of the current # of opens in progress. This is because
9940 	 * some layered drivers try to call us as a regular open. This can
9941 	 * cause problems that we cannot prevent; however, by keeping this count
9942 	 * we can at least keep our open and detach routines from racing against
9943 	 * each other under such conditions.
9944 	 */
9945 	un->un_opens_in_progress++;
9946 	mutex_exit(&sd_detach_mutex);
9947 
9948 	nodelay = (flag & (FNDELAY | FNONBLOCK));
9949 	part = SDPART(dev);
9950 	partmask = 1 << part;
9951 
9952 	/*
9953 	 * We use a semaphore here in order to serialize
9954 	 * open and close requests on the device.
9955 	 */
9956 	sema_p(&un->un_semoclose);
9957 
9958 	mutex_enter(SD_MUTEX(un));
9959 
9960 	/*
9961 	 * All device accesses go through sdstrategy(), where we check
9962 	 * on suspend status, but there could be a scsi_poll command,
9963 	 * which bypasses sdstrategy(), so we need to check pm
9964 	 * status.
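	 *
	 * The sd_pm_entry()/sd_pm_exit() pairing rule used here and in
	 * sdclose() is (sketch):
	 *
	 *	if (sd_pm_entry(un) == DDI_SUCCESS) {
	 *		... issue command(s) ...
	 *		sd_pm_exit(un);
	 *	}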
9965 	 */
9966 
9967 	if (!nodelay) {
9968 		while ((un->un_state == SD_STATE_SUSPENDED) ||
9969 		    (un->un_state == SD_STATE_PM_CHANGING)) {
9970 			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
9971 		}
9972 
9973 		mutex_exit(SD_MUTEX(un));
9974 		if (sd_pm_entry(un) != DDI_SUCCESS) {
9975 			rval = EIO;
9976 			SD_ERROR(SD_LOG_OPEN_CLOSE, un,
9977 			    "sdopen: sd_pm_entry failed\n");
9978 			goto open_failed_with_pm;
9979 		}
9980 		mutex_enter(SD_MUTEX(un));
9981 	}
9982 
9983 	/* check for previous exclusive open */
9984 	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
9985 	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
9986 	    "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
9987 	    un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
9988 
9989 	if (un->un_exclopen & (partmask)) {
9990 		goto excl_open_fail;
9991 	}
9992 
9993 	if (flag & FEXCL) {
9994 		int i;
9995 		if (un->un_ocmap.lyropen[part]) {
9996 			goto excl_open_fail;
9997 		}
9998 		for (i = 0; i < (OTYPCNT - 1); i++) {
9999 			if (un->un_ocmap.regopen[i] & (partmask)) {
10000 				goto excl_open_fail;
10001 			}
10002 		}
10003 	}
10004 
10005 	/*
10006 	 * Check the write permission if this is a removable media device,
10007 	 * NDELAY has not been set, and writable permission is requested.
10008 	 *
10009 	 * Note: If NDELAY was set and this is write-protected media the WRITE
10010 	 * attempt will fail with EIO as part of the I/O processing. This is a
10011 	 * more permissive implementation that allows the open to succeed and
10012 	 * WRITE attempts to fail when appropriate.
10013 	 */
10014 	if (un->un_f_chk_wp_open) {
10015 		if ((flag & FWRITE) && (!nodelay)) {
10016 			mutex_exit(SD_MUTEX(un));
10017 			/*
10018 			 * Defer the check for write permission on a writable
10019 			 * DVD drive until sdstrategy; do not fail the open even
10020 			 * if FWRITE is set, since the device may be writable
10021 			 * depending upon the media, and the media can change
10022 			 * after the call to open().
10023 			 */
10024 			if (un->un_f_dvdram_writable_device == FALSE) {
10025 				if (ISCD(un) || sr_check_wp(dev)) {
10026 					rval = EROFS;
10027 					mutex_enter(SD_MUTEX(un));
10028 					SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10029 					    "write to cd or write protected media\n");
10030 					goto open_fail;
10031 				}
10032 			}
10033 			mutex_enter(SD_MUTEX(un));
10034 		}
10035 	}
10036 
10037 	/*
10038 	 * If opening in NDELAY/NONBLOCK mode, just return.
10039 	 * Check if disk is ready and has a valid geometry later.
10040 	 */
10041 	if (!nodelay) {
10042 		sd_ssc_t *ssc;
10043 
10044 		mutex_exit(SD_MUTEX(un));
10045 		ssc = sd_ssc_init(un);
10046 		rval = sd_ready_and_valid(ssc, part);
10047 		sd_ssc_fini(ssc);
10048 		mutex_enter(SD_MUTEX(un));
10049 		/*
10050 		 * Fail if device is not ready or if the number of disk
10051 		 * blocks is zero or negative for non-CD devices.
10052 		 */
10053 
10054 		nblks = 0;
10055 
10056 		if (rval == SD_READY_VALID && (!ISCD(un))) {
10057 			/* if cmlb_partinfo fails, nblks remains 0 */
10058 			mutex_exit(SD_MUTEX(un));
10059 			(void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
10060 			    NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
10061 			mutex_enter(SD_MUTEX(un));
10062 		}
10063 
10064 		if ((rval != SD_READY_VALID) ||
10065 		    (!ISCD(un) && nblks <= 0)) {
10066 			rval = un->un_f_has_removable_media ? ENXIO : EIO;
10067 			SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10068 			    "device not ready or invalid disk block value\n");
10069 			goto open_fail;
10070 		}
10071 #if defined(__i386) || defined(__amd64)
10072 	} else {
10073 		uchar_t *cp;
10074 		/*
10075 		 * x86 requires special nodelay handling, so that p0 is
10076 		 * always defined and accessible.
10077 		 * Invalidate geometry only if device is not already open.
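		 * The scan below walks un_ocmap.chkd[], which overlays the
		 * layered and regular open maps as a single byte array; the
		 * device is considered not open only if every byte is zero.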
10078 */ 10079 cp = &un->un_ocmap.chkd[0]; 10080 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10081 if (*cp != (uchar_t)0) { 10082 break; 10083 } 10084 cp++; 10085 } 10086 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10087 mutex_exit(SD_MUTEX(un)); 10088 cmlb_invalidate(un->un_cmlbhandle, 10089 (void *)SD_PATH_DIRECT); 10090 mutex_enter(SD_MUTEX(un)); 10091 } 10092 10093 #endif 10094 } 10095 10096 if (otyp == OTYP_LYR) { 10097 un->un_ocmap.lyropen[part]++; 10098 } else { 10099 un->un_ocmap.regopen[otyp] |= partmask; 10100 } 10101 10102 /* Set up open and exclusive open flags */ 10103 if (flag & FEXCL) { 10104 un->un_exclopen |= (partmask); 10105 } 10106 10107 /* 10108 * If the lun is EFI labeled and lun capacity is greater than the 10109 * capacity contained in the label, log a sys-event to notify the 10110 * interested module. 10111 * To avoid an infinite loop of logging sys-event, we only log the 10112 * event when the lun is not opened in NDELAY mode. The event handler 10113 * should open the lun in NDELAY mode. 10114 */ 10115 if (!(flag & FNDELAY)) { 10116 mutex_exit(SD_MUTEX(un)); 10117 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 10118 (void*)SD_PATH_DIRECT) == 0) { 10119 mutex_enter(SD_MUTEX(un)); 10120 if (un->un_f_blockcount_is_valid && 10121 un->un_blockcount > label_cap) { 10122 mutex_exit(SD_MUTEX(un)); 10123 sd_log_lun_expansion_event(un, 10124 (nodelay ? KM_NOSLEEP : KM_SLEEP)); 10125 mutex_enter(SD_MUTEX(un)); 10126 } 10127 } else { 10128 mutex_enter(SD_MUTEX(un)); 10129 } 10130 } 10131 10132 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10133 "open of part %d type %d\n", part, otyp); 10134 10135 mutex_exit(SD_MUTEX(un)); 10136 if (!nodelay) { 10137 sd_pm_exit(un); 10138 } 10139 10140 sema_v(&un->un_semoclose); 10141 10142 mutex_enter(&sd_detach_mutex); 10143 un->un_opens_in_progress--; 10144 mutex_exit(&sd_detach_mutex); 10145 10146 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 10147 return (DDI_SUCCESS); 10148 10149 excl_open_fail: 10150 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 10151 rval = EBUSY; 10152 10153 open_fail: 10154 mutex_exit(SD_MUTEX(un)); 10155 10156 /* 10157 * On a failed open we must exit the pm management. 10158 */ 10159 if (!nodelay) { 10160 sd_pm_exit(un); 10161 } 10162 open_failed_with_pm: 10163 sema_v(&un->un_semoclose); 10164 10165 mutex_enter(&sd_detach_mutex); 10166 un->un_opens_in_progress--; 10167 if (otyp == OTYP_LYR) { 10168 un->un_layer_count--; 10169 } 10170 mutex_exit(&sd_detach_mutex); 10171 10172 return (rval); 10173 } 10174 10175 10176 /* 10177 * Function: sdclose 10178 * 10179 * Description: Driver's close(9e) entry point function. 
10180  *
10181  * Arguments: dev - device number
10182  *		flag - file status flag, informational only
10183  *		otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10184  *		cred_p - user credential pointer
10185  *
10186  * Return Code: ENXIO
10187  *
10188  * Context: Kernel thread context
10189  */
10190 /* ARGSUSED */
10191 static int
10192 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
10193 {
10194 	struct sd_lun *un;
10195 	uchar_t *cp;
10196 	int part;
10197 	int nodelay;
10198 	int rval = 0;
10199 
10200 	/* Validate the open type */
10201 	if (otyp >= OTYPCNT) {
10202 		return (ENXIO);
10203 	}
10204 
10205 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10206 		return (ENXIO);
10207 	}
10208 
10209 	part = SDPART(dev);
10210 	nodelay = flag & (FNDELAY | FNONBLOCK);
10211 
10212 	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10213 	    "sdclose: close of part %d type %d\n", part, otyp);
10214 
10215 	/*
10216 	 * We use a semaphore here in order to serialize
10217 	 * open and close requests on the device.
10218 	 */
10219 	sema_p(&un->un_semoclose);
10220 
10221 	mutex_enter(SD_MUTEX(un));
10222 
10223 	/* Don't proceed if power is being changed. */
10224 	while (un->un_state == SD_STATE_PM_CHANGING) {
10225 		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10226 	}
10227 
10228 	if (un->un_exclopen & (1 << part)) {
10229 		un->un_exclopen &= ~(1 << part);
10230 	}
10231 
10232 	/* Update the open partition map */
10233 	if (otyp == OTYP_LYR) {
10234 		un->un_ocmap.lyropen[part] -= 1;
10235 	} else {
10236 		un->un_ocmap.regopen[otyp] &= ~(1 << part);
10237 	}
10238 
10239 	cp = &un->un_ocmap.chkd[0];
10240 	while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10241 		if (*cp != (uchar_t)0) {
10242 			break;
10243 		}
10244 		cp++;
10245 	}
10246 
10247 	if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10248 		SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
10249 
10250 		/*
10251 		 * We avoid persistence upon the last close, and set
10252 		 * the throttle back to the maximum.
10253 		 */
10254 		un->un_throttle = un->un_saved_throttle;
10255 
10256 		if (un->un_state == SD_STATE_OFFLINE) {
10257 			if (un->un_f_is_fibre == FALSE) {
10258 				scsi_log(SD_DEVINFO(un), sd_label,
10259 				    CE_WARN, "offline\n");
10260 			}
10261 			mutex_exit(SD_MUTEX(un));
10262 			cmlb_invalidate(un->un_cmlbhandle,
10263 			    (void *)SD_PATH_DIRECT);
10264 			mutex_enter(SD_MUTEX(un));
10265 
10266 		} else {
10267 			/*
10268 			 * Flush any outstanding writes in the NVRAM cache.
10269 			 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
10270 			 * command; it may not work for non-Pluto devices.
10271 			 * SYNCHRONIZE CACHE is not required for removables,
10272 			 * except DVD-RAM drives.
10273 			 *
10274 			 * Also note: because SYNCHRONIZE CACHE is currently
10275 			 * the only command issued here that requires the
10276 			 * drive to be powered up, only do the power up before
10277 			 * sending the Sync Cache command. If additional
10278 			 * commands are added that require a powered-up
10279 			 * drive, the following sequence may have to change.
10280 			 *
10281 			 * And finally, note that parallel SCSI on SPARC
10282 			 * only issues a Sync Cache to DVD-RAM, a newly
10283 			 * supported device.
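			 * (ENOTSUP from the Sync Cache is deliberately
			 * mapped to success below, so drives that lack the
			 * command still close cleanly.)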
10284 			 */
10285 #if defined(__i386) || defined(__amd64)
10286 			if ((un->un_f_sync_cache_supported &&
10287 			    un->un_f_sync_cache_required) ||
10288 			    un->un_f_dvdram_writable_device == TRUE) {
10289 #else
10290 			if (un->un_f_dvdram_writable_device == TRUE) {
10291 #endif
10292 				mutex_exit(SD_MUTEX(un));
10293 				if (sd_pm_entry(un) == DDI_SUCCESS) {
10294 					rval =
10295 					    sd_send_scsi_SYNCHRONIZE_CACHE(un,
10296 					    NULL);
10297 					/* ignore error if not supported */
10298 					if (rval == ENOTSUP) {
10299 						rval = 0;
10300 					} else if (rval != 0) {
10301 						rval = EIO;
10302 					}
10303 					sd_pm_exit(un);
10304 				} else {
10305 					rval = EIO;
10306 				}
10307 				mutex_enter(SD_MUTEX(un));
10308 			}
10309 
10310 			/*
10311 			 * For devices that support DOOR_LOCK, send an ALLOW
10312 			 * MEDIA REMOVAL command, but don't get upset if it
10313 			 * fails. We need to raise the power of the drive before
10314 			 * we can call sd_send_scsi_DOORLOCK().
10315 			 */
10316 			if (un->un_f_doorlock_supported) {
10317 				mutex_exit(SD_MUTEX(un));
10318 				if (sd_pm_entry(un) == DDI_SUCCESS) {
10319 					sd_ssc_t *ssc;
10320 
10321 					ssc = sd_ssc_init(un);
10322 					rval = sd_send_scsi_DOORLOCK(ssc,
10323 					    SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
10324 					if (rval != 0)
10325 						sd_ssc_assessment(ssc,
10326 						    SD_FMT_IGNORE);
10327 					sd_ssc_fini(ssc);
10328 
10329 					sd_pm_exit(un);
10330 					if (ISCD(un) && (rval != 0) &&
10331 					    (nodelay != 0)) {
10332 						rval = ENXIO;
10333 					}
10334 				} else {
10335 					rval = EIO;
10336 				}
10337 				mutex_enter(SD_MUTEX(un));
10338 			}
10339 
10340 			/*
10341 			 * If a device has removable media, invalidate all
10342 			 * parameters related to media, such as geometry,
10343 			 * blocksize, and blockcount.
10344 			 */
10345 			if (un->un_f_has_removable_media) {
10346 				sr_ejected(un);
10347 			}
10348 
10349 			/*
10350 			 * Destroy the cache (if it exists) that was
10351 			 * allocated for the write maps, since this is
10352 			 * the last close for this media.
10353 			 */
10354 			if (un->un_wm_cache) {
10355 				/*
10356 				 * Check if there are pending commands;
10357 				 * if there are, give a warning and
10358 				 * do not destroy the cache.
10359 				 */
10360 				if (un->un_ncmds_in_driver > 0) {
10361 					scsi_log(SD_DEVINFO(un),
10362 					    sd_label, CE_WARN,
10363 					    "Unable to clean up memory "
10364 					    "because of pending I/O\n");
10365 				} else {
10366 					kmem_cache_destroy(
10367 					    un->un_wm_cache);
10368 					un->un_wm_cache = NULL;
10369 				}
10370 			}
10371 		}
10372 	}
10373 
10374 	mutex_exit(SD_MUTEX(un));
10375 	sema_v(&un->un_semoclose);
10376 
10377 	if (otyp == OTYP_LYR) {
10378 		mutex_enter(&sd_detach_mutex);
10379 		/*
10380 		 * The detach routine may run when the layer count
10381 		 * drops to zero.
10382 		 */
10383 		un->un_layer_count--;
10384 		mutex_exit(&sd_detach_mutex);
10385 	}
10386 
10387 	return (rval);
10388 }
10389 
10390 
10391 /*
10392  * Function: sd_ready_and_valid
10393  *
10394  * Description: Test if device is ready and has a valid geometry.
10395  *
10396  * Arguments: ssc - ssc contains pointer to driver soft state (unit)
10397  *		structure for this target; part - partition number
10398  *
10399  * Return Code: SD_READY_VALID ready and valid label
10400  *		SD_NOT_READY_VALID not ready, no label
10401  *		SD_RESERVED_BY_OTHERS reservation conflict
10402  *
10403  * Context: Never called at interrupt context.
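 *
 *		A typical caller brackets the call with an ssc, as the
 *		read/write entry points later in this file do (sketch):
 *
 *			ssc = sd_ssc_init(un);
 *			if (sd_ready_and_valid(ssc, SDPART(dev)) !=
 *			    SD_READY_VALID)
 *				err = EIO;
 *			sd_ssc_fini(ssc);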
10404  */
10405 
10406 static int
10407 sd_ready_and_valid(sd_ssc_t *ssc, int part)
10408 {
10409 	struct sd_errstats *stp;
10410 	uint64_t capacity;
10411 	uint_t lbasize;
10412 	int rval = SD_READY_VALID;
10413 	char name_str[48];
10414 	boolean_t is_valid;
10415 	struct sd_lun *un;
10416 	int status;
10417 
10418 	ASSERT(ssc != NULL);
10419 	un = ssc->ssc_un;
10420 	ASSERT(un != NULL);
10421 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10422 
10423 	mutex_enter(SD_MUTEX(un));
10424 	/*
10425 	 * If a device has removable media, we must check if media is
10426 	 * ready when checking if this device is ready and valid.
10427 	 */
10428 	if (un->un_f_has_removable_media) {
10429 		mutex_exit(SD_MUTEX(un));
10430 		status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10431 
10432 		if (status != 0) {
10433 			rval = SD_NOT_READY_VALID;
10434 			mutex_enter(SD_MUTEX(un));
10435 
10436 			/* Ignore all failed status for removable media */
10437 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10438 
10439 			goto done;
10440 		}
10441 
10442 		is_valid = SD_IS_VALID_LABEL(un);
10443 		mutex_enter(SD_MUTEX(un));
10444 		if (!is_valid ||
10445 		    (un->un_f_blockcount_is_valid == FALSE) ||
10446 		    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
10447 
10448 			/* capacity has to be read every open. */
10449 			mutex_exit(SD_MUTEX(un));
10450 			status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
10451 			    &lbasize, SD_PATH_DIRECT);
10452 
10453 			if (status != 0) {
10454 				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10455 
10456 				cmlb_invalidate(un->un_cmlbhandle,
10457 				    (void *)SD_PATH_DIRECT);
10458 				mutex_enter(SD_MUTEX(un));
10459 				rval = SD_NOT_READY_VALID;
10460 
10461 				goto done;
10462 			} else {
10463 				mutex_enter(SD_MUTEX(un));
10464 				sd_update_block_info(un, lbasize, capacity);
10465 			}
10466 		}
10467 
10468 		/*
10469 		 * Check if the media in the device is writable or not.
10470 		 */
10471 		if (!is_valid && ISCD(un)) {
10472 			sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
10473 		}
10474 
10475 	} else {
10476 		/*
10477 		 * Do a test unit ready to clear any unit attention from non-CD
10478 		 * devices.
10479 		 */
10480 		mutex_exit(SD_MUTEX(un));
10481 
10482 		status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10483 		if (status != 0) {
10484 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10485 		}
10486 
10487 		mutex_enter(SD_MUTEX(un));
10488 	}
10489 
10490 
10491 	/*
10492 	 * If this is a non-512 block device, allocate space for
10493 	 * the wmap cache. This is being done here since every time
10494 	 * the media is changed this routine will be called and the
10495 	 * block size is a function of media rather than device.
10496 	 */
10497 	if ((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR ||
10498 	    un->un_f_non_devbsize_supported) &&
10499 	    un->un_tgt_blocksize != DEV_BSIZE) {
10500 		if (!(un->un_wm_cache)) {
10501 			(void) snprintf(name_str, sizeof (name_str),
10502 			    "%s%d_cache",
10503 			    ddi_driver_name(SD_DEVINFO(un)),
10504 			    ddi_get_instance(SD_DEVINFO(un)));
10505 			un->un_wm_cache = kmem_cache_create(
10506 			    name_str, sizeof (struct sd_w_map),
10507 			    8, sd_wm_cache_constructor,
10508 			    sd_wm_cache_destructor, NULL,
10509 			    (void *)un, NULL, 0);
10510 			if (!(un->un_wm_cache)) {
10511 				rval = ENOMEM;
10512 				goto done;
10513 			}
10514 		}
10515 	}
10516 
10517 	if (un->un_state == SD_STATE_NORMAL) {
10518 		/*
10519 		 * If the target is not yet ready here (defined by a TUR
10520 		 * failure), invalidate the geometry and print an 'offline'
10521 		 * message. This is a legacy message, as the state of the
10522 		 * target is not actually changed to SD_STATE_OFFLINE.
10523 		 *
10524 		 * If the TUR fails for EACCES (Reservation Conflict),
10525 		 * SD_RESERVED_BY_OTHERS will be returned to indicate
10526 		 * reservation conflict. If the TUR fails for other
10527 		 * reasons, SD_NOT_READY_VALID will be returned.
10528 		 */
10529 		int err;
10530 
10531 		mutex_exit(SD_MUTEX(un));
10532 		err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10533 		mutex_enter(SD_MUTEX(un));
10534 
10535 		if (err != 0) {
10536 			mutex_exit(SD_MUTEX(un));
10537 			cmlb_invalidate(un->un_cmlbhandle,
10538 			    (void *)SD_PATH_DIRECT);
10539 			mutex_enter(SD_MUTEX(un));
10540 			if (err == EACCES) {
10541 				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
10542 				    "reservation conflict\n");
10543 				rval = SD_RESERVED_BY_OTHERS;
10544 				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10545 			} else {
10546 				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
10547 				    "drive offline\n");
10548 				rval = SD_NOT_READY_VALID;
10549 				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
10550 			}
10551 			goto done;
10552 		}
10553 	}
10554 
10555 	if (un->un_f_format_in_progress == FALSE) {
10556 		mutex_exit(SD_MUTEX(un));
10557 
10558 		(void) cmlb_validate(un->un_cmlbhandle, 0,
10559 		    (void *)SD_PATH_DIRECT);
10560 		if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL,
10561 		    NULL, (void *) SD_PATH_DIRECT) != 0) {
10562 			rval = SD_NOT_READY_VALID;
10563 			mutex_enter(SD_MUTEX(un));
10564 
10565 			goto done;
10566 		}
10567 		if (un->un_f_pkstats_enabled) {
10568 			sd_set_pstats(un);
10569 			SD_TRACE(SD_LOG_IO_PARTITION, un,
10570 			    "sd_ready_and_valid: un:0x%p pstats created and "
10571 			    "set\n", un);
10572 		}
10573 		mutex_enter(SD_MUTEX(un));
10574 	}
10575 
10576 	/*
10577 	 * If this device supports the DOOR_LOCK command, try to send it
10578 	 * to PREVENT MEDIA REMOVAL, but don't get upset if it fails.
10579 	 * For a CD, however, a failure here is an error.
10580 	 */
10581 	if (un->un_f_doorlock_supported) {
10582 		mutex_exit(SD_MUTEX(un));
10583 		status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
10584 		    SD_PATH_DIRECT);
10585 
10586 		if ((status != 0) && ISCD(un)) {
10587 			rval = SD_NOT_READY_VALID;
10588 			mutex_enter(SD_MUTEX(un));
10589 
10590 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10591 
10592 			goto done;
10593 		} else if (status != 0)
10594 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10595 		mutex_enter(SD_MUTEX(un));
10596 	}
10597 
10598 	/* The state has changed, inform the media watch routines */
10599 	un->un_mediastate = DKIO_INSERTED;
10600 	cv_broadcast(&un->un_state_cv);
10601 	rval = SD_READY_VALID;
10602 
10603 done:
10604 
10605 	/*
10606 	 * Initialize the capacity kstat value, if there was previously no
10607 	 * media (capacity kstat is 0) and media has now been inserted
10608 	 * (un_blockcount > 0).
10609 	 */
10610 	if (un->un_errstats != NULL) {
10611 		stp = (struct sd_errstats *)un->un_errstats->ks_data;
10612 		if ((stp->sd_capacity.value.ui64 == 0) &&
10613 		    (un->un_f_blockcount_is_valid == TRUE)) {
10614 			stp->sd_capacity.value.ui64 =
10615 			    (uint64_t)((uint64_t)un->un_blockcount *
10616 			    un->un_sys_blocksize);
10617 		}
10618 	}
10619 
10620 	mutex_exit(SD_MUTEX(un));
10621 	return (rval);
10622 }
10623 
10624 
10625 /*
10626  * Function: sdmin
10627  *
10628  * Description: Routine to limit the size of a data transfer. Used in
10629  *		conjunction with physio(9F).
10630  *
10631  * Arguments: bp - pointer to the indicated buf(9S) struct.
10632  *
10633  * Context: Kernel thread context.
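 *
 *		sdmin() is handed to physio(9F)/aphysio(9F) as the transfer-
 *		limiting routine; see, for example, the
 *		physio(sdstrategy, NULL, dev, B_READ, sdmin, uio) call in
 *		sdread() below.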
10634  */
10635 
10636 static void
10637 sdmin(struct buf *bp)
10638 {
10639 	struct sd_lun *un;
10640 	int instance;
10641 
10642 	instance = SDUNIT(bp->b_edev);
10643 
10644 	un = ddi_get_soft_state(sd_state, instance);
10645 	ASSERT(un != NULL);
10646 
10647 	/*
10648 	 * We depend on DMA partial or buf breakup to restrict
10649 	 * I/O size if either of them is enabled.
10650 	 */
10651 	if (un->un_partial_dma_supported ||
10652 	    un->un_buf_breakup_supported) {
10653 		return;
10654 	}
10655 
10656 	if (bp->b_bcount > un->un_max_xfer_size) {
10657 		bp->b_bcount = un->un_max_xfer_size;
10658 	}
10659 }
10660 
10661 
10662 /*
10663  * Function: sdread
10664  *
10665  * Description: Driver's read(9e) entry point function.
10666  *
10667  * Arguments: dev - device number
10668  *		uio - structure pointer describing where data is to be stored
10669  *		in user's space
10670  *		cred_p - user credential pointer
10671  *
10672  * Return Code: ENXIO
10673  *		EIO
10674  *		EINVAL
10675  *		value returned by physio
10676  *
10677  * Context: Kernel thread context.
10678  */
10679 /* ARGSUSED */
10680 static int
10681 sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
10682 {
10683 	struct sd_lun *un = NULL;
10684 	int secmask;
10685 	int err = 0;
10686 	sd_ssc_t *ssc;
10687 
10688 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10689 		return (ENXIO);
10690 	}
10691 
10692 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10693 
10694 
10695 	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10696 		mutex_enter(SD_MUTEX(un));
10697 		/*
10698 		 * Because the call to sd_ready_and_valid will issue I/O, we
10699 		 * must wait here if either the device is suspended or
10700 		 * its power level is changing.
10701 		 */
10702 		while ((un->un_state == SD_STATE_SUSPENDED) ||
10703 		    (un->un_state == SD_STATE_PM_CHANGING)) {
10704 			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10705 		}
10706 		un->un_ncmds_in_driver++;
10707 		mutex_exit(SD_MUTEX(un));
10708 
10709 		/* Initialize sd_ssc_t for internal uscsi commands */
10710 		ssc = sd_ssc_init(un);
10711 		if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10712 			err = EIO;
10713 		} else {
10714 			err = 0;
10715 		}
10716 		sd_ssc_fini(ssc);
10717 
10718 		mutex_enter(SD_MUTEX(un));
10719 		un->un_ncmds_in_driver--;
10720 		ASSERT(un->un_ncmds_in_driver >= 0);
10721 		mutex_exit(SD_MUTEX(un));
10722 		if (err != 0)
10723 			return (err);
10724 	}
10725 
10726 	/*
10727 	 * Read requests are restricted to multiples of the system block size.
10728 	 */
10729 	if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR)
10730 		secmask = un->un_tgt_blocksize - 1;
10731 	else
10732 		secmask = DEV_BSIZE - 1;
10733 
10734 	if (uio->uio_loffset & ((offset_t)(secmask))) {
10735 		SD_ERROR(SD_LOG_READ_WRITE, un,
10736 		    "sdread: file offset not modulo %d\n",
10737 		    secmask + 1);
10738 		err = EINVAL;
10739 	} else if (uio->uio_iov->iov_len & (secmask)) {
10740 		SD_ERROR(SD_LOG_READ_WRITE, un,
10741 		    "sdread: transfer length not modulo %d\n",
10742 		    secmask + 1);
10743 		err = EINVAL;
10744 	} else {
10745 		err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
10746 	}
10747 
10748 	return (err);
10749 }
10750 
10751 
10752 /*
10753  * Function: sdwrite
10754  *
10755  * Description: Driver's write(9e) entry point function.
10756  *
10757  * Arguments: dev - device number
10758  *		uio - structure pointer describing where data is stored in
10759  *		user's space
10760  *		cred_p - user credential pointer
10761  *
10762  * Return Code: ENXIO
10763  *		EIO
10764  *		EINVAL
10765  *		value returned by physio
10766  *
10767  * Context: Kernel thread context.
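 *
 *		For example, with the default 512-byte system block size
 *		(DEV_BSIZE), secmask below is 0x1FF, and any file offset or
 *		transfer length with one of those low bits set is rejected
 *		with EINVAL.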
10768 */ 10769 /* ARGSUSED */ 10770 static int 10771 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 10772 { 10773 struct sd_lun *un = NULL; 10774 int secmask; 10775 int err = 0; 10776 sd_ssc_t *ssc; 10777 10778 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10779 return (ENXIO); 10780 } 10781 10782 ASSERT(!mutex_owned(SD_MUTEX(un))); 10783 10784 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10785 mutex_enter(SD_MUTEX(un)); 10786 /* 10787 * Because the call to sd_ready_and_valid will issue I/O we 10788 * must wait here if either the device is suspended or 10789 * if it's power level is changing. 10790 */ 10791 while ((un->un_state == SD_STATE_SUSPENDED) || 10792 (un->un_state == SD_STATE_PM_CHANGING)) { 10793 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10794 } 10795 un->un_ncmds_in_driver++; 10796 mutex_exit(SD_MUTEX(un)); 10797 10798 /* Initialize sd_ssc_t for internal uscsi commands */ 10799 ssc = sd_ssc_init(un); 10800 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10801 err = EIO; 10802 } else { 10803 err = 0; 10804 } 10805 sd_ssc_fini(ssc); 10806 10807 mutex_enter(SD_MUTEX(un)); 10808 un->un_ncmds_in_driver--; 10809 ASSERT(un->un_ncmds_in_driver >= 0); 10810 mutex_exit(SD_MUTEX(un)); 10811 if (err != 0) 10812 return (err); 10813 } 10814 10815 /* 10816 * Write requests are restricted to multiples of the system block size. 10817 */ 10818 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) 10819 secmask = un->un_tgt_blocksize - 1; 10820 else 10821 secmask = DEV_BSIZE - 1; 10822 10823 if (uio->uio_loffset & ((offset_t)(secmask))) { 10824 SD_ERROR(SD_LOG_READ_WRITE, un, 10825 "sdwrite: file offset not modulo %d\n", 10826 secmask + 1); 10827 err = EINVAL; 10828 } else if (uio->uio_iov->iov_len & (secmask)) { 10829 SD_ERROR(SD_LOG_READ_WRITE, un, 10830 "sdwrite: transfer length not modulo %d\n", 10831 secmask + 1); 10832 err = EINVAL; 10833 } else { 10834 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 10835 } 10836 10837 return (err); 10838 } 10839 10840 10841 /* 10842 * Function: sdaread 10843 * 10844 * Description: Driver's aread(9e) entry point function. 10845 * 10846 * Arguments: dev - device number 10847 * aio - structure pointer describing where data is to be stored 10848 * cred_p - user credential pointer 10849 * 10850 * Return Code: ENXIO 10851 * EIO 10852 * EINVAL 10853 * value returned by aphysio 10854 * 10855 * Context: Kernel thread context. 10856 */ 10857 /* ARGSUSED */ 10858 static int 10859 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10860 { 10861 struct sd_lun *un = NULL; 10862 struct uio *uio = aio->aio_uio; 10863 int secmask; 10864 int err = 0; 10865 sd_ssc_t *ssc; 10866 10867 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10868 return (ENXIO); 10869 } 10870 10871 ASSERT(!mutex_owned(SD_MUTEX(un))); 10872 10873 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10874 mutex_enter(SD_MUTEX(un)); 10875 /* 10876 * Because the call to sd_ready_and_valid will issue I/O we 10877 * must wait here if either the device is suspended or 10878 * if it's power level is changing. 
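		 * (The predicate is re-checked in a while loop below because
		 * a wakeup via cv_broadcast(9F) on un_suspend_cv does not by
		 * itself guarantee that the suspend or PM transition has
		 * completed; this is the standard condition-variable wait
		 * pattern.)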
10879 */ 10880 while ((un->un_state == SD_STATE_SUSPENDED) || 10881 (un->un_state == SD_STATE_PM_CHANGING)) { 10882 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10883 } 10884 un->un_ncmds_in_driver++; 10885 mutex_exit(SD_MUTEX(un)); 10886 10887 /* Initialize sd_ssc_t for internal uscsi commands */ 10888 ssc = sd_ssc_init(un); 10889 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10890 err = EIO; 10891 } else { 10892 err = 0; 10893 } 10894 sd_ssc_fini(ssc); 10895 10896 mutex_enter(SD_MUTEX(un)); 10897 un->un_ncmds_in_driver--; 10898 ASSERT(un->un_ncmds_in_driver >= 0); 10899 mutex_exit(SD_MUTEX(un)); 10900 if (err != 0) 10901 return (err); 10902 } 10903 10904 /* 10905 * Read requests are restricted to multiples of the system block size. 10906 */ 10907 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) 10908 secmask = un->un_tgt_blocksize - 1; 10909 else 10910 secmask = DEV_BSIZE - 1; 10911 10912 if (uio->uio_loffset & ((offset_t)(secmask))) { 10913 SD_ERROR(SD_LOG_READ_WRITE, un, 10914 "sdaread: file offset not modulo %d\n", 10915 secmask + 1); 10916 err = EINVAL; 10917 } else if (uio->uio_iov->iov_len & (secmask)) { 10918 SD_ERROR(SD_LOG_READ_WRITE, un, 10919 "sdaread: transfer length not modulo %d\n", 10920 secmask + 1); 10921 err = EINVAL; 10922 } else { 10923 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 10924 } 10925 10926 return (err); 10927 } 10928 10929 10930 /* 10931 * Function: sdawrite 10932 * 10933 * Description: Driver's awrite(9e) entry point function. 10934 * 10935 * Arguments: dev - device number 10936 * aio - structure pointer describing where data is stored 10937 * cred_p - user credential pointer 10938 * 10939 * Return Code: ENXIO 10940 * EIO 10941 * EINVAL 10942 * value returned by aphysio 10943 * 10944 * Context: Kernel thread context. 10945 */ 10946 /* ARGSUSED */ 10947 static int 10948 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10949 { 10950 struct sd_lun *un = NULL; 10951 struct uio *uio = aio->aio_uio; 10952 int secmask; 10953 int err = 0; 10954 sd_ssc_t *ssc; 10955 10956 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10957 return (ENXIO); 10958 } 10959 10960 ASSERT(!mutex_owned(SD_MUTEX(un))); 10961 10962 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10963 mutex_enter(SD_MUTEX(un)); 10964 /* 10965 * Because the call to sd_ready_and_valid will issue I/O we 10966 * must wait here if either the device is suspended or 10967 * if it's power level is changing. 10968 */ 10969 while ((un->un_state == SD_STATE_SUSPENDED) || 10970 (un->un_state == SD_STATE_PM_CHANGING)) { 10971 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10972 } 10973 un->un_ncmds_in_driver++; 10974 mutex_exit(SD_MUTEX(un)); 10975 10976 /* Initialize sd_ssc_t for internal uscsi commands */ 10977 ssc = sd_ssc_init(un); 10978 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10979 err = EIO; 10980 } else { 10981 err = 0; 10982 } 10983 sd_ssc_fini(ssc); 10984 10985 mutex_enter(SD_MUTEX(un)); 10986 un->un_ncmds_in_driver--; 10987 ASSERT(un->un_ncmds_in_driver >= 0); 10988 mutex_exit(SD_MUTEX(un)); 10989 if (err != 0) 10990 return (err); 10991 } 10992 10993 /* 10994 * Write requests are restricted to multiples of the system block size. 
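	 *
	 * The check below relies on the block size being a power of two,
	 * so (blocksize - 1) is a mask of the low-order offset bits. A
	 * worked example with a 512-byte block size (secmask == 0x1FF):
	 *
	 *	offset 4096:	4096 & 0x1FF == 0	aligned, allowed
	 *	offset 4100:	4100 & 0x1FF == 4	misaligned, EINVAL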
10995 */ 10996 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) 10997 secmask = un->un_tgt_blocksize - 1; 10998 else 10999 secmask = DEV_BSIZE - 1; 11000 11001 if (uio->uio_loffset & ((offset_t)(secmask))) { 11002 SD_ERROR(SD_LOG_READ_WRITE, un, 11003 "sdawrite: file offset not modulo %d\n", 11004 secmask + 1); 11005 err = EINVAL; 11006 } else if (uio->uio_iov->iov_len & (secmask)) { 11007 SD_ERROR(SD_LOG_READ_WRITE, un, 11008 "sdawrite: transfer length not modulo %d\n", 11009 secmask + 1); 11010 err = EINVAL; 11011 } else { 11012 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 11013 } 11014 11015 return (err); 11016 } 11017 11018 11019 11020 11021 11022 /* 11023 * Driver IO processing follows the following sequence: 11024 * 11025 * sdioctl(9E) sdstrategy(9E) biodone(9F) 11026 * | | ^ 11027 * v v | 11028 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 11029 * | | | | 11030 * v | | | 11031 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 11032 * | | ^ ^ 11033 * v v | | 11034 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 11035 * | | | | 11036 * +---+ | +------------+ +-------+ 11037 * | | | | 11038 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11039 * | v | | 11040 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 11041 * | | ^ | 11042 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11043 * | v | | 11044 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 11045 * | | ^ | 11046 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11047 * | v | | 11048 * | sd_checksum_iostart() sd_checksum_iodone() | 11049 * | | ^ | 11050 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 11051 * | v | | 11052 * | sd_pm_iostart() sd_pm_iodone() | 11053 * | | ^ | 11054 * | | | | 11055 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 11056 * | ^ 11057 * v | 11058 * sd_core_iostart() | 11059 * | | 11060 * | +------>(*destroypkt)() 11061 * +-> sd_start_cmds() <-+ | | 11062 * | | | v 11063 * | | | scsi_destroy_pkt(9F) 11064 * | | | 11065 * +->(*initpkt)() +- sdintr() 11066 * | | | | 11067 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 11068 * | +-> scsi_setup_cdb(9F) | 11069 * | | 11070 * +--> scsi_transport(9F) | 11071 * | | 11072 * +----> SCSA ---->+ 11073 * 11074 * 11075 * This code is based upon the following presumptions: 11076 * 11077 * - iostart and iodone functions operate on buf(9S) structures. These 11078 * functions perform the necessary operations on the buf(9S) and pass 11079 * them along to the next function in the chain by using the macros 11080 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 11081 * (for iodone side functions). 11082 * 11083 * - The iostart side functions may sleep. The iodone side functions 11084 * are called under interrupt context and may NOT sleep. Therefore 11085 * iodone side functions also may not call iostart side functions. 11086 * (NOTE: iostart side functions should NOT sleep for memory, as 11087 * this could result in deadlock.) 11088 * 11089 * - An iostart side function may call its corresponding iodone side 11090 * function directly (if necessary). 11091 * 11092 * - In the event of an error, an iostart side function can return a buf(9S) 11093 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 11094 * b_error in the usual way of course). 11095 * 11096 * - The taskq mechanism may be used by the iodone side functions to dispatch 11097 * requests to the iostart side functions. 
The iostart side functions in
11098  *	  this case would be called under the context of a taskq thread, so it's
11099  *	  OK for them to block/sleep/spin in this case.
11100  *
11101  * - iostart side functions may allocate "shadow" buf(9S) structs and
11102  *   pass them along to the next function in the chain. The corresponding
11103  *   iodone side functions must coalesce the "shadow" bufs and return
11104  *   the "original" buf to the next higher layer.
11105  *
11106  * - The b_private field of the buf(9S) struct holds a pointer to
11107  *   an sd_xbuf struct, which contains information needed to
11108  *   construct the scsi_pkt for the command.
11109  *
11110  * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
11111  *   layer must acquire & release the SD_MUTEX(un) as needed.
11112  */
11113 
11114 
11115 /*
11116  * Create taskq for all targets in the system. This is created at
11117  * _init(9E) and destroyed at _fini(9E).
11118  *
11119  * Note: here we set the minalloc to a reasonably high number to ensure that
11120  * we will have an adequate supply of task entries available at interrupt time.
11121  * This is used in conjunction with the TASKQ_PREPOPULATE flag in
11122  * sd_taskq_create(). Since we do not want to sleep for allocations at
11123  * interrupt time, set maxalloc equal to minalloc. That way we will just fail
11124  * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
11125  * requests at any one instant in time.
11126  */
11127 #define	SD_TASKQ_NUMTHREADS	8
11128 #define	SD_TASKQ_MINALLOC	256
11129 #define	SD_TASKQ_MAXALLOC	256
11130 
11131 static taskq_t	*sd_tq = NULL;
11132 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))
11133 
11134 static int	sd_taskq_minalloc = SD_TASKQ_MINALLOC;
11135 static int	sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
11136 
11137 /*
11138  * The following task queue is being created for the write part of
11139  * read-modify-write of non-512 block size devices.
11140  * Limit the number of threads to 1 for now. This number has been chosen
11141  * because the queue currently applies only to DVD-RAM and MO drives,
11142  * for which performance is not the main criterion at this stage.
11143  * Note: whether a single taskq could be used instead remains to be explored.
11144  */
11145 #define	SD_WMR_TASKQ_NUMTHREADS	1
11146 static taskq_t	*sd_wmr_tq = NULL;
11147 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))
11148 
11149 /*
11150  * Function: sd_taskq_create
11151  *
11152  * Description: Create taskq thread(s) and preallocate task entries
11153  *
11154  * Return Code: None; the created taskqs are stored in sd_tq and sd_wmr_tq.
11155  *
11156  * Context: Can sleep. Requires blockable context.
11157  *
11158  * Notes:	- The taskq() facility currently is NOT part of the DDI.
11159  *		  (definitely NOT recommended for 3rd-party drivers!) :-)
11160  *		- taskq_create() will block for memory, also it will panic
11161  *		  if it cannot create the requested number of threads.
11162  *		- Currently taskq_create() creates threads that cannot be
11163  *		  swapped.
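 *		- Illustration (a sketch, not code from this driver):
 *		  because maxalloc == minalloc, a non-blocking dispatch
 *		  fails cleanly once all preallocated task entries are in
 *		  use, e.g.:
 *
 *			if (taskq_dispatch(sd_tq, my_func, my_arg,
 *			    TQ_NOSLEEP) == NULL) {
 *				handle the failure; do not sleep here
 *			}
 *
 *		  (my_func/my_arg are hypothetical; the point is the NULL
 *		  return instead of sleeping at interrupt time.)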
11164 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 11165 * supply of taskq entries at interrupt time (ie, so that we 11166 * do not have to sleep for memory) 11167 */ 11168 11169 static void 11170 sd_taskq_create(void) 11171 { 11172 char taskq_name[TASKQ_NAMELEN]; 11173 11174 ASSERT(sd_tq == NULL); 11175 ASSERT(sd_wmr_tq == NULL); 11176 11177 (void) snprintf(taskq_name, sizeof (taskq_name), 11178 "%s_drv_taskq", sd_label); 11179 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 11180 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11181 TASKQ_PREPOPULATE)); 11182 11183 (void) snprintf(taskq_name, sizeof (taskq_name), 11184 "%s_rmw_taskq", sd_label); 11185 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 11186 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11187 TASKQ_PREPOPULATE)); 11188 } 11189 11190 11191 /* 11192 * Function: sd_taskq_delete 11193 * 11194 * Description: Complementary cleanup routine for sd_taskq_create(). 11195 * 11196 * Context: Kernel thread context. 11197 */ 11198 11199 static void 11200 sd_taskq_delete(void) 11201 { 11202 ASSERT(sd_tq != NULL); 11203 ASSERT(sd_wmr_tq != NULL); 11204 taskq_destroy(sd_tq); 11205 taskq_destroy(sd_wmr_tq); 11206 sd_tq = NULL; 11207 sd_wmr_tq = NULL; 11208 } 11209 11210 11211 /* 11212 * Function: sdstrategy 11213 * 11214 * Description: Driver's strategy (9E) entry point function. 11215 * 11216 * Arguments: bp - pointer to buf(9S) 11217 * 11218 * Return Code: Always returns zero 11219 * 11220 * Context: Kernel thread context. 11221 */ 11222 11223 static int 11224 sdstrategy(struct buf *bp) 11225 { 11226 struct sd_lun *un; 11227 11228 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11229 if (un == NULL) { 11230 bioerror(bp, EIO); 11231 bp->b_resid = bp->b_bcount; 11232 biodone(bp); 11233 return (0); 11234 } 11235 11236 /* As was done in the past, fail new cmds. if state is dumping. */ 11237 if (un->un_state == SD_STATE_DUMPING) { 11238 bioerror(bp, ENXIO); 11239 bp->b_resid = bp->b_bcount; 11240 biodone(bp); 11241 return (0); 11242 } 11243 11244 ASSERT(!mutex_owned(SD_MUTEX(un))); 11245 11246 /* 11247 * Commands may sneak in while we released the mutex in 11248 * DDI_SUSPEND, we should block new commands. However, old 11249 * commands that are still in the driver at this point should 11250 * still be allowed to drain. 11251 */ 11252 mutex_enter(SD_MUTEX(un)); 11253 /* 11254 * Must wait here if either the device is suspended or 11255 * if it's power level is changing. 11256 */ 11257 while ((un->un_state == SD_STATE_SUSPENDED) || 11258 (un->un_state == SD_STATE_PM_CHANGING)) { 11259 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11260 } 11261 11262 un->un_ncmds_in_driver++; 11263 11264 /* 11265 * atapi: Since we are running the CD for now in PIO mode we need to 11266 * call bp_mapin here to avoid bp_mapin called interrupt context under 11267 * the HBA's init_pkt routine. 11268 */ 11269 if (un->un_f_cfg_is_atapi == TRUE) { 11270 mutex_exit(SD_MUTEX(un)); 11271 bp_mapin(bp); 11272 mutex_enter(SD_MUTEX(un)); 11273 } 11274 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 11275 un->un_ncmds_in_driver); 11276 11277 if (bp->b_flags & B_WRITE) 11278 un->un_f_sync_cache_required = TRUE; 11279 11280 mutex_exit(SD_MUTEX(un)); 11281 11282 /* 11283 * This will (eventually) allocate the sd_xbuf area and 11284 * call sd_xbuf_strategy(). 
We just want to return the 11285 * result of ddi_xbuf_qstrategy so that we have an opt- 11286 * imized tail call which saves us a stack frame. 11287 */ 11288 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 11289 } 11290 11291 11292 /* 11293 * Function: sd_xbuf_strategy 11294 * 11295 * Description: Function for initiating IO operations via the 11296 * ddi_xbuf_qstrategy() mechanism. 11297 * 11298 * Context: Kernel thread context. 11299 */ 11300 11301 static void 11302 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 11303 { 11304 struct sd_lun *un = arg; 11305 11306 ASSERT(bp != NULL); 11307 ASSERT(xp != NULL); 11308 ASSERT(un != NULL); 11309 ASSERT(!mutex_owned(SD_MUTEX(un))); 11310 11311 /* 11312 * Initialize the fields in the xbuf and save a pointer to the 11313 * xbuf in bp->b_private. 11314 */ 11315 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 11316 11317 /* Send the buf down the iostart chain */ 11318 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 11319 } 11320 11321 11322 /* 11323 * Function: sd_xbuf_init 11324 * 11325 * Description: Prepare the given sd_xbuf struct for use. 11326 * 11327 * Arguments: un - ptr to softstate 11328 * bp - ptr to associated buf(9S) 11329 * xp - ptr to associated sd_xbuf 11330 * chain_type - IO chain type to use: 11331 * SD_CHAIN_NULL 11332 * SD_CHAIN_BUFIO 11333 * SD_CHAIN_USCSI 11334 * SD_CHAIN_DIRECT 11335 * SD_CHAIN_DIRECT_PRIORITY 11336 * pktinfop - ptr to private data struct for scsi_pkt(9S) 11337 * initialization; may be NULL if none. 11338 * 11339 * Context: Kernel thread context 11340 */ 11341 11342 static void 11343 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 11344 uchar_t chain_type, void *pktinfop) 11345 { 11346 int index; 11347 11348 ASSERT(un != NULL); 11349 ASSERT(bp != NULL); 11350 ASSERT(xp != NULL); 11351 11352 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 11353 bp, chain_type); 11354 11355 xp->xb_un = un; 11356 xp->xb_pktp = NULL; 11357 xp->xb_pktinfo = pktinfop; 11358 xp->xb_private = bp->b_private; 11359 xp->xb_blkno = (daddr_t)bp->b_blkno; 11360 11361 /* 11362 * Set up the iostart and iodone chain indexes in the xbuf, based 11363 * upon the specified chain type to use. 11364 */ 11365 switch (chain_type) { 11366 case SD_CHAIN_NULL: 11367 /* 11368 * Fall thru to just use the values for the buf type, even 11369 * tho for the NULL chain these values will never be used. 11370 */ 11371 /* FALLTHRU */ 11372 case SD_CHAIN_BUFIO: 11373 index = un->un_buf_chain_type; 11374 if ((!un->un_f_has_removable_media) && 11375 (un->un_tgt_blocksize != 0) && 11376 (un->un_tgt_blocksize != DEV_BSIZE)) { 11377 int secmask = 0, blknomask = 0; 11378 blknomask = 11379 (un->un_tgt_blocksize / DEV_BSIZE) - 1; 11380 secmask = un->un_tgt_blocksize - 1; 11381 11382 if ((bp->b_lblkno & (blknomask)) || 11383 (bp->b_bcount & (secmask))) { 11384 if (un->un_f_rmw_type != 11385 SD_RMW_TYPE_RETURN_ERROR) { 11386 if (un->un_f_pm_is_enabled == FALSE) 11387 index = 11388 SD_CHAIN_INFO_MSS_DSK_NO_PM; 11389 else 11390 index = 11391 SD_CHAIN_INFO_MSS_DISK; 11392 } 11393 } 11394 } 11395 break; 11396 case SD_CHAIN_USCSI: 11397 index = un->un_uscsi_chain_type; 11398 break; 11399 case SD_CHAIN_DIRECT: 11400 index = un->un_direct_chain_type; 11401 break; 11402 case SD_CHAIN_DIRECT_PRIORITY: 11403 index = un->un_priority_chain_type; 11404 break; 11405 default: 11406 /* We're really broken if we ever get here... 
*/ 11407 panic("sd_xbuf_init: illegal chain type!"); 11408 /*NOTREACHED*/ 11409 } 11410 11411 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 11412 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 11413 11414 /* 11415 * It might be a bit easier to simply bzero the entire xbuf above, 11416 * but it turns out that since we init a fair number of members anyway, 11417 * we save a fair number cycles by doing explicit assignment of zero. 11418 */ 11419 xp->xb_pkt_flags = 0; 11420 xp->xb_dma_resid = 0; 11421 xp->xb_retry_count = 0; 11422 xp->xb_victim_retry_count = 0; 11423 xp->xb_ua_retry_count = 0; 11424 xp->xb_nr_retry_count = 0; 11425 xp->xb_sense_bp = NULL; 11426 xp->xb_sense_status = 0; 11427 xp->xb_sense_state = 0; 11428 xp->xb_sense_resid = 0; 11429 xp->xb_ena = 0; 11430 11431 bp->b_private = xp; 11432 bp->b_flags &= ~(B_DONE | B_ERROR); 11433 bp->b_resid = 0; 11434 bp->av_forw = NULL; 11435 bp->av_back = NULL; 11436 bioerror(bp, 0); 11437 11438 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 11439 } 11440 11441 11442 /* 11443 * Function: sd_uscsi_strategy 11444 * 11445 * Description: Wrapper for calling into the USCSI chain via physio(9F) 11446 * 11447 * Arguments: bp - buf struct ptr 11448 * 11449 * Return Code: Always returns 0 11450 * 11451 * Context: Kernel thread context 11452 */ 11453 11454 static int 11455 sd_uscsi_strategy(struct buf *bp) 11456 { 11457 struct sd_lun *un; 11458 struct sd_uscsi_info *uip; 11459 struct sd_xbuf *xp; 11460 uchar_t chain_type; 11461 uchar_t cmd; 11462 11463 ASSERT(bp != NULL); 11464 11465 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11466 if (un == NULL) { 11467 bioerror(bp, EIO); 11468 bp->b_resid = bp->b_bcount; 11469 biodone(bp); 11470 return (0); 11471 } 11472 11473 ASSERT(!mutex_owned(SD_MUTEX(un))); 11474 11475 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 11476 11477 /* 11478 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 11479 */ 11480 ASSERT(bp->b_private != NULL); 11481 uip = (struct sd_uscsi_info *)bp->b_private; 11482 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0]; 11483 11484 mutex_enter(SD_MUTEX(un)); 11485 /* 11486 * atapi: Since we are running the CD for now in PIO mode we need to 11487 * call bp_mapin here to avoid bp_mapin called interrupt context under 11488 * the HBA's init_pkt routine. 11489 */ 11490 if (un->un_f_cfg_is_atapi == TRUE) { 11491 mutex_exit(SD_MUTEX(un)); 11492 bp_mapin(bp); 11493 mutex_enter(SD_MUTEX(un)); 11494 } 11495 un->un_ncmds_in_driver++; 11496 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 11497 un->un_ncmds_in_driver); 11498 11499 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) && 11500 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1)) 11501 un->un_f_sync_cache_required = TRUE; 11502 11503 mutex_exit(SD_MUTEX(un)); 11504 11505 switch (uip->ui_flags) { 11506 case SD_PATH_DIRECT: 11507 chain_type = SD_CHAIN_DIRECT; 11508 break; 11509 case SD_PATH_DIRECT_PRIORITY: 11510 chain_type = SD_CHAIN_DIRECT_PRIORITY; 11511 break; 11512 default: 11513 chain_type = SD_CHAIN_USCSI; 11514 break; 11515 } 11516 11517 /* 11518 * We may allocate extra buf for external USCSI commands. If the 11519 * application asks for bigger than 20-byte sense data via USCSI, 11520 * SCSA layer will allocate 252 bytes sense buf for that command. 
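	 *
	 * The allocation below over-sizes the xbuf so the sense buffer
	 * embedded at its tail can grow from SENSE_LENGTH (the 20-byte
	 * case noted above) to MAX_SENSE_LENGTH. Schematically:
	 *
	 *	small:	kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP)
	 *	large:	kmem_zalloc(sizeof (struct sd_xbuf) -
	 *		    SENSE_LENGTH + MAX_SENSE_LENGTH, KM_SLEEP)
	 *
	 * sd_uscsi_iodone() mirrors the same size computation when it
	 * frees the xbuf, so the two must stay in sync.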
11521 	 */
11522 	if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen >
11523 	    SENSE_LENGTH) {
11524 		xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH +
11525 		    MAX_SENSE_LENGTH, KM_SLEEP);
11526 	} else {
11527 		xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP);
11528 	}
11529 
11530 	sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);
11531 
11532 	/* Use the index obtained within xbuf_init */
11533 	SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);
11534 
11535 	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);
11536 
11537 	return (0);
11538 }
11539 
11540 /*
11541  * Function: sd_send_scsi_cmd
11542  *
11543  * Description: Runs a USCSI command for user (when called through sdioctl),
11544  *		or for the driver
11545  *
11546  * Arguments: dev - the dev_t for the device
11547  *		incmd - ptr to a valid uscsi_cmd struct
11548  *		flag - bit flag, indicating open settings, 32/64 bit type
11549  *		dataspace - UIO_USERSPACE or UIO_SYSSPACE
11550  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11551  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11552  *			to use the USCSI "direct" chain and bypass the normal
11553  *			command waitq.
11554  *
11555  * Return Code: 0 -  successful completion of the given command
11556  *		EIO - scsi_uscsi_handle_command() failed
11557  *		ENXIO - soft state not found for specified dev
11558  *		EINVAL
11559  *		EFAULT - copyin/copyout error
11560  *		return code of scsi_uscsi_handle_command():
11561  *			EIO
11562  *			ENXIO
11563  *			EACCES
11564  *
11565  * Context: Waits for command to complete. Can sleep.
11566  */
11567 
11568 static int
11569 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
11570 	enum uio_seg dataspace, int path_flag)
11571 {
11572 	struct sd_lun	*un;
11573 	sd_ssc_t	*ssc;
11574 	int		rval;
11575 
11576 	un = ddi_get_soft_state(sd_state, SDUNIT(dev));
11577 	if (un == NULL) {
11578 		return (ENXIO);
11579 	}
11580 
11581 	/*
11582 	 * Use sd_ssc_send to handle the uscsi command.
11583 	 */
11584 	ssc = sd_ssc_init(un);
11585 	rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag);
11586 	sd_ssc_fini(ssc);
11587 
11588 	return (rval);
11589 }
11590 
11591 /*
11592  * Function: sd_ssc_init
11593  *
11594  * Description: Callers issuing uscsi commands use this function to
11595  *		initialize fields such as the uscsi_cmd and sd_uscsi_info
11596  *		structs.
11597  *
11598  *		The return value of sd_send_scsi_cmd will be treated as a
11599  *		fault in various conditions. Even when it is non-zero, some
11600  *		callers may ignore the return value. That is to say, we
11601  *		cannot make an accurate assessment in sdintr, since a
11602  *		command failing in sdintr does not mean the caller of
11603  *		sd_send_scsi_cmd will treat it as a real failure.
11604  *
11605  *		To avoid printing too many error logs for a failed uscsi
11606  *		packet that the caller may not treat as a failure, sd
11607  *		keeps silent while handling all uscsi commands.
11608  *
11609  *		During detach->attach and attach->open, for some types of
11610  *		problems, the driver should provide information about the
11611  *		problem encountered. These paths use USCSI_SILENT, which
11612  *		suppresses all driver information. The result is that no
11613  *		information about the problem is available. Being
11614  *		completely silent during this time is inappropriate. The
11615  *		driver needs a more selective filter than USCSI_SILENT, so
11616  *		that information related to faults is provided.
11617  *
11618  *		To make an accurate assessment, the caller of
11619  *		sd_send_scsi_USCSI_CMD should take ownership and gather
11620  *		the information needed to print error messages.
11621  *
11622  *		If we want to print the necessary info for a uscsi command,
11623  *		we need to keep the uscsi_cmd and sd_uscsi_info until we
11624  *		can make the assessment. We use sd_ssc_init to allocate the
11625  *		structs needed for sending a uscsi command, and we are also
11626  *		responsible for freeing that memory by calling
11627  *		sd_ssc_fini.
11628  *
11629  *		The calling sequence will look like:
11630  *		sd_ssc_init->
11631  *
11632  *			...
11633  *
11634  *		sd_send_scsi_USCSI_CMD->
11635  *			sd_ssc_send-> - - - sdintr
11636  *			...
11637  *
11638  *		if we think the return value should be treated as a
11639  *		failure, we make the assessment here and print what is
11640  *		needed by retrieving the uscsi_cmd and sd_uscsi_info
11641  *
11642  *			...
11643  *
11644  *		sd_ssc_fini
11645  *
11646  *
11647  * Arguments: un - pointer to driver soft state (unit) structure for this
11648  *		target.
11649  *
11650  * Return code: sd_ssc_t - pointer to allocated sd_ssc_t struct, it contains
11651  *		uscsi_cmd and sd_uscsi_info.
11652  *		NULL - if memory for the sd_ssc_t struct cannot be allocated
11653  *
11654  * Context: Kernel Thread.
11655  */
11656 static sd_ssc_t *
11657 sd_ssc_init(struct sd_lun *un)
11658 {
11659 	sd_ssc_t		*ssc;
11660 	struct uscsi_cmd	*ucmdp;
11661 	struct sd_uscsi_info	*uip;
11662 
11663 	ASSERT(un != NULL);
11664 	ASSERT(!mutex_owned(SD_MUTEX(un)));
11665 
11666 	/*
11667 	 * Allocate sd_ssc_t structure
11668 	 */
11669 	ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP);
11670 
11671 	/*
11672 	 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine
11673 	 */
11674 	ucmdp = scsi_uscsi_alloc();
11675 
11676 	/*
11677 	 * Allocate sd_uscsi_info structure
11678 	 */
11679 	uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
11680 
11681 	ssc->ssc_uscsi_cmd = ucmdp;
11682 	ssc->ssc_uscsi_info = uip;
11683 	ssc->ssc_un = un;
11684 
11685 	return (ssc);
11686 }
11687 
11688 /*
11689  * Function: sd_ssc_fini
11690  *
11691  * Description: Free the sd_ssc_t struct and everything hanging off it.
11692  *
11693  * Arguments: ssc - struct pointer of sd_ssc_t.
11694  */
11695 static void
11696 sd_ssc_fini(sd_ssc_t *ssc)
11697 {
11698 	scsi_uscsi_free(ssc->ssc_uscsi_cmd);
11699 
11700 	if (ssc->ssc_uscsi_info != NULL) {
11701 		kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info));
11702 		ssc->ssc_uscsi_info = NULL;
11703 	}
11704 
11705 	kmem_free(ssc, sizeof (sd_ssc_t));
11706 	ssc = NULL;
11707 }
11708 
11709 /*
11710  * Function: sd_ssc_send
11711  *
11712  * Description: Runs a USCSI command for user when called through sdioctl,
11713  *		or for the driver.
11714  *
11715  * Arguments: ssc - pointer to the sd_ssc_t struct that carries the
11716  *			uscsi_cmd and sd_uscsi_info
11717  *		incmd - ptr to a valid uscsi_cmd struct
11718  *		flag - bit flag, indicating open settings, 32/64 bit type
11719  *		dataspace - UIO_USERSPACE or UIO_SYSSPACE
11720  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11721  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11722  *			to use the USCSI "direct" chain and bypass the normal
11723  *			command waitq.
11723 * 11724 * Return Code: 0 - successful completion of the given command 11725 * EIO - scsi_uscsi_handle_command() failed 11726 * ENXIO - soft state not found for specified dev 11727 * EINVAL 11728 * EFAULT - copyin/copyout error 11729 * return code of scsi_uscsi_handle_command(): 11730 * EIO 11731 * ENXIO 11732 * EACCES 11733 * 11734 * Context: Kernel Thread; 11735 * Waits for command to complete. Can sleep. 11736 */ 11737 static int 11738 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag, 11739 enum uio_seg dataspace, int path_flag) 11740 { 11741 struct sd_uscsi_info *uip; 11742 struct uscsi_cmd *uscmd; 11743 struct sd_lun *un; 11744 dev_t dev; 11745 11746 int format = 0; 11747 int rval; 11748 11749 ASSERT(ssc != NULL); 11750 un = ssc->ssc_un; 11751 ASSERT(un != NULL); 11752 uscmd = ssc->ssc_uscsi_cmd; 11753 ASSERT(uscmd != NULL); 11754 ASSERT(!mutex_owned(SD_MUTEX(un))); 11755 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) { 11756 /* 11757 * If enter here, it indicates that the previous uscsi 11758 * command has not been processed by sd_ssc_assessment. 11759 * This is violating our rules of FMA telemetry processing. 11760 * We should print out this message and the last undisposed 11761 * uscsi command. 11762 */ 11763 if (uscmd->uscsi_cdb != NULL) { 11764 SD_INFO(SD_LOG_SDTEST, un, 11765 "sd_ssc_send is missing the alternative " 11766 "sd_ssc_assessment when running command 0x%x.\n", 11767 uscmd->uscsi_cdb[0]); 11768 } 11769 /* 11770 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be 11771 * the initial status. 11772 */ 11773 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11774 } 11775 11776 /* 11777 * We need to make sure sd_ssc_send will have sd_ssc_assessment 11778 * followed to avoid missing FMA telemetries. 11779 */ 11780 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT; 11781 11782 #ifdef SDDEBUG 11783 switch (dataspace) { 11784 case UIO_USERSPACE: 11785 SD_TRACE(SD_LOG_IO, un, 11786 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un); 11787 break; 11788 case UIO_SYSSPACE: 11789 SD_TRACE(SD_LOG_IO, un, 11790 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un); 11791 break; 11792 default: 11793 SD_TRACE(SD_LOG_IO, un, 11794 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un); 11795 break; 11796 } 11797 #endif 11798 11799 rval = scsi_uscsi_copyin((intptr_t)incmd, flag, 11800 SD_ADDRESS(un), &uscmd); 11801 if (rval != 0) { 11802 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: " 11803 "scsi_uscsi_alloc_and_copyin failed\n", un); 11804 return (rval); 11805 } 11806 11807 if ((uscmd->uscsi_cdb != NULL) && 11808 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 11809 mutex_enter(SD_MUTEX(un)); 11810 un->un_f_format_in_progress = TRUE; 11811 mutex_exit(SD_MUTEX(un)); 11812 format = 1; 11813 } 11814 11815 /* 11816 * Allocate an sd_uscsi_info struct and fill it with the info 11817 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 11818 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 11819 * since we allocate the buf here in this function, we do not 11820 * need to preserve the prior contents of b_private. 11821 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 11822 */ 11823 uip = ssc->ssc_uscsi_info; 11824 uip->ui_flags = path_flag; 11825 uip->ui_cmdp = uscmd; 11826 11827 /* 11828 * Commands sent with priority are intended for error recovery 11829 * situations, and do not have retries performed. 
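	 *
	 * For reference, internal callers are expected to bracket this
	 * routine with sd_ssc_init()/sd_ssc_fini() and follow it with an
	 * assessment, roughly as sketched below (a hypothetical caller,
	 * illustrative only; the flag and assessment choices vary per
	 * command):
	 *
	 *	ssc = sd_ssc_init(un);
	 *	rval = sd_ssc_send(ssc, &ucmd, FKIOCTL, UIO_SYSSPACE,
	 *	    SD_PATH_DIRECT);
	 *	if (rval == 0)
	 *		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
	 *	else
	 *		sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
	 *	sd_ssc_fini(ssc);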
11830 */ 11831 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11832 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11833 } 11834 uscmd->uscsi_flags &= ~USCSI_NOINTR; 11835 11836 dev = SD_GET_DEV(un); 11837 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 11838 sd_uscsi_strategy, NULL, uip); 11839 11840 /* 11841 * mark ssc_flags right after handle_cmd to make sure 11842 * the uscsi has been sent 11843 */ 11844 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED; 11845 11846 #ifdef SDDEBUG 11847 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11848 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11849 uscmd->uscsi_status, uscmd->uscsi_resid); 11850 if (uscmd->uscsi_bufaddr != NULL) { 11851 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11852 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11853 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11854 if (dataspace == UIO_SYSSPACE) { 11855 SD_DUMP_MEMORY(un, SD_LOG_IO, 11856 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11857 uscmd->uscsi_buflen, SD_LOG_HEX); 11858 } 11859 } 11860 #endif 11861 11862 if (format == 1) { 11863 mutex_enter(SD_MUTEX(un)); 11864 un->un_f_format_in_progress = FALSE; 11865 mutex_exit(SD_MUTEX(un)); 11866 } 11867 11868 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd); 11869 11870 return (rval); 11871 } 11872 11873 /* 11874 * Function: sd_ssc_print 11875 * 11876 * Description: Print information available to the console. 11877 * 11878 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 11879 * sd_uscsi_info in. 11880 * sd_severity - log level. 11881 * Context: Kernel thread or interrupt context. 11882 */ 11883 static void 11884 sd_ssc_print(sd_ssc_t *ssc, int sd_severity) 11885 { 11886 struct uscsi_cmd *ucmdp; 11887 struct scsi_device *devp; 11888 dev_info_t *devinfo; 11889 uchar_t *sensep; 11890 int senlen; 11891 union scsi_cdb *cdbp; 11892 uchar_t com; 11893 extern struct scsi_key_strings scsi_cmds[]; 11894 11895 ASSERT(ssc != NULL); 11896 ASSERT(ssc->ssc_un != NULL); 11897 11898 if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT) 11899 return; 11900 ucmdp = ssc->ssc_uscsi_cmd; 11901 devp = SD_SCSI_DEVP(ssc->ssc_un); 11902 devinfo = SD_DEVINFO(ssc->ssc_un); 11903 ASSERT(ucmdp != NULL); 11904 ASSERT(devp != NULL); 11905 ASSERT(devinfo != NULL); 11906 sensep = (uint8_t *)ucmdp->uscsi_rqbuf; 11907 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid; 11908 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb; 11909 11910 /* In certain case (like DOORLOCK), the cdb could be NULL. */ 11911 if (cdbp == NULL) 11912 return; 11913 /* We don't print log if no sense data available. */ 11914 if (senlen == 0) 11915 sensep = NULL; 11916 com = cdbp->scc_cmd; 11917 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com, 11918 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL); 11919 } 11920 11921 /* 11922 * Function: sd_ssc_assessment 11923 * 11924 * Description: We use this function to make an assessment at the point 11925 * where SD driver may encounter a potential error. 11926 * 11927 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 11928 * sd_uscsi_info in. 11929 * tp_assess - a hint of strategy for ereport posting. 11930 * Possible values of tp_assess include: 11931 * SD_FMT_IGNORE - we don't post any ereport because we're 11932 * sure that it is ok to ignore the underlying problems. 11933 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now 11934 * but it might be not correct to ignore the underlying hardware 11935 * error. 
11936  *		SD_FMT_STATUS_CHECK - we will post an ereport with the
11937  *		payload driver-assessment of value "fail" or
11938  *		"fatal" (depending on what information we have here). This
11939  *		assessment value is usually set when the SD driver thinks
11940  *		a potential error occurred (typically, when the return value
11941  *		of the SCSI command is EIO).
11942  *		SD_FMT_STANDARD - we will post an ereport with the payload
11943  *		driver-assessment of value "info". This assessment value is
11944  *		set when the SCSI command returned successfully and with
11945  *		sense data sent back.
11946  *
11947  * Context: Kernel thread.
11948  */
11949 static void
11950 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess)
11951 {
11952 	int		senlen = 0;
11953 	struct uscsi_cmd	*ucmdp = NULL;
11954 	struct sd_lun	*un;
11955 
11956 	ASSERT(ssc != NULL);
11957 	un = ssc->ssc_un;
11958 	ASSERT(un != NULL);
11959 	ucmdp = ssc->ssc_uscsi_cmd;
11960 	ASSERT(ucmdp != NULL);
11961 
11962 	if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
11963 		ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT;
11964 	} else {
11965 		/*
11966 		 * If we enter here, it indicates a wrong calling
11967 		 * sequence of sd_ssc_send and sd_ssc_assessment, which
11968 		 * should be called as a pair to avoid the loss of
11969 		 * FMA telemetry.
11970 		 */
11971 		if (ucmdp->uscsi_cdb != NULL) {
11972 			SD_INFO(SD_LOG_SDTEST, un,
11973 			    "sd_ssc_assessment is missing the "
11974 			    "corresponding sd_ssc_send when running 0x%x, "
11975 			    "or there are superfluous sd_ssc_assessment calls "
11976 			    "for the same sd_ssc_send.\n",
11977 			    ucmdp->uscsi_cdb[0]);
11978 		}
11979 		/*
11980 		 * Set the ssc_flags to the initial value to avoid passing
11981 		 * down dirty flags to the following sd_ssc_send function.
11982 		 */
11983 		ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
11984 		return;
11985 	}
11986 
11987 	/*
11988 	 * Only handle an issued command which is waiting for assessment.
11989 	 * A command which has not been issued will not have
11990 	 * SSC_FLAGS_INVALID_DATA set, so it's OK to just return here.
11991 	 */
11992 	if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) {
11993 		sd_ssc_print(ssc, SCSI_ERR_INFO);
11994 		return;
11995 	} else {
11996 		/*
11997 		 * For an issued command, we should clear this flag so
11998 		 * that the sd_ssc_t structure can be reused for multiple
11999 		 * uscsi commands.
12000 		 */
12001 		ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED;
12002 	}
12003 
12004 	/*
12005 	 * We do not deal with non-retryable commands (flag USCSI_DIAGNOSE
12006 	 * set) here, and we should clear the ssc_flags before returning.
12007 	 */
12008 	if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) {
12009 		ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12010 		return;
12011 	}
12012 
12013 	switch (tp_assess) {
12014 	case SD_FMT_IGNORE:
12015 	case SD_FMT_IGNORE_COMPROMISE:
12016 		break;
12017 	case SD_FMT_STATUS_CHECK:
12018 		/*
12019 		 * For a failed command (including a succeeded command
12020 		 * with invalid data sent back).
12021 		 */
12022 		sd_ssc_post(ssc, SD_FM_DRV_FATAL);
12023 		break;
12024 	case SD_FMT_STANDARD:
12025 		/*
12026 		 * Always for succeeded commands, probably with sense
12027 		 * data sent back.
12028 		 * Limitation:
12029 		 * We can only handle a succeeded command with sense
12030 		 * data sent back when auto-request-sense is enabled.
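		 *
		 * Worked example of the senlen computation below
		 * (illustrative numbers): with uscsi_rqlen == 20 and
		 * uscsi_rqresid == 2, senlen == 18, so 18 bytes of valid
		 * sense data came back and, provided STATE_ARQ_DONE is set
		 * and ARQ is enabled, an ereport with driver-assessment
		 * "info" is posted.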
12031 */ 12032 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen - 12033 ssc->ssc_uscsi_cmd->uscsi_rqresid; 12034 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) && 12035 (un->un_f_arq_enabled == TRUE) && 12036 senlen > 0 && 12037 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) { 12038 sd_ssc_post(ssc, SD_FM_DRV_NOTICE); 12039 } 12040 break; 12041 default: 12042 /* 12043 * Should not have other type of assessment. 12044 */ 12045 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 12046 "sd_ssc_assessment got wrong " 12047 "sd_type_assessment %d.\n", tp_assess); 12048 break; 12049 } 12050 /* 12051 * Clear up the ssc_flags before return. 12052 */ 12053 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 12054 } 12055 12056 /* 12057 * Function: sd_ssc_post 12058 * 12059 * Description: 1. read the driver property to get fm-scsi-log flag. 12060 * 2. print log if fm_log_capable is non-zero. 12061 * 3. call sd_ssc_ereport_post to post ereport if possible. 12062 * 12063 * Context: May be called from kernel thread or interrupt context. 12064 */ 12065 static void 12066 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess) 12067 { 12068 struct sd_lun *un; 12069 int sd_severity; 12070 12071 ASSERT(ssc != NULL); 12072 un = ssc->ssc_un; 12073 ASSERT(un != NULL); 12074 12075 /* 12076 * We may enter here from sd_ssc_assessment(for USCSI command) or 12077 * by directly called from sdintr context. 12078 * We don't handle a non-disk drive(CD-ROM, removable media). 12079 * Clear the ssc_flags before return in case we've set 12080 * SSC_FLAGS_INVALID_XXX which should be skipped for a non-disk 12081 * driver. 12082 */ 12083 if (ISCD(un) || un->un_f_has_removable_media) { 12084 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 12085 return; 12086 } 12087 12088 switch (sd_assess) { 12089 case SD_FM_DRV_FATAL: 12090 sd_severity = SCSI_ERR_FATAL; 12091 break; 12092 case SD_FM_DRV_RECOVERY: 12093 sd_severity = SCSI_ERR_RECOVERED; 12094 break; 12095 case SD_FM_DRV_RETRY: 12096 sd_severity = SCSI_ERR_RETRYABLE; 12097 break; 12098 case SD_FM_DRV_NOTICE: 12099 sd_severity = SCSI_ERR_INFO; 12100 break; 12101 default: 12102 sd_severity = SCSI_ERR_UNKNOWN; 12103 } 12104 /* print log */ 12105 sd_ssc_print(ssc, sd_severity); 12106 12107 /* always post ereport */ 12108 sd_ssc_ereport_post(ssc, sd_assess); 12109 } 12110 12111 /* 12112 * Function: sd_ssc_set_info 12113 * 12114 * Description: Mark ssc_flags and set ssc_info which would be the 12115 * payload of uderr ereport. This function will cause 12116 * sd_ssc_ereport_post to post uderr ereport only. 12117 * Besides, when ssc_flags == SSC_FLAGS_INVALID_DATA(USCSI), 12118 * the function will also call SD_ERROR or scsi_log for a 12119 * CDROM/removable-media/DDI_FM_NOT_CAPABLE device. 12120 * 12121 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 12122 * sd_uscsi_info in. 12123 * ssc_flags - indicate the sub-category of a uderr. 12124 * comp - this argument is meaningful only when 12125 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible 12126 * values include: 12127 * > 0, SD_ERROR is used with comp as the driver logging 12128 * component; 12129 * = 0, scsi-log is used to log error telemetries; 12130 * < 0, no log available for this telemetry. 12131 * 12132 * Context: Kernel thread or interrupt context 12133 */ 12134 static void 12135 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...) 
12136 { 12137 va_list ap; 12138 12139 ASSERT(ssc != NULL); 12140 ASSERT(ssc->ssc_un != NULL); 12141 12142 ssc->ssc_flags |= ssc_flags; 12143 va_start(ap, fmt); 12144 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap); 12145 va_end(ap); 12146 12147 /* 12148 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command 12149 * with invalid data sent back. For non-uscsi command, the 12150 * following code will be bypassed. 12151 */ 12152 if (ssc_flags & SSC_FLAGS_INVALID_DATA) { 12153 if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) { 12154 /* 12155 * If the error belong to certain component and we 12156 * do not want it to show up on the console, we 12157 * will use SD_ERROR, otherwise scsi_log is 12158 * preferred. 12159 */ 12160 if (comp > 0) { 12161 SD_ERROR(comp, ssc->ssc_un, ssc->ssc_info); 12162 } else if (comp == 0) { 12163 scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label, 12164 CE_WARN, ssc->ssc_info); 12165 } 12166 } 12167 } 12168 } 12169 12170 /* 12171 * Function: sd_buf_iodone 12172 * 12173 * Description: Frees the sd_xbuf & returns the buf to its originator. 12174 * 12175 * Context: May be called from interrupt context. 12176 */ 12177 /* ARGSUSED */ 12178 static void 12179 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 12180 { 12181 struct sd_xbuf *xp; 12182 12183 ASSERT(un != NULL); 12184 ASSERT(bp != NULL); 12185 ASSERT(!mutex_owned(SD_MUTEX(un))); 12186 12187 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 12188 12189 xp = SD_GET_XBUF(bp); 12190 ASSERT(xp != NULL); 12191 12192 /* xbuf is gone after this */ 12193 if (ddi_xbuf_done(bp, un->un_xbuf_attr)) { 12194 mutex_enter(SD_MUTEX(un)); 12195 12196 /* 12197 * Grab time when the cmd completed. 12198 * This is used for determining if the system has been 12199 * idle long enough to make it idle to the PM framework. 12200 * This is for lowering the overhead, and therefore improving 12201 * performance per I/O operation. 12202 */ 12203 un->un_pm_idle_time = ddi_get_time(); 12204 12205 un->un_ncmds_in_driver--; 12206 ASSERT(un->un_ncmds_in_driver >= 0); 12207 SD_INFO(SD_LOG_IO, un, 12208 "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 12209 un->un_ncmds_in_driver); 12210 12211 mutex_exit(SD_MUTEX(un)); 12212 } 12213 12214 biodone(bp); /* bp is gone after this */ 12215 12216 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 12217 } 12218 12219 12220 /* 12221 * Function: sd_uscsi_iodone 12222 * 12223 * Description: Frees the sd_xbuf & returns the buf to its originator. 12224 * 12225 * Context: May be called from interrupt context. 12226 */ 12227 /* ARGSUSED */ 12228 static void 12229 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 12230 { 12231 struct sd_xbuf *xp; 12232 12233 ASSERT(un != NULL); 12234 ASSERT(bp != NULL); 12235 12236 xp = SD_GET_XBUF(bp); 12237 ASSERT(xp != NULL); 12238 ASSERT(!mutex_owned(SD_MUTEX(un))); 12239 12240 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 12241 12242 bp->b_private = xp->xb_private; 12243 12244 mutex_enter(SD_MUTEX(un)); 12245 12246 /* 12247 * Grab time when the cmd completed. 12248 * This is used for determining if the system has been 12249 * idle long enough to make it idle to the PM framework. 12250 * This is for lowering the overhead, and therefore improving 12251 * performance per I/O operation. 
12252 	 */
12253 	un->un_pm_idle_time = ddi_get_time();
12254 
12255 	un->un_ncmds_in_driver--;
12256 	ASSERT(un->un_ncmds_in_driver >= 0);
12257 	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
12258 	    un->un_ncmds_in_driver);
12259 
12260 	mutex_exit(SD_MUTEX(un));
12261 
12262 	if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen >
12263 	    SENSE_LENGTH) {
12264 		kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH +
12265 		    MAX_SENSE_LENGTH);
12266 	} else {
12267 		kmem_free(xp, sizeof (struct sd_xbuf));
12268 	}
12269 
12270 	biodone(bp);
12271 
12272 	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
12273 }
12274 
12275 
12276 /*
12277  * Function: sd_mapblockaddr_iostart
12278  *
12279  * Description: Verify request lies within the partition limits for
12280  *		the indicated minor device. Issue "overrun" buf if
12281  *		request would exceed partition range. Converts
12282  *		partition-relative block address to absolute.
12283  *
12284  *		Upon exit of this function:
12285  *		1. I/O is aligned
12286  *		   xp->xb_blkno represents the absolute sector address
12287  *		2. I/O is misaligned
12288  *		   xp->xb_blkno represents the absolute logical block address
12289  *		   based on DEV_BSIZE. The logical block address will be
12290  *		   converted to physical sector address in sd_mapblocksize_\
12291  *		   iostart.
12292  *		3. I/O is misaligned but is aligned in "overrun" buf
12293  *		   xp->xb_blkno represents the absolute logical block address
12294  *		   based on DEV_BSIZE. The logical block address will be
12295  *		   converted to physical sector address in sd_mapblocksize_\
12296  *		   iostart. But no RMW will be issued in this case.
12297  *
12298  * Context: Can sleep
12299  *
12300  * Issues: This follows what the old code did, in terms of accessing
12301  *	some of the partition info in the unit struct without holding
12302  *	the mutex. This is a general issue, if the partition info
12303  *	can be altered while IO is in progress... as soon as we send
12304  *	a buf, its partitioning can be invalid before it gets to the
12305  *	device. Probably the right fix is to move partitioning out
12306  *	of the driver entirely.
12307  */
12308 
12309 static void
12310 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
12311 {
12312 	diskaddr_t	nblocks;	/* #blocks in the given partition */
12313 	daddr_t	blocknum;	/* Block number specified by the buf */
12314 	size_t	requested_nblocks;
12315 	size_t	available_nblocks;
12316 	int	partition;
12317 	diskaddr_t	partition_offset;
12318 	struct sd_xbuf *xp;
12319 	int secmask = 0, blknomask = 0;
12320 	ushort_t is_aligned = TRUE;
12321 
12322 	ASSERT(un != NULL);
12323 	ASSERT(bp != NULL);
12324 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12325 
12326 	SD_TRACE(SD_LOG_IO_PARTITION, un,
12327 	    "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);
12328 
12329 	xp = SD_GET_XBUF(bp);
12330 	ASSERT(xp != NULL);
12331 
12332 	/*
12333 	 * If the geometry is not indicated as valid, attempt to access
12334 	 * the unit & verify the geometry/label. This can be the case for
12335 	 * removable-media devices, or if the device was opened in
12336 	 * NDELAY/NONBLOCK mode.
12337 */ 12338 partition = SDPART(bp->b_edev); 12339 12340 if (!SD_IS_VALID_LABEL(un)) { 12341 sd_ssc_t *ssc; 12342 /* 12343 * Initialize sd_ssc_t for internal uscsi commands 12344 * In case of potential porformance issue, we need 12345 * to alloc memory only if there is invalid label 12346 */ 12347 ssc = sd_ssc_init(un); 12348 12349 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) { 12350 /* 12351 * For removable devices it is possible to start an 12352 * I/O without a media by opening the device in nodelay 12353 * mode. Also for writable CDs there can be many 12354 * scenarios where there is no geometry yet but volume 12355 * manager is trying to issue a read() just because 12356 * it can see TOC on the CD. So do not print a message 12357 * for removables. 12358 */ 12359 if (!un->un_f_has_removable_media) { 12360 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12361 "i/o to invalid geometry\n"); 12362 } 12363 bioerror(bp, EIO); 12364 bp->b_resid = bp->b_bcount; 12365 SD_BEGIN_IODONE(index, un, bp); 12366 12367 sd_ssc_fini(ssc); 12368 return; 12369 } 12370 sd_ssc_fini(ssc); 12371 } 12372 12373 nblocks = 0; 12374 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 12375 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 12376 12377 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1; 12378 secmask = un->un_tgt_blocksize - 1; 12379 12380 if ((bp->b_lblkno & (blknomask)) || (bp->b_bcount & (secmask))) { 12381 is_aligned = FALSE; 12382 } 12383 12384 if (!(NOT_DEVBSIZE(un))) { 12385 /* 12386 * If I/O is aligned, no need to involve RMW(Read Modify Write) 12387 * Convert the logical block number to target's physical sector 12388 * number. 12389 */ 12390 if (is_aligned) { 12391 xp->xb_blkno = SD_SYS2TGTBLOCK(un, xp->xb_blkno); 12392 } else { 12393 switch (un->un_f_rmw_type) { 12394 case SD_RMW_TYPE_RETURN_ERROR: 12395 bp->b_flags |= B_ERROR; 12396 goto error_exit; 12397 12398 case SD_RMW_TYPE_DEFAULT: 12399 mutex_enter(SD_MUTEX(un)); 12400 if (un->un_rmw_msg_timeid == NULL) { 12401 scsi_log(SD_DEVINFO(un), sd_label, 12402 CE_WARN, "I/O request is not " 12403 "aligned with %d disk sector size. " 12404 "It is handled through Read Modify " 12405 "Write but the performance is " 12406 "very low.\n", 12407 un->un_tgt_blocksize); 12408 un->un_rmw_msg_timeid = 12409 timeout(sd_rmw_msg_print_handler, 12410 un, SD_RMW_MSG_PRINT_TIMEOUT); 12411 } else { 12412 un->un_rmw_incre_count ++; 12413 } 12414 mutex_exit(SD_MUTEX(un)); 12415 break; 12416 12417 case SD_RMW_TYPE_NO_WARNING: 12418 default: 12419 break; 12420 } 12421 12422 nblocks = SD_TGT2SYSBLOCK(un, nblocks); 12423 partition_offset = SD_TGT2SYSBLOCK(un, 12424 partition_offset); 12425 } 12426 } 12427 12428 /* 12429 * blocknum is the starting block number of the request. At this 12430 * point it is still relative to the start of the minor device. 12431 */ 12432 blocknum = xp->xb_blkno; 12433 12434 /* 12435 * Legacy: If the starting block number is one past the last block 12436 * in the partition, do not set B_ERROR in the buf. 12437 */ 12438 if (blocknum == nblocks) { 12439 goto error_exit; 12440 } 12441 12442 /* 12443 * Confirm that the first block of the request lies within the 12444 * partition limits. Also the requested number of bytes must be 12445 * a multiple of the system block size. 
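	 *
	 * Worked example of the overrun arithmetic that follows
	 * (illustrative numbers): for a partition with nblocks == 100,
	 * a request at blocknum == 90 for requested_nblocks == 20 leaves
	 * available_nblocks == 10, so an overrun buf is cloned for the
	 * first 10 blocks and the other 10 blocks' worth of bytes shows
	 * up in b_resid when the I/O completes.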
12446 */ 12447 if ((blocknum < 0) || (blocknum >= nblocks) || 12448 ((bp->b_bcount & (DEV_BSIZE - 1)) != 0)) { 12449 bp->b_flags |= B_ERROR; 12450 goto error_exit; 12451 } 12452 12453 /* 12454 * If the requsted # blocks exceeds the available # blocks, that 12455 * is an overrun of the partition. 12456 */ 12457 if ((!NOT_DEVBSIZE(un)) && is_aligned) { 12458 requested_nblocks = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 12459 } else { 12460 requested_nblocks = SD_BYTES2SYSBLOCKS(bp->b_bcount); 12461 } 12462 12463 available_nblocks = (size_t)(nblocks - blocknum); 12464 ASSERT(nblocks >= blocknum); 12465 12466 if (requested_nblocks > available_nblocks) { 12467 size_t resid; 12468 12469 /* 12470 * Allocate an "overrun" buf to allow the request to proceed 12471 * for the amount of space available in the partition. The 12472 * amount not transferred will be added into the b_resid 12473 * when the operation is complete. The overrun buf 12474 * replaces the original buf here, and the original buf 12475 * is saved inside the overrun buf, for later use. 12476 */ 12477 if ((!NOT_DEVBSIZE(un)) && is_aligned) { 12478 resid = SD_TGTBLOCKS2BYTES(un, 12479 (offset_t)(requested_nblocks - available_nblocks)); 12480 } else { 12481 resid = SD_SYSBLOCKS2BYTES( 12482 (offset_t)(requested_nblocks - available_nblocks)); 12483 } 12484 12485 size_t count = bp->b_bcount - resid; 12486 /* 12487 * Note: count is an unsigned entity thus it'll NEVER 12488 * be less than 0 so ASSERT the original values are 12489 * correct. 12490 */ 12491 ASSERT(bp->b_bcount >= resid); 12492 12493 bp = sd_bioclone_alloc(bp, count, blocknum, 12494 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 12495 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 12496 ASSERT(xp != NULL); 12497 } 12498 12499 /* At this point there should be no residual for this buf. */ 12500 ASSERT(bp->b_resid == 0); 12501 12502 /* Convert the block number to an absolute address. */ 12503 xp->xb_blkno += partition_offset; 12504 12505 SD_NEXT_IOSTART(index, un, bp); 12506 12507 SD_TRACE(SD_LOG_IO_PARTITION, un, 12508 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 12509 12510 return; 12511 12512 error_exit: 12513 bp->b_resid = bp->b_bcount; 12514 SD_BEGIN_IODONE(index, un, bp); 12515 SD_TRACE(SD_LOG_IO_PARTITION, un, 12516 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 12517 } 12518 12519 12520 /* 12521 * Function: sd_mapblockaddr_iodone 12522 * 12523 * Description: Completion-side processing for partition management. 12524 * 12525 * Context: May be called under interrupt context 12526 */ 12527 12528 static void 12529 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 12530 { 12531 /* int partition; */ /* Not used, see below. */ 12532 ASSERT(un != NULL); 12533 ASSERT(bp != NULL); 12534 ASSERT(!mutex_owned(SD_MUTEX(un))); 12535 12536 SD_TRACE(SD_LOG_IO_PARTITION, un, 12537 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 12538 12539 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 12540 /* 12541 * We have an "overrun" buf to deal with... 12542 */ 12543 struct sd_xbuf *xp; 12544 struct buf *obp; /* ptr to the original buf */ 12545 12546 xp = SD_GET_XBUF(bp); 12547 ASSERT(xp != NULL); 12548 12549 /* Retrieve the pointer to the original buf */ 12550 obp = (struct buf *)xp->xb_private; 12551 ASSERT(obp != NULL); 12552 12553 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 12554 bioerror(obp, bp->b_error); 12555 12556 sd_bioclone_free(bp); 12557 12558 /* 12559 * Get back the original buf. 
12560 * Note that since the restoration of xb_blkno below 12561 * was removed, the sd_xbuf is not needed. 12562 */ 12563 bp = obp; 12564 /* 12565 * xp = SD_GET_XBUF(bp); 12566 * ASSERT(xp != NULL); 12567 */ 12568 } 12569 12570 /* 12571 * Convert sd->xb_blkno back to a minor-device relative value. 12572 * Note: this has been commented out, as it is not needed in the 12573 * current implementation of the driver (ie, since this function 12574 * is at the top of the layering chains, so the info will be 12575 * discarded) and it is in the "hot" IO path. 12576 * 12577 * partition = getminor(bp->b_edev) & SDPART_MASK; 12578 * xp->xb_blkno -= un->un_offset[partition]; 12579 */ 12580 12581 SD_NEXT_IODONE(index, un, bp); 12582 12583 SD_TRACE(SD_LOG_IO_PARTITION, un, 12584 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 12585 } 12586 12587 12588 /* 12589 * Function: sd_mapblocksize_iostart 12590 * 12591 * Description: Convert between system block size (un->un_sys_blocksize) 12592 * and target block size (un->un_tgt_blocksize). 12593 * 12594 * Context: Can sleep to allocate resources. 12595 * 12596 * Assumptions: A higher layer has already performed any partition validation, 12597 * and converted the xp->xb_blkno to an absolute value relative 12598 * to the start of the device. 12599 * 12600 * It is also assumed that the higher layer has implemented 12601 * an "overrun" mechanism for the case where the request would 12602 * read/write beyond the end of a partition. In this case we 12603 * assume (and ASSERT) that bp->b_resid == 0. 12604 * 12605 * Note: The implementation for this routine assumes the target 12606 * block size remains constant between allocation and transport. 12607 */ 12608 12609 static void 12610 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 12611 { 12612 struct sd_mapblocksize_info *bsp; 12613 struct sd_xbuf *xp; 12614 offset_t first_byte; 12615 daddr_t start_block, end_block; 12616 daddr_t request_bytes; 12617 ushort_t is_aligned = FALSE; 12618 12619 ASSERT(un != NULL); 12620 ASSERT(bp != NULL); 12621 ASSERT(!mutex_owned(SD_MUTEX(un))); 12622 ASSERT(bp->b_resid == 0); 12623 12624 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12625 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 12626 12627 /* 12628 * For a non-writable CD, a write request is an error 12629 */ 12630 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 12631 (un->un_f_mmc_writable_media == FALSE)) { 12632 bioerror(bp, EIO); 12633 bp->b_resid = bp->b_bcount; 12634 SD_BEGIN_IODONE(index, un, bp); 12635 return; 12636 } 12637 12638 /* 12639 * We do not need a shadow buf if the device is using 12640 * un->un_sys_blocksize as its block size or if bcount == 0. 12641 * In this case there is no layer-private data block allocated. 
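	 *
	 * Worked example of the block-size conversion performed below
	 * (illustrative numbers): with DEV_BSIZE == 512 and
	 * un_tgt_blocksize == 2048, a request at xb_blkno == 10 for 1024
	 * bytes gives first_byte == 5120, start_block == 2, end_block == 3
	 * and request_bytes == 2048. Since 5120 is not a multiple of 2048
	 * the request is unaligned: a shadow buf is allocated and
	 * mbs_copy_offset == 5120 - (2 * 2048) == 1024.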
12642 */ 12643 if ((un->un_tgt_blocksize == DEV_BSIZE) || 12644 (bp->b_bcount == 0)) { 12645 goto done; 12646 } 12647 12648 #if defined(__i386) || defined(__amd64) 12649 /* We do not support non-block-aligned transfers for ROD devices */ 12650 ASSERT(!ISROD(un)); 12651 #endif 12652 12653 xp = SD_GET_XBUF(bp); 12654 ASSERT(xp != NULL); 12655 12656 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12657 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 12658 un->un_tgt_blocksize, DEV_BSIZE); 12659 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12660 "request start block:0x%x\n", xp->xb_blkno); 12661 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12662 "request len:0x%x\n", bp->b_bcount); 12663 12664 /* 12665 * Allocate the layer-private data area for the mapblocksize layer. 12666 * Layers are allowed to use the xb_private member of the sd_xbuf 12667 * struct to store the pointer to their layer-private data block, but 12668 * each layer also has the responsibility of restoring the prior 12669 * contents of xb_private before returning the buf/xbuf to the 12670 * higher layer that sent it. 12671 * 12672 * Here we save the prior contents of xp->xb_private into the 12673 * bsp->mbs_oprivate field of our layer-private data area. This value 12674 * is restored by sd_mapblocksize_iodone() just prior to freeing up 12675 * the layer-private area and returning the buf/xbuf to the layer 12676 * that sent it. 12677 * 12678 * Note that here we use kmem_zalloc for the allocation as there are 12679 * parts of the mapblocksize code that expect certain fields to be 12680 * zero unless explicitly set to a required value. 12681 */ 12682 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12683 bsp->mbs_oprivate = xp->xb_private; 12684 xp->xb_private = bsp; 12685 12686 /* 12687 * This treats the data on the disk (target) as an array of bytes. 12688 * first_byte is the byte offset, from the beginning of the device, 12689 * to the location of the request. This is converted from a 12690 * un->un_sys_blocksize block address to a byte offset, and then back 12691 * to a block address based upon a un->un_tgt_blocksize block size. 12692 * 12693 * xp->xb_blkno should be absolute upon entry into this function, 12694 * but it is based upon partitions that use the "system" 12695 * block size. It must be adjusted to reflect the block size of 12696 * the target. 12697 * 12698 * Note that end_block is actually the block that follows the last 12699 * block of the request, but that's what is needed for the computation. 12700 */ 12701 first_byte = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno); 12702 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 12703 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 12704 un->un_tgt_blocksize; 12705 12706 /* request_bytes is rounded up to a multiple of the target block size */ 12707 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 12708 12709 /* 12710 * See if the starting address of the request and the request 12711 * length are aligned on a un->un_tgt_blocksize boundary. If aligned, 12712 * then we do not need to allocate a shadow buf to handle the request. 12713 */ 12714 if (((first_byte % un->un_tgt_blocksize) == 0) && 12715 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 12716 is_aligned = TRUE; 12717 } 12718 12719 if ((bp->b_flags & B_READ) == 0) { 12720 /* 12721 * Lock the range for a write operation.
An aligned request is 12722 * considered a simple write; otherwise the request must be a 12723 * read-modify-write. 12724 */ 12725 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 12726 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 12727 } 12728 12729 /* 12730 * Alloc a shadow buf if the request is not aligned. Also, this is 12731 * where the READ command is generated for a read-modify-write. (The 12732 * write phase is deferred until after the read completes.) 12733 */ 12734 if (is_aligned == FALSE) { 12735 12736 struct sd_mapblocksize_info *shadow_bsp; 12737 struct sd_xbuf *shadow_xp; 12738 struct buf *shadow_bp; 12739 12740 /* 12741 * Allocate the shadow buf and its associated xbuf. Note that 12742 * after this call the xb_blkno value in both the original 12743 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 12744 * same: absolute (relative to the start of the device) and 12745 * adjusted for the target block size. The b_blkno in the 12746 * shadow buf will also be set to this value. We should never 12747 * change b_blkno in the original bp, however. 12748 * 12749 * Note also that the shadow buf will always need to be a 12750 * READ command, regardless of whether the incoming command 12751 * is a READ or a WRITE. 12752 */ 12753 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 12754 xp->xb_blkno, 12755 (int (*)(struct buf *)) sd_mapblocksize_iodone); 12756 12757 shadow_xp = SD_GET_XBUF(shadow_bp); 12758 12759 /* 12760 * Allocate the layer-private data for the shadow buf. 12761 * (No need to preserve xb_private in the shadow xbuf.) 12762 */ 12763 shadow_xp->xb_private = shadow_bsp = 12764 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12765 12766 /* 12767 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 12768 * to figure out where the start of the user data is (based upon 12769 * the system block size) in the data returned by the READ 12770 * command (which will be based upon the target blocksize). Note 12771 * that this is only really used if the request is unaligned. 12772 */ 12773 bsp->mbs_copy_offset = (ssize_t)(first_byte - 12774 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 12775 ASSERT((bsp->mbs_copy_offset >= 0) && 12776 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 12777 12778 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 12779 12780 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 12781 12782 /* Transfer the wmap (if any) to the shadow buf */ 12783 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 12784 bsp->mbs_wmp = NULL; 12785 12786 /* 12787 * The shadow buf goes on from here in place of the 12788 * original buf. 12789 */ 12790 shadow_bsp->mbs_orig_bp = bp; 12791 bp = shadow_bp; 12792 } 12793 12794 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12795 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 12796 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12797 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 12798 request_bytes); 12799 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12800 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 12801 12802 done: 12803 SD_NEXT_IOSTART(index, un, bp); 12804 12805 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12806 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 12807 } 12808 12809 12810 /* 12811 * Function: sd_mapblocksize_iodone 12812 * 12813 * Description: Completion-side processing for block-size mapping.
12814 * 12815 * Context: May be called under interrupt context 12816 */ 12817 12818 static void 12819 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 12820 { 12821 struct sd_mapblocksize_info *bsp; 12822 struct sd_xbuf *xp; 12823 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 12824 struct buf *orig_bp; /* ptr to the original buf */ 12825 offset_t shadow_end; 12826 offset_t request_end; 12827 offset_t shadow_start; 12828 ssize_t copy_offset; 12829 size_t copy_length; 12830 size_t shortfall; 12831 uint_t is_write; /* TRUE if this bp is a WRITE */ 12832 uint_t has_wmap; /* TRUE if this bp has a wmap */ 12833 12834 ASSERT(un != NULL); 12835 ASSERT(bp != NULL); 12836 12837 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12838 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 12839 12840 /* 12841 * There is no shadow buf or layer-private data if the target is 12842 * using un->un_sys_blocksize as its block size or if bcount == 0. 12843 */ 12844 if ((un->un_tgt_blocksize == DEV_BSIZE) || 12845 (bp->b_bcount == 0)) { 12846 goto exit; 12847 } 12848 12849 xp = SD_GET_XBUF(bp); 12850 ASSERT(xp != NULL); 12851 12852 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 12853 bsp = xp->xb_private; 12854 12855 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 12856 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 12857 12858 if (is_write) { 12859 /* 12860 * For a WRITE request we must free up the block range that 12861 * we have locked up. This holds regardless of whether this is 12862 * an aligned write request or a read-modify-write request. 12863 */ 12864 sd_range_unlock(un, bsp->mbs_wmp); 12865 bsp->mbs_wmp = NULL; 12866 } 12867 12868 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 12869 /* 12870 * An aligned read or write command will have no shadow buf; 12871 * there is not much else to do with it. 12872 */ 12873 goto done; 12874 } 12875 12876 orig_bp = bsp->mbs_orig_bp; 12877 ASSERT(orig_bp != NULL); 12878 orig_xp = SD_GET_XBUF(orig_bp); 12879 ASSERT(orig_xp != NULL); 12880 ASSERT(!mutex_owned(SD_MUTEX(un))); 12881 12882 if (!is_write && has_wmap) { 12883 /* 12884 * A READ with a wmap means this is the READ phase of a 12885 * read-modify-write. If an error occurred on the READ then 12886 * we do not proceed with the WRITE phase or copy any data. 12887 * Just release the write maps and return with an error. 12888 */ 12889 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 12890 orig_bp->b_resid = orig_bp->b_bcount; 12891 bioerror(orig_bp, bp->b_error); 12892 sd_range_unlock(un, bsp->mbs_wmp); 12893 goto freebuf_done; 12894 } 12895 } 12896 12897 /* 12898 * Here is where we set up to copy the data from the shadow buf 12899 * into the space associated with the original buf. 12900 * 12901 * To deal with the conversion between block sizes, these 12902 * computations treat the data as an array of bytes, with the 12903 * first byte (byte 0) corresponding to the first byte in the 12904 * first block on the disk. 12905 */ 12906 12907 /* 12908 * shadow_start and shadow_end indicate the location and extent of 12909 * the data returned with the shadow IO request. 12910 */ 12911 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12912 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 12913 12914 /* 12915 * copy_offset gives the offset (in bytes) from the start of the first 12916 * block of the READ request to the beginning of the data.
We retrieve 12917 * this value from the layer-private data of the shadow xbuf, as it 12918 * was saved there by sd_mapblocksize_iostart(). copy_length gives the amount of 12919 * data to be copied (in bytes). 12920 */ 12921 copy_offset = bsp->mbs_copy_offset; 12922 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 12923 copy_length = orig_bp->b_bcount; 12924 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 12925 12926 /* 12927 * Set up the resid and error fields of orig_bp as appropriate. 12928 */ 12929 if (shadow_end >= request_end) { 12930 /* We got all the requested data; set resid to zero */ 12931 orig_bp->b_resid = 0; 12932 } else { 12933 /* 12934 * We failed to get enough data to fully satisfy the original 12935 * request. Just copy back whatever data we got and set 12936 * up the residual and error code as required. 12937 * 12938 * 'shortfall' is the amount by which the data received with the 12939 * shadow buf has "fallen short" of the requested amount. 12940 */ 12941 shortfall = (size_t)(request_end - shadow_end); 12942 12943 if (shortfall > orig_bp->b_bcount) { 12944 /* 12945 * We did not get enough data to even partially 12946 * fulfill the original request. The residual is 12947 * equal to the amount requested. 12948 */ 12949 orig_bp->b_resid = orig_bp->b_bcount; 12950 } else { 12951 /* 12952 * We did not get all the data that we requested 12953 * from the device, but we will try to return what 12954 * portion we did get. 12955 */ 12956 orig_bp->b_resid = shortfall; 12957 } 12958 ASSERT(copy_length >= orig_bp->b_resid); 12959 copy_length -= orig_bp->b_resid; 12960 } 12961 12962 /* Propagate the error code from the shadow buf to the original buf */ 12963 bioerror(orig_bp, bp->b_error); 12964 12965 if (is_write) { 12966 goto freebuf_done; /* No data copying for a WRITE */ 12967 } 12968 12969 if (has_wmap) { 12970 /* 12971 * This is a READ command from the READ phase of a 12972 * read-modify-write request. We have to copy the data given 12973 * by the user OVER the data returned by the READ command, 12974 * then convert the command from a READ to a WRITE and send 12975 * it back to the target. 12976 */ 12977 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 12978 copy_length); 12979 12980 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 12981 12982 /* 12983 * Dispatch the WRITE command to the taskq thread, which 12984 * will in turn send the command to the target. When the 12985 * WRITE command completes, we (sd_mapblocksize_iodone()) 12986 * will get called again as part of the iodone chain 12987 * processing for it. Note that we will still be dealing 12988 * with the shadow buf at that point. 12989 */ 12990 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 12991 KM_NOSLEEP) != 0) { 12992 /* 12993 * Dispatch was successful so we are done. Return 12994 * without going any higher up the iodone chain. Do 12995 * not free up any layer-private data until after the 12996 * WRITE completes. 12997 */ 12998 return; 12999 } 13000 13001 /* 13002 * Dispatch of the WRITE command failed; set up the error 13003 * condition and send this IO back up the iodone chain. 13004 */ 13005 bioerror(orig_bp, EIO); 13006 orig_bp->b_resid = orig_bp->b_bcount; 13007 13008 } else { 13009 /* 13010 * This is a regular READ request (ie, not a RMW). Copy the 13011 * data from the shadow buf into the original buf.
The 13012 * copy_offset compensates for any "misalignment" between the 13013 * shadow buf (with its un->un_tgt_blocksize blocks) and the 13014 * original buf (with its un->un_sys_blocksize blocks). 13015 */ 13016 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 13017 copy_length); 13018 } 13019 13020 freebuf_done: 13021 13022 /* 13023 * At this point we still have both the shadow buf AND the original 13024 * buf to deal with, as well as the layer-private data area in each. 13025 * Local variables are as follows: 13026 * 13027 * bp -- points to shadow buf 13028 * xp -- points to xbuf of shadow buf 13029 * bsp -- points to layer-private data area of shadow buf 13030 * orig_bp -- points to original buf 13031 * 13032 * First free the shadow buf and its associated xbuf, then free the 13033 * layer-private data area from the shadow buf. There is no need to 13034 * restore xb_private in the shadow xbuf. 13035 */ 13036 sd_shadow_buf_free(bp); 13037 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 13038 13039 /* 13040 * Now update the local variables to point to the original buf, xbuf, 13041 * and layer-private area. 13042 */ 13043 bp = orig_bp; 13044 xp = SD_GET_XBUF(bp); 13045 ASSERT(xp != NULL); 13046 ASSERT(xp == orig_xp); 13047 bsp = xp->xb_private; 13048 ASSERT(bsp != NULL); 13049 13050 done: 13051 /* 13052 * Restore xb_private to whatever it was set to by the next higher 13053 * layer in the chain, then free the layer-private data area. 13054 */ 13055 xp->xb_private = bsp->mbs_oprivate; 13056 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 13057 13058 exit: 13059 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 13060 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 13061 13062 SD_NEXT_IODONE(index, un, bp); 13063 } 13064 13065 13066 /* 13067 * Function: sd_checksum_iostart 13068 * 13069 * Description: A stub function for a layer that's currently not used. 13070 * For now just a placeholder. 13071 * 13072 * Context: Kernel thread context 13073 */ 13074 13075 static void 13076 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 13077 { 13078 ASSERT(un != NULL); 13079 ASSERT(bp != NULL); 13080 ASSERT(!mutex_owned(SD_MUTEX(un))); 13081 SD_NEXT_IOSTART(index, un, bp); 13082 } 13083 13084 13085 /* 13086 * Function: sd_checksum_iodone 13087 * 13088 * Description: A stub function for a layer that's currently not used. 13089 * For now just a placeholder. 13090 * 13091 * Context: May be called under interrupt context 13092 */ 13093 13094 static void 13095 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 13096 { 13097 ASSERT(un != NULL); 13098 ASSERT(bp != NULL); 13099 ASSERT(!mutex_owned(SD_MUTEX(un))); 13100 SD_NEXT_IODONE(index, un, bp); 13101 } 13102 13103 13104 /* 13105 * Function: sd_checksum_uscsi_iostart 13106 * 13107 * Description: A stub function for a layer that's currently not used. 13108 * For now just a placeholder. 13109 * 13110 * Context: Kernel thread context 13111 */ 13112 13113 static void 13114 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 13115 { 13116 ASSERT(un != NULL); 13117 ASSERT(bp != NULL); 13118 ASSERT(!mutex_owned(SD_MUTEX(un))); 13119 SD_NEXT_IOSTART(index, un, bp); 13120 } 13121 13122 13123 /* 13124 * Function: sd_checksum_uscsi_iodone 13125 * 13126 * Description: A stub function for a layer that's currently not used. 13127 * For now just a placeholder. 
13128 * 13129 * Context: May be called under interrupt context 13130 */ 13131 13132 static void 13133 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 13134 { 13135 ASSERT(un != NULL); 13136 ASSERT(bp != NULL); 13137 ASSERT(!mutex_owned(SD_MUTEX(un))); 13138 SD_NEXT_IODONE(index, un, bp); 13139 } 13140 13141 13142 /* 13143 * Function: sd_pm_iostart 13144 * 13145 * Description: iostart-side routine for power management. 13146 * 13147 * Context: Kernel thread context 13148 */ 13149 13150 static void 13151 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 13152 { 13153 ASSERT(un != NULL); 13154 ASSERT(bp != NULL); 13155 ASSERT(!mutex_owned(SD_MUTEX(un))); 13156 ASSERT(!mutex_owned(&un->un_pm_mutex)); 13157 13158 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 13159 13160 if (sd_pm_entry(un) != DDI_SUCCESS) { 13161 /* 13162 * Set up to return the failed buf back up the 'iodone' 13163 * side of the calling chain. 13164 */ 13165 bioerror(bp, EIO); 13166 bp->b_resid = bp->b_bcount; 13167 13168 SD_BEGIN_IODONE(index, un, bp); 13169 13170 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 13171 return; 13172 } 13173 13174 SD_NEXT_IOSTART(index, un, bp); 13175 13176 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 13177 } 13178 13179 13180 /* 13181 * Function: sd_pm_iodone 13182 * 13183 * Description: iodone-side routine for power management. 13184 * 13185 * Context: may be called from interrupt context 13186 */ 13187 13188 static void 13189 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 13190 { 13191 ASSERT(un != NULL); 13192 ASSERT(bp != NULL); 13193 ASSERT(!mutex_owned(&un->un_pm_mutex)); 13194 13195 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 13196 13197 /* 13198 * After attach, the following flag is only read, so don't 13199 * take the penalty of acquiring a mutex for it. 13200 */ 13201 if (un->un_f_pm_is_enabled == TRUE) { 13202 sd_pm_exit(un); 13203 } 13204 13205 SD_NEXT_IODONE(index, un, bp); 13206 13207 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 13208 } 13209 13210 13211 /* 13212 * Function: sd_core_iostart 13213 * 13214 * Description: Primary driver function for enqueuing buf(9S) structs from 13215 * the system and initiating IO to the target device. 13216 * 13217 * Context: Kernel thread context. Can sleep. 13218 * 13219 * Assumptions: - The given xp->xb_blkno is absolute 13220 * (ie, relative to the start of the device). 13221 * - The IO is to be done using the native blocksize of 13222 * the device, as specified in un->un_tgt_blocksize. 13223 */ 13224 /* ARGSUSED */ 13225 static void 13226 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 13227 { 13228 struct sd_xbuf *xp; 13229 13230 ASSERT(un != NULL); 13231 ASSERT(bp != NULL); 13232 ASSERT(!mutex_owned(SD_MUTEX(un))); 13233 ASSERT(bp->b_resid == 0); 13234 13235 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 13236 13237 xp = SD_GET_XBUF(bp); 13238 ASSERT(xp != NULL); 13239 13240 mutex_enter(SD_MUTEX(un)); 13241 13242 /* 13243 * If we are currently in the failfast state, fail any new IO 13244 * that has B_FAILFAST set, then return. 13245 */ 13246 if ((bp->b_flags & B_FAILFAST) && 13247 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 13248 mutex_exit(SD_MUTEX(un)); 13249 bioerror(bp, EIO); 13250 bp->b_resid = bp->b_bcount; 13251 SD_BEGIN_IODONE(index, un, bp); 13252 return; 13253 } 13254 13255 if (SD_IS_DIRECT_PRIORITY(xp)) { 13256 /* 13257 * Priority command -- transport it immediately.
13258 * 13259 * Note: We may want to assert that USCSI_DIAGNOSE is set, 13260 * because all direct priority commands should be associated 13261 * with error recovery actions which we don't want to retry. 13262 */ 13263 sd_start_cmds(un, bp); 13264 } else { 13265 /* 13266 * Normal command -- add it to the wait queue, then start 13267 * transporting commands from the wait queue. 13268 */ 13269 sd_add_buf_to_waitq(un, bp); 13270 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 13271 sd_start_cmds(un, NULL); 13272 } 13273 13274 mutex_exit(SD_MUTEX(un)); 13275 13276 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 13277 } 13278 13279 13280 /* 13281 * Function: sd_init_cdb_limits 13282 * 13283 * Description: This is to handle scsi_pkt initialization differences 13284 * between the driver platforms. 13285 * 13286 * Legacy behaviors: 13287 * 13288 * If the block number or the sector count exceeds the 13289 * capabilities of a Group 0 command, shift over to a 13290 * Group 1 command. We don't blindly use Group 1 13291 * commands because a) some drives (CDC Wren IVs) get a 13292 * bit confused, and b) there is probably a fair amount 13293 * of speed difference for a target to receive and decode 13294 * a 10 byte command instead of a 6 byte command. 13295 * 13296 * The xfer time difference of 6 vs 10 byte CDBs is 13297 * still significant so this code is still worthwhile. 13298 * 10 byte CDBs are very inefficient with the fas HBA driver 13299 * and older disks. Each CDB byte took 1 usec with some 13300 * popular disks. 13301 * 13302 * Context: Must be called at attach time 13303 */ 13304 13305 static void 13306 sd_init_cdb_limits(struct sd_lun *un) 13307 { 13308 int hba_cdb_limit; 13309 13310 /* 13311 * Use CDB_GROUP1 commands for most devices except for 13312 * parallel SCSI fixed drives in which case we get better 13313 * performance using CDB_GROUP0 commands (where applicable). 13314 */ 13315 un->un_mincdb = SD_CDB_GROUP1; 13316 #if !defined(__fibre) 13317 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 13318 !un->un_f_has_removable_media) { 13319 un->un_mincdb = SD_CDB_GROUP0; 13320 } 13321 #endif 13322 13323 /* 13324 * Try to read the max-cdb-length supported by HBA. 13325 */ 13326 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 13327 if (0 >= un->un_max_hba_cdb) { 13328 un->un_max_hba_cdb = CDB_GROUP4; 13329 hba_cdb_limit = SD_CDB_GROUP4; 13330 } else if (0 < un->un_max_hba_cdb && 13331 un->un_max_hba_cdb < CDB_GROUP1) { 13332 hba_cdb_limit = SD_CDB_GROUP0; 13333 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 13334 un->un_max_hba_cdb < CDB_GROUP5) { 13335 hba_cdb_limit = SD_CDB_GROUP1; 13336 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 13337 un->un_max_hba_cdb < CDB_GROUP4) { 13338 hba_cdb_limit = SD_CDB_GROUP5; 13339 } else { 13340 hba_cdb_limit = SD_CDB_GROUP4; 13341 } 13342 13343 /* 13344 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 13345 * commands for fixed disks unless we are building for a 32 bit 13346 * kernel. 13347 */ 13348 #ifdef _LP64 13349 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13350 min(hba_cdb_limit, SD_CDB_GROUP4); 13351 #else 13352 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13353 min(hba_cdb_limit, SD_CDB_GROUP1); 13354 #endif 13355 13356 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 13357 ? sizeof (struct scsi_arq_status) : 1); 13358 un->un_cmd_timeout = (ushort_t)sd_io_time; 13359 un->un_uscsi_timeout = ((ISCD(un)) ? 
2 : 1) * un->un_cmd_timeout; 13360 } 13361 13362 13363 /* 13364 * Function: sd_initpkt_for_buf 13365 * 13366 * Description: Allocate and initialize for transport a scsi_pkt struct, 13367 * based upon the info specified in the given buf struct. 13368 * 13369 * Assumes the xb_blkno in the request is absolute (ie, 13370 * relative to the start of the device, NOT the partition!). 13371 * Also assumes that the request is using the native block 13372 * size of the device (as returned by the READ CAPACITY 13373 * command). 13374 * 13375 * Return Code: SD_PKT_ALLOC_SUCCESS 13376 * SD_PKT_ALLOC_FAILURE 13377 * SD_PKT_ALLOC_FAILURE_NO_DMA 13378 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13379 * 13380 * Context: Kernel thread and may be called from software interrupt context 13381 * as part of a sdrunout callback. This function may not block or 13382 * call routines that block. 13383 */ 13384 13385 static int 13386 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 13387 { 13388 struct sd_xbuf *xp; 13389 struct scsi_pkt *pktp = NULL; 13390 struct sd_lun *un; 13391 size_t blockcount; 13392 daddr_t startblock; 13393 int rval; 13394 int cmd_flags; 13395 13396 ASSERT(bp != NULL); 13397 ASSERT(pktpp != NULL); 13398 xp = SD_GET_XBUF(bp); 13399 ASSERT(xp != NULL); 13400 un = SD_GET_UN(bp); 13401 ASSERT(un != NULL); 13402 ASSERT(mutex_owned(SD_MUTEX(un))); 13403 ASSERT(bp->b_resid == 0); 13404 13405 SD_TRACE(SD_LOG_IO_CORE, un, 13406 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 13407 13408 mutex_exit(SD_MUTEX(un)); 13409 13410 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13411 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 13412 /* 13413 * Already have a scsi_pkt -- just need DMA resources. 13414 * We must recompute the CDB in case the mapping returns 13415 * a nonzero pkt_resid. 13416 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 13417 * that is being retried, the unmap/remap of the DMA resources 13418 * will result in the entire transfer starting over again 13419 * from the very first block. 13420 */ 13421 ASSERT(xp->xb_pktp != NULL); 13422 pktp = xp->xb_pktp; 13423 } else { 13424 pktp = NULL; 13425 } 13426 #endif /* __i386 || __amd64 */ 13427 13428 startblock = xp->xb_blkno; /* Absolute block num. */ 13429 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 13430 13431 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 13432 13433 /* 13434 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 13435 * call scsi_init_pkt, and build the CDB. 13436 */ 13437 rval = sd_setup_rw_pkt(un, &pktp, bp, 13438 cmd_flags, sdrunout, (caddr_t)un, 13439 startblock, blockcount); 13440 13441 if (rval == 0) { 13442 /* 13443 * Success. 13444 * 13445 * If partial DMA is being used and required for this transfer, 13446 * set it up here.
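 * * For instance (illustrative numbers, not from the original source): if b_bcount is 1 MB but the HBA could only map 256 KB of DMA resources, scsi_init_pkt(9F) returns with pkt_resid == 768 KB. * The code below saves that value in xb_dma_resid and re-zeros pkt_resid, so only the mapped portion is transported now; the remainder is set up later via sd_setup_next_rw_pkt().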
13447 */ 13448 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 13449 (pktp->pkt_resid != 0)) { 13450 13451 /* 13452 * Save the CDB length and pkt_resid for the 13453 * next xfer. 13454 */ 13455 xp->xb_dma_resid = pktp->pkt_resid; 13456 13457 /* rezero resid */ 13458 pktp->pkt_resid = 0; 13459 13460 } else { 13461 xp->xb_dma_resid = 0; 13462 } 13463 13464 pktp->pkt_flags = un->un_tagflags; 13465 pktp->pkt_time = un->un_cmd_timeout; 13466 pktp->pkt_comp = sdintr; 13467 13468 pktp->pkt_private = bp; 13469 *pktpp = pktp; 13470 13471 SD_TRACE(SD_LOG_IO_CORE, un, 13472 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 13473 13474 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13475 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 13476 #endif 13477 13478 mutex_enter(SD_MUTEX(un)); 13479 return (SD_PKT_ALLOC_SUCCESS); 13480 13481 } 13482 13483 /* 13484 * SD_PKT_ALLOC_FAILURE is the only expected failure code 13485 * from sd_setup_rw_pkt. 13486 */ 13487 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 13488 13489 if (rval == SD_PKT_ALLOC_FAILURE) { 13490 *pktpp = NULL; 13491 /* 13492 * Set the driver state to RWAIT to indicate the driver 13493 * is waiting on resource allocations. The driver will not 13494 * suspend, pm_suspend, or detach while the state is RWAIT. 13495 */ 13496 mutex_enter(SD_MUTEX(un)); 13497 New_state(un, SD_STATE_RWAIT); 13498 13499 SD_ERROR(SD_LOG_IO_CORE, un, 13500 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp); 13501 13502 if ((bp->b_flags & B_ERROR) != 0) { 13503 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 13504 } 13505 return (SD_PKT_ALLOC_FAILURE); 13506 } else { 13507 /* 13508 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13509 * 13510 * This should never happen. Maybe someone messed with the 13511 * kernel's minphys? 13512 */ 13513 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13514 "Request rejected: too large for CDB: " 13515 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 13516 SD_ERROR(SD_LOG_IO_CORE, un, 13517 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 13518 mutex_enter(SD_MUTEX(un)); 13519 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13520 13521 } 13522 } 13523 13524 13525 /* 13526 * Function: sd_destroypkt_for_buf 13527 * 13528 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 13529 * 13530 * Context: Kernel thread or interrupt context 13531 */ 13532 13533 static void 13534 sd_destroypkt_for_buf(struct buf *bp) 13535 { 13536 ASSERT(bp != NULL); 13537 ASSERT(SD_GET_UN(bp) != NULL); 13538 13539 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13540 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 13541 13542 ASSERT(SD_GET_PKTP(bp) != NULL); 13543 scsi_destroy_pkt(SD_GET_PKTP(bp)); 13544 13545 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13546 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 13547 } 13548 13549 /* 13550 * Function: sd_setup_rw_pkt 13551 * 13552 * Description: Determines appropriate CDB group for the requested LBA 13553 * and transfer length, calls scsi_init_pkt, and builds 13554 * the CDB. Do not use for partial DMA transfers except 13555 * for the initial transfer since the CDB size must 13556 * remain constant. 13557 * 13558 * Context: Kernel thread and may be called from software interrupt 13559 * context as part of a sdrunout callback.
This function may not 13560 * block or call routines that block. 13561 */ 13562 13563 13564 int 13565 sd_setup_rw_pkt(struct sd_lun *un, 13566 struct scsi_pkt **pktpp, struct buf *bp, int flags, 13567 int (*callback)(caddr_t), caddr_t callback_arg, 13568 diskaddr_t lba, uint32_t blockcount) 13569 { 13570 struct scsi_pkt *return_pktp; 13571 union scsi_cdb *cdbp; 13572 struct sd_cdbinfo *cp = NULL; 13573 int i; 13574 13575 /* 13576 * See which size CDB to use, based upon the request. 13577 */ 13578 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 13579 13580 /* 13581 * Check lba and block count against sd_cdbtab limits. 13582 * In the partial DMA case, we have to use the same size 13583 * CDB for all the transfers. Check lba + blockcount 13584 * against the max LBA so we know that segment of the 13585 * transfer can use the CDB we select. 13586 */ 13587 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 13588 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 13589 13590 /* 13591 * The command will fit into the CDB type 13592 * specified by sd_cdbtab[i]. 13593 */ 13594 cp = sd_cdbtab + i; 13595 13596 /* 13597 * Call scsi_init_pkt so we can fill in the 13598 * CDB. 13599 */ 13600 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 13601 bp, cp->sc_grpcode, un->un_status_len, 0, 13602 flags, callback, callback_arg); 13603 13604 if (return_pktp != NULL) { 13605 13606 /* 13607 * Return new value of pkt 13608 */ 13609 *pktpp = return_pktp; 13610 13611 /* 13612 * To be safe, zero the CDB, ensuring there is 13613 * no leftover data from a previous command. 13614 */ 13615 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 13616 13617 /* 13618 * Handle partial DMA mapping 13619 */ 13620 if (return_pktp->pkt_resid != 0) { 13621 13622 /* 13623 * Not going to xfer as many blocks as 13624 * originally expected. 13625 */ 13626 blockcount -= 13627 SD_BYTES2TGTBLOCKS(un, 13628 return_pktp->pkt_resid); 13629 } 13630 13631 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 13632 13633 /* 13634 * Set command byte based on the CDB 13635 * type we matched. 13636 */ 13637 cdbp->scc_cmd = cp->sc_grpmask | 13638 ((bp->b_flags & B_READ) ? 13639 SCMD_READ : SCMD_WRITE); 13640 13641 SD_FILL_SCSI1_LUN(un, return_pktp); 13642 13643 /* 13644 * Fill in LBA and length 13645 */ 13646 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 13647 (cp->sc_grpcode == CDB_GROUP4) || 13648 (cp->sc_grpcode == CDB_GROUP0) || 13649 (cp->sc_grpcode == CDB_GROUP5)); 13650 13651 if (cp->sc_grpcode == CDB_GROUP1) { 13652 FORMG1ADDR(cdbp, lba); 13653 FORMG1COUNT(cdbp, blockcount); 13654 return (0); 13655 } else if (cp->sc_grpcode == CDB_GROUP4) { 13656 FORMG4LONGADDR(cdbp, lba); 13657 FORMG4COUNT(cdbp, blockcount); 13658 return (0); 13659 } else if (cp->sc_grpcode == CDB_GROUP0) { 13660 FORMG0ADDR(cdbp, lba); 13661 FORMG0COUNT(cdbp, blockcount); 13662 return (0); 13663 } else if (cp->sc_grpcode == CDB_GROUP5) { 13664 FORMG5ADDR(cdbp, lba); 13665 FORMG5COUNT(cdbp, blockcount); 13666 return (0); 13667 } 13668 13669 /* 13670 * It should be impossible to not match one 13671 * of the CDB types above, so we should never 13672 * reach this point. Set the CDB command byte 13673 * to test-unit-ready to avoid writing 13674 * to somewhere we don't intend. 13675 */ 13676 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 13677 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13678 } else { 13679 /* 13680 * Couldn't get scsi_pkt 13681 */ 13682 return (SD_PKT_ALLOC_FAILURE); 13683 } 13684 } 13685 } 13686 13687 /* 13688 * None of the available CDB types were suitable.
This really 13689 * should never happen: on a 64 bit system we support 13690 * READ16/WRITE16 which will hold an entire 64 bit disk address 13691 * and on a 32 bit system we will refuse to bind to a device 13692 * larger than 2TB so addresses will never be larger than 32 bits. 13693 */ 13694 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13695 } 13696 13697 /* 13698 * Function: sd_setup_next_rw_pkt 13699 * 13700 * Description: Setup packet for partial DMA transfers, except for the 13701 * initial transfer. sd_setup_rw_pkt should be used for 13702 * the initial transfer. 13703 * 13704 * Context: Kernel thread and may be called from interrupt context. 13705 */ 13706 13707 int 13708 sd_setup_next_rw_pkt(struct sd_lun *un, 13709 struct scsi_pkt *pktp, struct buf *bp, 13710 diskaddr_t lba, uint32_t blockcount) 13711 { 13712 uchar_t com; 13713 union scsi_cdb *cdbp; 13714 uchar_t cdb_group_id; 13715 13716 ASSERT(pktp != NULL); 13717 ASSERT(pktp->pkt_cdbp != NULL); 13718 13719 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 13720 com = cdbp->scc_cmd; 13721 cdb_group_id = CDB_GROUPID(com); 13722 13723 ASSERT((cdb_group_id == CDB_GROUPID_0) || 13724 (cdb_group_id == CDB_GROUPID_1) || 13725 (cdb_group_id == CDB_GROUPID_4) || 13726 (cdb_group_id == CDB_GROUPID_5)); 13727 13728 /* 13729 * Move pkt to the next portion of the xfer. 13730 * func is NULL_FUNC so we do not have to release 13731 * the disk mutex here. 13732 */ 13733 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 13734 NULL_FUNC, NULL) == pktp) { 13735 /* Success. Handle partial DMA */ 13736 if (pktp->pkt_resid != 0) { 13737 blockcount -= 13738 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 13739 } 13740 13741 cdbp->scc_cmd = com; 13742 SD_FILL_SCSI1_LUN(un, pktp); 13743 if (cdb_group_id == CDB_GROUPID_1) { 13744 FORMG1ADDR(cdbp, lba); 13745 FORMG1COUNT(cdbp, blockcount); 13746 return (0); 13747 } else if (cdb_group_id == CDB_GROUPID_4) { 13748 FORMG4LONGADDR(cdbp, lba); 13749 FORMG4COUNT(cdbp, blockcount); 13750 return (0); 13751 } else if (cdb_group_id == CDB_GROUPID_0) { 13752 FORMG0ADDR(cdbp, lba); 13753 FORMG0COUNT(cdbp, blockcount); 13754 return (0); 13755 } else if (cdb_group_id == CDB_GROUPID_5) { 13756 FORMG5ADDR(cdbp, lba); 13757 FORMG5COUNT(cdbp, blockcount); 13758 return (0); 13759 } 13760 13761 /* Unreachable */ 13762 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13763 } 13764 13765 /* 13766 * Error setting up next portion of cmd transfer. 13767 * Something is definitely very wrong and this 13768 * should not happen. 13769 */ 13770 return (SD_PKT_ALLOC_FAILURE); 13771 } 13772 13773 /* 13774 * Function: sd_initpkt_for_uscsi 13775 * 13776 * Description: Allocate and initialize for transport a scsi_pkt struct, 13777 * based upon the info specified in the given uscsi_cmd struct. 13778 * 13779 * Return Code: SD_PKT_ALLOC_SUCCESS 13780 * SD_PKT_ALLOC_FAILURE 13781 * SD_PKT_ALLOC_FAILURE_NO_DMA 13782 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13783 * 13784 * Context: Kernel thread and may be called from software interrupt context 13785 * as part of a sdrunout callback. 
This function may not block or 13786 * call routines that block. 13787 */ 13788 13789 static int 13790 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 13791 { 13792 struct uscsi_cmd *uscmd; 13793 struct sd_xbuf *xp; 13794 struct scsi_pkt *pktp; 13795 struct sd_lun *un; 13796 uint32_t flags = 0; 13797 13798 ASSERT(bp != NULL); 13799 ASSERT(pktpp != NULL); 13800 xp = SD_GET_XBUF(bp); 13801 ASSERT(xp != NULL); 13802 un = SD_GET_UN(bp); 13803 ASSERT(un != NULL); 13804 ASSERT(mutex_owned(SD_MUTEX(un))); 13805 13806 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 13807 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 13808 ASSERT(uscmd != NULL); 13809 13810 SD_TRACE(SD_LOG_IO_CORE, un, 13811 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 13812 13813 /* 13814 * Allocate the scsi_pkt for the command. 13815 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 13816 * during scsi_init_pkt time and will continue to use the 13817 * same path as long as the same scsi_pkt is used without 13818 * intervening scsi_dmafree(). Since uscsi command does 13819 * not call scsi_dmafree() before retrying a failed command, it 13820 * is necessary to make sure PKT_DMA_PARTIAL flag is NOT 13821 * set such that scsi_vhci can use other available path for 13822 * retry. Besides, uscsi command does not allow DMA breakup, 13823 * so there is no need to set PKT_DMA_PARTIAL flag. 13824 */ 13825 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 13826 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 13827 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 13828 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status) 13829 - sizeof (struct scsi_extended_sense)), 0, 13830 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ, 13831 sdrunout, (caddr_t)un); 13832 } else { 13833 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 13834 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 13835 sizeof (struct scsi_arq_status), 0, 13836 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 13837 sdrunout, (caddr_t)un); 13838 } 13839 13840 if (pktp == NULL) { 13841 *pktpp = NULL; 13842 /* 13843 * Set the driver state to RWAIT to indicate the driver 13844 * is waiting on resource allocations. The driver will not 13845 * suspend, pm_suspend, or detach while the state is RWAIT. 13846 */ 13847 New_state(un, SD_STATE_RWAIT); 13848 13849 SD_ERROR(SD_LOG_IO_CORE, un, 13850 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 13851 13852 if ((bp->b_flags & B_ERROR) != 0) { 13853 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 13854 } 13855 return (SD_PKT_ALLOC_FAILURE); 13856 } 13857 13858 /* 13859 * We do not do DMA breakup for USCSI commands, so return failure 13860 * here if all the needed DMA resources were not allocated. 13861 */ 13862 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 13863 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 13864 scsi_destroy_pkt(pktp); 13865 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 13866 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 13867 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 13868 } 13869 13870 /* Init the cdb from the given uscsi struct */ 13871 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 13872 uscmd->uscsi_cdb[0], 0, 0, 0); 13873 13874 SD_FILL_SCSI1_LUN(un, pktp); 13875 13876 /* 13877 * Set up the optional USCSI flags. See the uscsi (7I) man page 13878 * for a listing of the supported flags.
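 * * For example (a hedged summary of the translation below): a caller passing USCSI_SILENT|USCSI_DIAGNOSE ends up with FLAG_SILENT|FLAG_DIAGNOSE in pkt_flags; and when tagged queueing is in effect and none of USCSI_NOTAG/USCSI_HTAG/USCSI_OTAG are set, the unit's default tag type from un_tagflags is used.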
13879 */ 13880 13881 if (uscmd->uscsi_flags & USCSI_SILENT) { 13882 flags |= FLAG_SILENT; 13883 } 13884 13885 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 13886 flags |= FLAG_DIAGNOSE; 13887 } 13888 13889 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 13890 flags |= FLAG_ISOLATE; 13891 } 13892 13893 if (un->un_f_is_fibre == FALSE) { 13894 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 13895 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 13896 } 13897 } 13898 13899 /* 13900 * Set the pkt flags here so we save time later. 13901 * Note: These flags are NOT in the uscsi man page!!! 13902 */ 13903 if (uscmd->uscsi_flags & USCSI_HEAD) { 13904 flags |= FLAG_HEAD; 13905 } 13906 13907 if (uscmd->uscsi_flags & USCSI_NOINTR) { 13908 flags |= FLAG_NOINTR; 13909 } 13910 13911 /* 13912 * For tagged queueing, things get a bit complicated. 13913 * Check first for head of queue and last for ordered queue. 13914 * If neither head nor ordered, use the default driver tag flags. 13915 */ 13916 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 13917 if (uscmd->uscsi_flags & USCSI_HTAG) { 13918 flags |= FLAG_HTAG; 13919 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 13920 flags |= FLAG_OTAG; 13921 } else { 13922 flags |= un->un_tagflags & FLAG_TAGMASK; 13923 } 13924 } 13925 13926 if (uscmd->uscsi_flags & USCSI_NODISCON) { 13927 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 13928 } 13929 13930 pktp->pkt_flags = flags; 13931 13932 /* Transfer uscsi information to scsi_pkt */ 13933 (void) scsi_uscsi_pktinit(uscmd, pktp); 13934 13935 /* Copy the caller's CDB into the pkt... */ 13936 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 13937 13938 if (uscmd->uscsi_timeout == 0) { 13939 pktp->pkt_time = un->un_uscsi_timeout; 13940 } else { 13941 pktp->pkt_time = uscmd->uscsi_timeout; 13942 } 13943 13944 /* need it later to identify USCSI request in sdintr */ 13945 xp->xb_pkt_flags |= SD_XB_USCSICMD; 13946 13947 xp->xb_sense_resid = uscmd->uscsi_rqresid; 13948 13949 pktp->pkt_private = bp; 13950 pktp->pkt_comp = sdintr; 13951 *pktpp = pktp; 13952 13953 SD_TRACE(SD_LOG_IO_CORE, un, 13954 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 13955 13956 return (SD_PKT_ALLOC_SUCCESS); 13957 } 13958 13959 13960 /* 13961 * Function: sd_destroypkt_for_uscsi 13962 * 13963 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 13964 * IOs. Also saves relevant info into the associated uscsi_cmd 13965 * struct.
13966 * 13967 * Context: May be called under interrupt context 13968 */ 13969 13970 static void 13971 sd_destroypkt_for_uscsi(struct buf *bp) 13972 { 13973 struct uscsi_cmd *uscmd; 13974 struct sd_xbuf *xp; 13975 struct scsi_pkt *pktp; 13976 struct sd_lun *un; 13977 struct sd_uscsi_info *suip; 13978 13979 ASSERT(bp != NULL); 13980 xp = SD_GET_XBUF(bp); 13981 ASSERT(xp != NULL); 13982 un = SD_GET_UN(bp); 13983 ASSERT(un != NULL); 13984 ASSERT(!mutex_owned(SD_MUTEX(un))); 13985 pktp = SD_GET_PKTP(bp); 13986 ASSERT(pktp != NULL); 13987 13988 SD_TRACE(SD_LOG_IO_CORE, un, 13989 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 13990 13991 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 13992 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 13993 ASSERT(uscmd != NULL); 13994 13995 /* Save the status and the residual into the uscsi_cmd struct */ 13996 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 13997 uscmd->uscsi_resid = bp->b_resid; 13998 13999 /* Transfer scsi_pkt information to uscsi */ 14000 (void) scsi_uscsi_pktfini(pktp, uscmd); 14001 14002 /* 14003 * If enabled, copy any saved sense data into the area specified 14004 * by the uscsi command. 14005 */ 14006 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 14007 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 14008 /* 14009 * Note: uscmd->uscsi_rqbuf should always point to a buffer 14010 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 14011 */ 14012 uscmd->uscsi_rqstatus = xp->xb_sense_status; 14013 uscmd->uscsi_rqresid = xp->xb_sense_resid; 14014 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 14015 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 14016 MAX_SENSE_LENGTH); 14017 } else { 14018 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 14019 SENSE_LENGTH); 14020 } 14021 } 14022 /* 14023 * The following assignments are for SCSI FMA. 14024 */ 14025 ASSERT(xp->xb_private != NULL); 14026 suip = (struct sd_uscsi_info *)xp->xb_private; 14027 suip->ui_pkt_reason = pktp->pkt_reason; 14028 suip->ui_pkt_state = pktp->pkt_state; 14029 suip->ui_pkt_statistics = pktp->pkt_statistics; 14030 suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp); 14031 14032 /* We are done with the scsi_pkt; free it now */ 14033 ASSERT(SD_GET_PKTP(bp) != NULL); 14034 scsi_destroy_pkt(SD_GET_PKTP(bp)); 14035 14036 SD_TRACE(SD_LOG_IO_CORE, un, 14037 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 14038 } 14039 14040 14041 /* 14042 * Function: sd_bioclone_alloc 14043 * 14044 * Description: Allocate a buf(9S) and init it as per the given buf 14045 * and the various arguments. The associated sd_xbuf 14046 * struct is (nearly) duplicated. The struct buf *bp 14047 * argument is saved in new_xp->xb_private. 14048 * 14049 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 14050 * datalen - size of data area for the shadow bp 14051 * blkno - starting LBA 14052 * func - function pointer for b_iodone in the shadow buf. (May 14053 * be NULL if none.) 14054 * 14055 * Return Code: Pointer to the allocated buf(9S) struct 14056 * 14057 * Context: Can sleep.
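 * * Usage sketch (mirroring the overrun handling in sd_mapblockaddr_iostart() above, where 'count' is the byte count that fits in the partition): * bp = sd_bioclone_alloc(bp, count, blocknum, (int (*)(struct buf *)) sd_mapblockaddr_iodone); * The clone transfers only 'count' bytes; sd_mapblockaddr_iodone() later retrieves the original buf from xb_private and folds the untransferred remainder into its b_resid.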
14058 */ 14059 14060 static struct buf * 14061 sd_bioclone_alloc(struct buf *bp, size_t datalen, 14062 daddr_t blkno, int (*func)(struct buf *)) 14063 { 14064 struct sd_lun *un; 14065 struct sd_xbuf *xp; 14066 struct sd_xbuf *new_xp; 14067 struct buf *new_bp; 14068 14069 ASSERT(bp != NULL); 14070 xp = SD_GET_XBUF(bp); 14071 ASSERT(xp != NULL); 14072 un = SD_GET_UN(bp); 14073 ASSERT(un != NULL); 14074 ASSERT(!mutex_owned(SD_MUTEX(un))); 14075 14076 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 14077 NULL, KM_SLEEP); 14078 14079 new_bp->b_lblkno = blkno; 14080 14081 /* 14082 * Allocate an xbuf for the shadow bp and copy the contents of the 14083 * original xbuf into it. 14084 */ 14085 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14086 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 14087 14088 /* 14089 * The given bp is automatically saved in the xb_private member 14090 * of the new xbuf. Callers are allowed to depend on this. 14091 */ 14092 new_xp->xb_private = bp; 14093 14094 new_bp->b_private = new_xp; 14095 14096 return (new_bp); 14097 } 14098 14099 /* 14100 * Function: sd_shadow_buf_alloc 14101 * 14102 * Description: Allocate a buf(9S) and init it as per the given buf 14103 * and the various arguments. The associated sd_xbuf 14104 * struct is (nearly) duplicated. The struct buf *bp 14105 * argument is saved in new_xp->xb_private. 14106 * 14107 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 14108 * datalen - size of data area for the shadow bp 14109 * bflags - B_READ or B_WRITE (pseudo flag) 14110 * blkno - starting LBA 14111 * func - function pointer for b_iodone in the shadow buf. (May 14112 * be NULL if none.) 14113 * 14114 * Return Code: Pointer to the allocated buf(9S) struct 14115 * 14116 * Context: Can sleep. 14117 */ 14118 14119 static struct buf * 14120 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 14121 daddr_t blkno, int (*func)(struct buf *)) 14122 { 14123 struct sd_lun *un; 14124 struct sd_xbuf *xp; 14125 struct sd_xbuf *new_xp; 14126 struct buf *new_bp; 14127 14128 ASSERT(bp != NULL); 14129 xp = SD_GET_XBUF(bp); 14130 ASSERT(xp != NULL); 14131 un = SD_GET_UN(bp); 14132 ASSERT(un != NULL); 14133 ASSERT(!mutex_owned(SD_MUTEX(un))); 14134 14135 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 14136 bp_mapin(bp); 14137 } 14138 14139 bflags &= (B_READ | B_WRITE); 14140 #if defined(__i386) || defined(__amd64) 14141 new_bp = getrbuf(KM_SLEEP); 14142 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 14143 new_bp->b_bcount = datalen; 14144 new_bp->b_flags = bflags | 14145 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 14146 #else 14147 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 14148 datalen, bflags, SLEEP_FUNC, NULL); 14149 #endif 14150 new_bp->av_forw = NULL; 14151 new_bp->av_back = NULL; 14152 new_bp->b_dev = bp->b_dev; 14153 new_bp->b_blkno = blkno; 14154 new_bp->b_iodone = func; 14155 new_bp->b_edev = bp->b_edev; 14156 new_bp->b_resid = 0; 14157 14158 /* We need to preserve the B_FAILFAST flag */ 14159 if (bp->b_flags & B_FAILFAST) { 14160 new_bp->b_flags |= B_FAILFAST; 14161 } 14162 14163 /* 14164 * Allocate an xbuf for the shadow bp and copy the contents of the 14165 * original xbuf into it. 14166 */ 14167 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14168 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 14169 14170 /* Needed later to copy data between the shadow buf & the original buf!
*/ 14171 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 14172 14173 /* 14174 * The given bp is automatically saved in the xb_private member 14175 * of the new xbuf. Callers are allowed to depend on this. 14176 */ 14177 new_xp->xb_private = bp; 14178 14179 new_bp->b_private = new_xp; 14180 14181 return (new_bp); 14182 } 14183 14184 /* 14185 * Function: sd_bioclone_free 14186 * 14187 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 14188 * in the larger-than-partition (overrun) case. 14189 * 14190 * Context: May be called under interrupt context 14191 */ 14192 14193 static void 14194 sd_bioclone_free(struct buf *bp) 14195 { 14196 struct sd_xbuf *xp; 14197 14198 ASSERT(bp != NULL); 14199 xp = SD_GET_XBUF(bp); 14200 ASSERT(xp != NULL); 14201 14202 /* 14203 * Call bp_mapout() before freeing the buf, in case a lower 14204 * layer or HBA had done a bp_mapin(). We must do this here 14205 * as we are the "originator" of the shadow buf. 14206 */ 14207 bp_mapout(bp); 14208 14209 /* 14210 * Null out b_iodone before freeing the bp, to ensure that the driver 14211 * never gets confused by a stale value in this field. (Just a little 14212 * extra defensiveness here.) 14213 */ 14214 bp->b_iodone = NULL; 14215 14216 freerbuf(bp); 14217 14218 kmem_free(xp, sizeof (struct sd_xbuf)); 14219 } 14220 14221 /* 14222 * Function: sd_shadow_buf_free 14223 * 14224 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 14225 * 14226 * Context: May be called under interrupt context 14227 */ 14228 14229 static void 14230 sd_shadow_buf_free(struct buf *bp) 14231 { 14232 struct sd_xbuf *xp; 14233 14234 ASSERT(bp != NULL); 14235 xp = SD_GET_XBUF(bp); 14236 ASSERT(xp != NULL); 14237 14238 #if defined(__sparc) 14239 /* 14240 * Call bp_mapout() before freeing the buf, in case a lower 14241 * layer or HBA had done a bp_mapin(). We must do this here 14242 * as we are the "originator" of the shadow buf. 14243 */ 14244 bp_mapout(bp); 14245 #endif 14246 14247 /* 14248 * Null out b_iodone before freeing the bp, to ensure that the driver 14249 * never gets confused by a stale value in this field. (Just a little 14250 * extra defensiveness here.) 14251 */ 14252 bp->b_iodone = NULL; 14253 14254 #if defined(__i386) || defined(__amd64) 14255 kmem_free(bp->b_un.b_addr, bp->b_bcount); 14256 freerbuf(bp); 14257 #else 14258 scsi_free_consistent_buf(bp); 14259 #endif 14260 14261 kmem_free(xp, sizeof (struct sd_xbuf)); 14262 } 14263 14264 14265 /* 14266 * Function: sd_print_transport_rejected_message 14267 * 14268 * Description: This implements the ludicrously complex rules for printing 14269 * a "transport rejected" message. This is to address the 14270 * specific problem of having a flood of this error message 14271 * produced when a failover occurs. 14272 * 14273 * Context: Any. 14274 */ 14275 14276 static void 14277 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 14278 int code) 14279 { 14280 ASSERT(un != NULL); 14281 ASSERT(mutex_owned(SD_MUTEX(un))); 14282 ASSERT(xp != NULL); 14283 14284 /* 14285 * Print the "transport rejected" message under the following 14286 * conditions: 14287 * 14288 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 14289 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 14290 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 14291 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 14292 * scsi_transport(9F) (which indicates that the target might have 14293 * gone off-line).
This uses the un->un_tran_fatal_count 14294 * counter, which is incremented whenever a TRAN_FATAL_ERROR is 14295 * received, and reset to zero whenever a TRAN_ACCEPT is returned 14296 * from scsi_transport(). 14297 * 14298 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 14299 * the preceding cases in order for the message to be printed. 14300 */ 14301 if (((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) && 14302 (SD_FM_LOG(un) == SD_FM_LOG_NSUP)) { 14303 if ((sd_level_mask & SD_LOGMASK_DIAG) || 14304 (code != TRAN_FATAL_ERROR) || 14305 (un->un_tran_fatal_count == 1)) { 14306 switch (code) { 14307 case TRAN_BADPKT: 14308 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14309 "transport rejected bad packet\n"); 14310 break; 14311 case TRAN_FATAL_ERROR: 14312 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14313 "transport rejected fatal error\n"); 14314 break; 14315 default: 14316 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14317 "transport rejected (%d)\n", code); 14318 break; 14319 } 14320 } 14321 } 14322 } 14323 14324 14325 /* 14326 * Function: sd_add_buf_to_waitq 14327 * 14328 * Description: Add the given buf(9S) struct to the wait queue for the 14329 * instance. If sorting is enabled, then the buf is added 14330 * to the queue via an elevator sort algorithm (a la 14331 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 14332 * If sorting is not enabled, then the buf is just added 14333 * to the end of the wait queue. 14334 * 14335 * Return Code: void 14336 * 14337 * Context: Does not sleep/block, therefore technically can be called 14338 * from any context. However, if sorting is enabled then the 14339 * execution time is indeterminate, and may become long if 14340 * the wait queue grows large. 14341 */ 14342 14343 static void 14344 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 14345 { 14346 struct buf *ap; 14347 14348 ASSERT(bp != NULL); 14349 ASSERT(un != NULL); 14350 ASSERT(mutex_owned(SD_MUTEX(un))); 14351 14352 /* If the queue is empty, add the buf as the only entry & return. */ 14353 if (un->un_waitq_headp == NULL) { 14354 ASSERT(un->un_waitq_tailp == NULL); 14355 un->un_waitq_headp = un->un_waitq_tailp = bp; 14356 bp->av_forw = NULL; 14357 return; 14358 } 14359 14360 ASSERT(un->un_waitq_tailp != NULL); 14361 14362 /* 14363 * If sorting is disabled, just add the buf to the tail end of 14364 * the wait queue and return. 14365 */ 14366 if (un->un_f_disksort_disabled) { 14367 un->un_waitq_tailp->av_forw = bp; 14368 un->un_waitq_tailp = bp; 14369 bp->av_forw = NULL; 14370 return; 14371 } 14372 14373 /* 14374 * Sort thru the list of requests currently on the wait queue 14375 * and add the new buf request at the appropriate position. 14376 * 14377 * The un->un_waitq_headp is an activity chain pointer on which 14378 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 14379 * first queue holds those requests which are positioned after 14380 * the current SD_GET_BLKNO() (in the first request); the second holds 14381 * requests which came in after their SD_GET_BLKNO() number was passed. 14382 * Thus we implement a one-way scan, retracting after reaching 14383 * the end of the drive to the first request on the second 14384 * queue, at which time it becomes the first queue. 14385 * A one-way scan is natural because of the way UNIX read-ahead 14386 * blocks are allocated. 14387 * 14388 * If we lie after the first request, then we must locate the 14389 * second request list and add ourselves to it.
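 * * A small worked example (illustrative block numbers only): given a waitq of 100 -> 300 -> 600 -> 20 -> 80, the inversion at 600 -> 20 marks the start of the second list. * A new request for block 50 is below the head (100), so it is sorted into the second list, yielding 100 -> 300 -> 600 -> 20 -> 50 -> 80; a new request for block 400 is sorted into the first list, yielding 100 -> 300 -> 400 -> 600 -> 20 -> 80.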
14390 */ 14391 ap = un->un_waitq_headp; 14392 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 14393 while (ap->av_forw != NULL) { 14394 /* 14395 * Look for an "inversion" in the (normally 14396 * ascending) block numbers. This indicates 14397 * the start of the second request list. 14398 */ 14399 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 14400 /* 14401 * Search the second request list for the 14402 * first request at a larger block number. 14403 * We go before that; however, if there is 14404 * no such request, we go at the end. 14405 */ 14406 do { 14407 if (SD_GET_BLKNO(bp) < 14408 SD_GET_BLKNO(ap->av_forw)) { 14409 goto insert; 14410 } 14411 ap = ap->av_forw; 14412 } while (ap->av_forw != NULL); 14413 goto insert; /* after last */ 14414 } 14415 ap = ap->av_forw; 14416 } 14417 14418 /* 14419 * No inversions... we will go after the last, and 14420 * be the first request in the second request list. 14421 */ 14422 goto insert; 14423 } 14424 14425 /* 14426 * Request is at/after the current request... 14427 * sort in the first request list. 14428 */ 14429 while (ap->av_forw != NULL) { 14430 /* 14431 * We want to go after the current request (1) if 14432 * there is an inversion after it (i.e. it is the end 14433 * of the first request list), or (2) if the next 14434 * request is a larger block no. than our request. 14435 */ 14436 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 14437 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 14438 goto insert; 14439 } 14440 ap = ap->av_forw; 14441 } 14442 14443 /* 14444 * Neither a second list nor a larger request, therefore 14445 * we go at the end of the first list (which is the same 14446 * as the end of the whole shebang). 14447 */ 14448 insert: 14449 bp->av_forw = ap->av_forw; 14450 ap->av_forw = bp; 14451 14452 /* 14453 * If we inserted onto the tail end of the waitq, make sure the 14454 * tail pointer is updated. 14455 */ 14456 if (ap == un->un_waitq_tailp) { 14457 un->un_waitq_tailp = bp; 14458 } 14459 } 14460 14461 14462 /* 14463 * Function: sd_start_cmds 14464 * 14465 * Description: Remove and transport cmds from the driver queues. 14466 * 14467 * Arguments: un - pointer to the unit (soft state) struct for the target. 14468 * 14469 * immed_bp - ptr to a buf to be transported immediately. Only 14470 * the immed_bp is transported; bufs on the waitq are not 14471 * processed and the un_retry_bp is not checked. If immed_bp is 14472 * NULL, then normal queue processing is performed. 14473 * 14474 * Context: May be called from kernel thread context, interrupt context, 14475 * or runout callback context. This function may not block or 14476 * call routines that block. 14477 */ 14478 14479 static void 14480 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 14481 { 14482 struct sd_xbuf *xp; 14483 struct buf *bp; 14484 void (*statp)(kstat_io_t *); 14485 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14486 void (*saved_statp)(kstat_io_t *); 14487 #endif 14488 int rval; 14489 struct sd_fm_internal *sfip = NULL; 14490 14491 ASSERT(un != NULL); 14492 ASSERT(mutex_owned(SD_MUTEX(un))); 14493 ASSERT(un->un_ncmds_in_transport >= 0); 14494 ASSERT(un->un_throttle >= 0); 14495 14496 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 14497 14498 do { 14499 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14500 saved_statp = NULL; 14501 #endif 14502 14503 /* 14504 * If we are syncing or dumping, fail the command to 14505 * avoid recursively calling back into scsi_transport().
14506 * The dump I/O itself uses a separate code path so this 14507 * only prevents non-dump I/O from being sent while dumping. 14508 * File system sync takes place before dumping begins. 14509 * During panic, filesystem I/O is allowed provided 14510 * un_in_callback is <= 1. This is to prevent recursion 14511 * such as sd_start_cmds -> scsi_transport -> sdintr -> 14512 * sd_start_cmds and so on. See panic.c for more information 14513 * about the states the system can be in during panic. 14514 */ 14515 if ((un->un_state == SD_STATE_DUMPING) || 14516 (ddi_in_panic() && (un->un_in_callback > 1))) { 14517 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14518 "sd_start_cmds: panicking\n"); 14519 goto exit; 14520 } 14521 14522 if ((bp = immed_bp) != NULL) { 14523 /* 14524 * We have a bp that must be transported immediately. 14525 * It's OK to transport the immed_bp here without doing 14526 * the throttle limit check because the immed_bp is 14527 * always used in a retry/recovery case. This means 14528 * that we know we are not at the throttle limit by 14529 * virtue of the fact that to get here we must have 14530 * already gotten a command back via sdintr(). This also 14531 * relies on (1) the command on un_retry_bp preventing 14532 * further commands from the waitq from being issued; 14533 * and (2) the code in sd_retry_command checking the 14534 * throttle limit before issuing a delayed or immediate 14535 * retry. This holds even if the throttle limit is 14536 * currently ratcheted down from its maximum value. 14537 */ 14538 statp = kstat_runq_enter; 14539 if (bp == un->un_retry_bp) { 14540 ASSERT((un->un_retry_statp == NULL) || 14541 (un->un_retry_statp == kstat_waitq_enter) || 14542 (un->un_retry_statp == 14543 kstat_runq_back_to_waitq)); 14544 /* 14545 * If the waitq kstat was incremented when 14546 * sd_set_retry_bp() queued this bp for a retry, 14547 * then we must set up statp so that the waitq 14548 * count will get decremented correctly below. 14549 * Also we must clear un->un_retry_statp to 14550 * ensure that we do not act on a stale value 14551 * in this field. 14552 */ 14553 if ((un->un_retry_statp == kstat_waitq_enter) || 14554 (un->un_retry_statp == 14555 kstat_runq_back_to_waitq)) { 14556 statp = kstat_waitq_to_runq; 14557 } 14558 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14559 saved_statp = un->un_retry_statp; 14560 #endif 14561 un->un_retry_statp = NULL; 14562 14563 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14564 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 14565 "un_throttle:%d un_ncmds_in_transport:%d\n", 14566 un, un->un_retry_bp, un->un_throttle, 14567 un->un_ncmds_in_transport); 14568 } else { 14569 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 14570 "processing priority bp:0x%p\n", bp); 14571 } 14572 14573 } else if ((bp = un->un_waitq_headp) != NULL) { 14574 /* 14575 * A command on the waitq is ready to go, but do not 14576 * send it if: 14577 * 14578 * (1) the throttle limit has been reached, or 14579 * (2) a retry is pending, or 14580 * (3) a START_STOP_UNIT callback pending, or 14581 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 14582 * command is pending. 14583 * 14584 * For all of these conditions, IO processing will 14585 * restart after the condition is cleared. 
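 *
 * Condensed into a single predicate (an illustrative sketch only;
 * the code below tests these in sequence so it can log which
 * condition held the queue):
 *
 *	held = (un->un_ncmds_in_transport >= un->un_throttle) ||
 *	    (un->un_retry_bp != NULL) ||
 *	    (un->un_startstop_timeid != NULL) ||
 *	    (un->un_direct_priority_timeid != NULL);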
14586 */ 14587 if (un->un_ncmds_in_transport >= un->un_throttle) { 14588 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14589 "sd_start_cmds: exiting, " 14590 "throttle limit reached!\n"); 14591 goto exit; 14592 } 14593 if (un->un_retry_bp != NULL) { 14594 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14595 "sd_start_cmds: exiting, retry pending!\n"); 14596 goto exit; 14597 } 14598 if (un->un_startstop_timeid != NULL) { 14599 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14600 "sd_start_cmds: exiting, " 14601 "START_STOP pending!\n"); 14602 goto exit; 14603 } 14604 if (un->un_direct_priority_timeid != NULL) { 14605 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14606 "sd_start_cmds: exiting, " 14607 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n"); 14608 goto exit; 14609 } 14610 14611 /* Dequeue the command */ 14612 un->un_waitq_headp = bp->av_forw; 14613 if (un->un_waitq_headp == NULL) { 14614 un->un_waitq_tailp = NULL; 14615 } 14616 bp->av_forw = NULL; 14617 statp = kstat_waitq_to_runq; 14618 SD_TRACE(SD_LOG_IO_CORE, un, 14619 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 14620 14621 } else { 14622 /* No work to do so bail out now */ 14623 SD_TRACE(SD_LOG_IO_CORE, un, 14624 "sd_start_cmds: no more work, exiting!\n"); 14625 goto exit; 14626 } 14627 14628 /* 14629 * Reset the state to normal. This is the mechanism by which 14630 * the state transitions from either SD_STATE_RWAIT or 14631 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 14632 * If state is SD_STATE_PM_CHANGING then this command is 14633 * part of the device power control and the state must 14634 * not be put back to normal. Doing so would 14635 * allow new commands to proceed when they shouldn't, as 14636 * the device may be going off. 14637 */ 14638 if ((un->un_state != SD_STATE_SUSPENDED) && 14639 (un->un_state != SD_STATE_PM_CHANGING)) { 14640 New_state(un, SD_STATE_NORMAL); 14641 } 14642 14643 xp = SD_GET_XBUF(bp); 14644 ASSERT(xp != NULL); 14645 14646 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14647 /* 14648 * Allocate the scsi_pkt if we need one, or attach DMA 14649 * resources if we have a scsi_pkt that needs them. The 14650 * latter should only occur for commands that are being 14651 * retried. 14652 */ 14653 if ((xp->xb_pktp == NULL) || 14654 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 14655 #else 14656 if (xp->xb_pktp == NULL) { 14657 #endif 14658 /* 14659 * There is no scsi_pkt allocated for this buf. Call 14660 * the initpkt function to allocate & init one. 14661 * 14662 * The scsi_init_pkt runout callback functionality is 14663 * implemented as follows: 14664 * 14665 * 1) The initpkt function always calls 14666 * scsi_init_pkt(9F) with sdrunout specified as the 14667 * callback routine. 14668 * 2) A successful packet allocation is initialized and 14669 * the I/O is transported. 14670 * 3) The I/O associated with an allocation resource 14671 * failure is left on its queue to be retried via 14672 * runout or the next I/O. 14673 * 4) The I/O associated with a DMA error is removed 14674 * from the queue and failed with EIO. Processing of 14675 * the transport queues is also halted to be 14676 * restarted via runout or the next I/O. 14677 * 5) The I/O associated with a CDB size or packet 14678 * size error is removed from the queue and failed 14679 * with EIO. Processing of the transport queues is 14680 * continued. 14681 * 14682 * Note: there is no interface for canceling a runout 14683 * callback.
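 *
 * For reference, the runout registration has roughly this shape
 * (a hedged sketch; the argument names here are illustrative, but
 * sdrunout is indeed passed as the callback to scsi_init_pkt(9F)):
 *
 *	pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, bp, cmdlen,
 *	    statuslen, privatelen, pkt_flags, sdrunout, (caddr_t)un);
 *
 * A NULL return then means resources were unavailable and
 * sdrunout() will be invoked once the HBA frees them.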
To prevent the driver from detaching or 14684 * suspending while a runout is pending, the driver 14685 * state is set to SD_STATE_RWAIT. 14686 * 14687 * Note: using the scsi_init_pkt callback facility can 14688 * result in an I/O request persisting at the head of 14689 * the list which cannot be satisfied even after 14690 * multiple retries. In the future the driver may 14691 * implement some kind of maximum runout count before 14692 * failing an I/O. 14693 * 14694 * Note: the use of funcp below may seem superfluous, 14695 * but it helps warlock figure out the correct 14696 * initpkt function calls (see [s]sd.wlcmd). 14697 */ 14698 struct scsi_pkt *pktp; 14699 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 14700 14701 ASSERT(bp != un->un_rqs_bp); 14702 14703 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 14704 switch ((*funcp)(bp, &pktp)) { 14705 case SD_PKT_ALLOC_SUCCESS: 14706 xp->xb_pktp = pktp; 14707 SD_TRACE(SD_LOG_IO_CORE, un, 14708 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 14709 pktp); 14710 goto got_pkt; 14711 14712 case SD_PKT_ALLOC_FAILURE: 14713 /* 14714 * Temporary (hopefully) resource depletion. 14715 * Since retries and RQS commands always have a 14716 * scsi_pkt allocated, these cases should never 14717 * get here. So the only cases this needs to 14718 * handle are a bp from the waitq (which we put 14719 * back onto the waitq for sdrunout), or a bp 14720 * sent as an immed_bp (which we just fail). 14721 */ 14722 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14723 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14724 14725 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14726 14727 if (bp == immed_bp) { 14728 /* 14729 * If SD_XB_DMA_FREED is clear, then 14730 * this is a failure to allocate a 14731 * scsi_pkt, and we must fail the 14732 * command. 14733 */ 14734 if ((xp->xb_pkt_flags & 14735 SD_XB_DMA_FREED) == 0) { 14736 break; 14737 } 14738 14739 /* 14740 * If this immediate command is NOT our 14741 * un_retry_bp, then we must fail it. 14742 */ 14743 if (bp != un->un_retry_bp) { 14744 break; 14745 } 14746 14747 /* 14748 * We get here if this cmd is our 14749 * un_retry_bp that was DMAFREED, but 14750 * scsi_init_pkt() failed to reallocate 14751 * DMA resources when we attempted to 14752 * retry it. This can happen when an 14753 * mpxio failover is in progress, but 14754 * we don't want to just fail the 14755 * command in this case. 14756 * 14757 * Use timeout(9F) to restart it after 14758 * a 100ms delay. We don't want to 14759 * let sdrunout() restart it, because 14760 * sdrunout() is just supposed to start 14761 * commands that are sitting on the 14762 * wait queue. The un_retry_bp stays 14763 * set until the command completes, but 14764 * sdrunout can be called many times 14765 * before that happens. Since sdrunout 14766 * cannot tell if the un_retry_bp is 14767 * already in the transport, it could 14768 * end up calling scsi_transport() for 14769 * the un_retry_bp multiple times. 14770 * 14771 * Also: don't schedule the callback 14772 * if some other callback is already 14773 * pending. 14774 */ 14775 if (un->un_retry_statp == NULL) { 14776 /* 14777 * restore the kstat pointer to 14778 * keep kstat counts coherent 14779 * when we do retry the command.
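 *
 * For reference, the kstat I/O queue transitions involved
 * here (kstat_waitq_enter(9F) and friends) form this small
 * graph:
 *
 *	waitq_enter -> [waitq] -> waitq_to_runq -> [runq] -> runq_exit
 *	                  ^                           |
 *	                  +---- runq_back_to_waitq ---+
 *
 * Restoring the saved pointer keeps those counters balanced
 * when the retry is eventually issued.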
14780 */ 14781 un->un_retry_statp = 14782 saved_statp; 14783 } 14784 14785 if ((un->un_startstop_timeid == NULL) && 14786 (un->un_retry_timeid == NULL) && 14787 (un->un_direct_priority_timeid == 14788 NULL)) { 14789 14790 un->un_retry_timeid = 14791 timeout( 14792 sd_start_retry_command, 14793 un, SD_RESTART_TIMEOUT); 14794 } 14795 goto exit; 14796 } 14797 14798 #else 14799 if (bp == immed_bp) { 14800 break; /* Just fail the command */ 14801 } 14802 #endif 14803 14804 /* Add the buf back to the head of the waitq */ 14805 bp->av_forw = un->un_waitq_headp; 14806 un->un_waitq_headp = bp; 14807 if (un->un_waitq_tailp == NULL) { 14808 un->un_waitq_tailp = bp; 14809 } 14810 goto exit; 14811 14812 case SD_PKT_ALLOC_FAILURE_NO_DMA: 14813 /* 14814 * HBA DMA resource failure. Fail the command 14815 * and continue processing of the queues. 14816 */ 14817 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14818 "sd_start_cmds: " 14819 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 14820 break; 14821 14822 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 14823 /* 14824 * Note:x86: Partial DMA mapping not supported 14825 * for USCSI commands, and all the needed DMA 14826 * resources were not allocated. 14827 */ 14828 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14829 "sd_start_cmds: " 14830 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 14831 break; 14832 14833 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 14834 /* 14835 * Note:x86: Request cannot fit into CDB based 14836 * on lba and len. 14837 */ 14838 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14839 "sd_start_cmds: " 14840 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 14841 break; 14842 14843 default: 14844 /* Should NEVER get here! */ 14845 panic("scsi_initpkt error"); 14846 /*NOTREACHED*/ 14847 } 14848 14849 /* 14850 * Fatal error in allocating a scsi_pkt for this buf. 14851 * Update kstats & return the buf with an error code. 14852 * We must use sd_return_failed_command_no_restart() to 14853 * avoid a recursive call back into sd_start_cmds(). 14854 * However this also means that we must keep processing 14855 * the waitq here in order to avoid stalling. 14856 */ 14857 if (statp == kstat_waitq_to_runq) { 14858 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 14859 } 14860 sd_return_failed_command_no_restart(un, bp, EIO); 14861 if (bp == immed_bp) { 14862 /* immed_bp is gone by now, so clear this */ 14863 immed_bp = NULL; 14864 } 14865 continue; 14866 } 14867 got_pkt: 14868 if (bp == immed_bp) { 14869 /* goto the head of the class.... */ 14870 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14871 } 14872 14873 un->un_ncmds_in_transport++; 14874 SD_UPDATE_KSTATS(un, statp, bp); 14875 14876 /* 14877 * Call scsi_transport() to send the command to the target. 14878 * According to SCSA architecture, we must drop the mutex here 14879 * before calling scsi_transport() in order to avoid deadlock. 14880 * Note that the scsi_pkt's completion routine can be executed 14881 * (from interrupt context) even before the call to 14882 * scsi_transport() returns. 
14883 */ 14884 SD_TRACE(SD_LOG_IO_CORE, un, 14885 "sd_start_cmds: calling scsi_transport()\n"); 14886 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 14887 14888 mutex_exit(SD_MUTEX(un)); 14889 rval = scsi_transport(xp->xb_pktp); 14890 mutex_enter(SD_MUTEX(un)); 14891 14892 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14893 "sd_start_cmds: scsi_transport() returned %d\n", rval); 14894 14895 switch (rval) { 14896 case TRAN_ACCEPT: 14897 /* Clear this with every pkt accepted by the HBA */ 14898 un->un_tran_fatal_count = 0; 14899 break; /* Success; try the next cmd (if any) */ 14900 14901 case TRAN_BUSY: 14902 un->un_ncmds_in_transport--; 14903 ASSERT(un->un_ncmds_in_transport >= 0); 14904 14905 /* 14906 * Don't retry request sense, the sense data 14907 * is lost when another request is sent. 14908 * Free up the rqs buf and retry 14909 * the original failed cmd. Update kstat. 14910 */ 14911 if (bp == un->un_rqs_bp) { 14912 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14913 bp = sd_mark_rqs_idle(un, xp); 14914 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 14915 NULL, NULL, EIO, un->un_busy_timeout / 500, 14916 kstat_waitq_enter); 14917 goto exit; 14918 } 14919 14920 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14921 /* 14922 * Free the DMA resources for the scsi_pkt. This will 14923 * allow mpxio to select another path the next time 14924 * we call scsi_transport() with this scsi_pkt. 14925 * See sdintr() for the rationalization behind this. 14926 */ 14927 if ((un->un_f_is_fibre == TRUE) && 14928 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14929 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 14930 scsi_dmafree(xp->xb_pktp); 14931 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14932 } 14933 #endif 14934 14935 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 14936 /* 14937 * Commands that are SD_PATH_DIRECT_PRIORITY 14938 * are for error recovery situations. These do 14939 * not use the normal command waitq, so if they 14940 * get a TRAN_BUSY we cannot put them back onto 14941 * the waitq for later retry. One possible 14942 * problem is that there could already be some 14943 * other command on un_retry_bp that is waiting 14944 * for this one to complete, so we would be 14945 * deadlocked if we put this command back onto 14946 * the waitq for later retry (since un_retry_bp 14947 * must complete before the driver gets back to 14948 * commands on the waitq). 14949 * 14950 * To avoid deadlock we must schedule a callback 14951 * that will restart this command after a set 14952 * interval. This should keep retrying for as 14953 * long as the underlying transport keeps 14954 * returning TRAN_BUSY (just like for other 14955 * commands). Use the same timeout interval as 14956 * for the ordinary TRAN_BUSY retry. 14957 */ 14958 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14959 "sd_start_cmds: scsi_transport() returned " 14960 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 14961 14962 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14963 un->un_direct_priority_timeid = 14964 timeout(sd_start_direct_priority_command, 14965 bp, un->un_busy_timeout / 500); 14966 14967 goto exit; 14968 } 14969 14970 /* 14971 * For TRAN_BUSY, we want to reduce the throttle value, 14972 * unless we are retrying a command. 14973 */ 14974 if (bp != un->un_retry_bp) { 14975 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 14976 } 14977 14978 /* 14979 * Set up the bp to be tried again 10 ms later. 14980 * Note:x86: Is there a timeout value in the sd_lun 14981 * for this condition? 
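 *
 * Arithmetic note (hedged: this assumes the default
 * un_busy_timeout of five seconds): 5,000,000 microseconds
 * divided by 500 is 10,000 microseconds, so the
 * un_busy_timeout / 500 expression below works out to the
 * 10 ms mentioned above.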
14982 */ 14983 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, 14984 kstat_runq_back_to_waitq); 14985 goto exit; 14986 14987 case TRAN_FATAL_ERROR: 14988 un->un_tran_fatal_count++; 14989 /* FALLTHRU */ 14990 14991 case TRAN_BADPKT: 14992 default: 14993 un->un_ncmds_in_transport--; 14994 ASSERT(un->un_ncmds_in_transport >= 0); 14995 14996 /* 14997 * If this is our REQUEST SENSE command with a 14998 * transport error, we must get back the pointers 14999 * to the original buf, and mark the REQUEST 15000 * SENSE command as "available". 15001 */ 15002 if (bp == un->un_rqs_bp) { 15003 bp = sd_mark_rqs_idle(un, xp); 15004 xp = SD_GET_XBUF(bp); 15005 } else { 15006 /* 15007 * Legacy behavior: do not update transport 15008 * error count for request sense commands. 15009 */ 15010 SD_UPDATE_ERRSTATS(un, sd_transerrs); 15011 } 15012 15013 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15014 sd_print_transport_rejected_message(un, xp, rval); 15015 15016 /* 15017 * This command will be terminated by SD driver due 15018 * to a fatal transport error. We should post 15019 * ereport.io.scsi.cmd.disk.tran with driver-assessment 15020 * of "fail" for any command to indicate this 15021 * situation. 15022 */ 15023 if (xp->xb_ena > 0) { 15024 ASSERT(un->un_fm_private != NULL); 15025 sfip = un->un_fm_private; 15026 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT; 15027 sd_ssc_extract_info(&sfip->fm_ssc, un, 15028 xp->xb_pktp, bp, xp); 15029 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 15030 } 15031 15032 /* 15033 * We must use sd_return_failed_command_no_restart() to 15034 * avoid a recursive call back into sd_start_cmds(). 15035 * However this also means that we must keep processing 15036 * the waitq here in order to avoid stalling. 15037 */ 15038 sd_return_failed_command_no_restart(un, bp, EIO); 15039 15040 /* 15041 * Notify any threads waiting in sd_ddi_suspend() that 15042 * a command completion has occurred. 15043 */ 15044 if (un->un_state == SD_STATE_SUSPENDED) { 15045 cv_broadcast(&un->un_disk_busy_cv); 15046 } 15047 15048 if (bp == immed_bp) { 15049 /* immed_bp is gone by now, so clear this */ 15050 immed_bp = NULL; 15051 } 15052 break; 15053 } 15054 15055 } while (immed_bp == NULL); 15056 15057 exit: 15058 ASSERT(mutex_owned(SD_MUTEX(un))); 15059 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 15060 } 15061 15062 15063 /* 15064 * Function: sd_return_command 15065 * 15066 * Description: Returns a command to its originator (with or without an 15067 * error). Also starts commands waiting to be transported 15068 * to the target. 15069 * 15070 * Context: May be called from interrupt, kernel, or timeout context 15071 */ 15072 15073 static void 15074 sd_return_command(struct sd_lun *un, struct buf *bp) 15075 { 15076 struct sd_xbuf *xp; 15077 struct scsi_pkt *pktp; 15078 struct sd_fm_internal *sfip; 15079 15080 ASSERT(bp != NULL); 15081 ASSERT(un != NULL); 15082 ASSERT(mutex_owned(SD_MUTEX(un))); 15083 ASSERT(bp != un->un_rqs_bp); 15084 xp = SD_GET_XBUF(bp); 15085 ASSERT(xp != NULL); 15086 15087 pktp = SD_GET_PKTP(bp); 15088 sfip = (struct sd_fm_internal *)un->un_fm_private; 15089 ASSERT(sfip != NULL); 15090 15091 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 15092 15093 /* 15094 * Note: check for the "sdrestart failed" case. 
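 *
 * In the partial-DMA test below, xb_dma_resid != 0 means part of
 * the original transfer has not been shipped yet, while
 * pkt_resid == 0 means the most recent window completed in full,
 * so the next window can be set up and sent.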
15095 */ 15096 if ((un->un_partial_dma_supported == 1) && 15097 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 15098 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 15099 (xp->xb_pktp->pkt_resid == 0)) { 15100 15101 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 15102 /* 15103 * Successfully set up next portion of cmd 15104 * transfer, try sending it 15105 */ 15106 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 15107 NULL, NULL, 0, (clock_t)0, NULL); 15108 sd_start_cmds(un, NULL); 15109 return; /* Note:x86: need a return here? */ 15110 } 15111 } 15112 15113 /* 15114 * If this is the failfast bp, clear it from un_failfast_bp. This 15115 * can happen if upon being re-tried the failfast bp either 15116 * succeeded or encountered another error (possibly even a different 15117 * error than the one that precipitated the failfast state, but in 15118 * that case it would have had to exhaust retries as well). Regardless, 15119 * this should never occur while the instance is in the active 15120 * failfast state. 15121 */ 15122 if (bp == un->un_failfast_bp) { 15123 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 15124 un->un_failfast_bp = NULL; 15125 } 15126 15127 /* 15128 * Clear the failfast state upon successful completion of ANY cmd. 15129 */ 15130 if (bp->b_error == 0) { 15131 un->un_failfast_state = SD_FAILFAST_INACTIVE; 15132 /* 15133 * If this is a successful command that was previously retried, 15134 * we will treat it as a recovered command and post an 15135 * ereport with a driver-assessment of "recovered". 15136 */ 15137 if (xp->xb_ena > 0) { 15138 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15139 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY); 15140 } 15141 } else { 15142 /* 15143 * If this is a failed non-USCSI command we will post an 15144 * ereport with driver-assessment set accordingly ("fail" or 15145 * "fatal"). 15146 */ 15147 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 15148 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15149 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 15150 } 15151 } 15152 15153 /* 15154 * This is used if the command was retried one or more times. Show that 15155 * we are done with it, and allow processing of the waitq to resume. 15156 */ 15157 if (bp == un->un_retry_bp) { 15158 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15159 "sd_return_command: un:0x%p: " 15160 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 15161 un->un_retry_bp = NULL; 15162 un->un_retry_statp = NULL; 15163 } 15164 15165 SD_UPDATE_RDWR_STATS(un, bp); 15166 SD_UPDATE_PARTITION_STATS(un, bp); 15167 15168 switch (un->un_state) { 15169 case SD_STATE_SUSPENDED: 15170 /* 15171 * Notify any threads waiting in sd_ddi_suspend() that 15172 * a command completion has occurred. 15173 */ 15174 cv_broadcast(&un->un_disk_busy_cv); 15175 break; 15176 default: 15177 sd_start_cmds(un, NULL); 15178 break; 15179 } 15180 15181 /* Return this command up the iodone chain to its originator. */ 15182 mutex_exit(SD_MUTEX(un)); 15183 15184 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 15185 xp->xb_pktp = NULL; 15186 15187 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 15188 15189 ASSERT(!mutex_owned(SD_MUTEX(un))); 15190 mutex_enter(SD_MUTEX(un)); 15191 15192 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 15193 } 15194 15195 15196 /* 15197 * Function: sd_return_failed_command 15198 * 15199 * Description: Command completion when an error occurred.
15200 * 15201 * Context: May be called from interrupt context 15202 */ 15203 15204 static void 15205 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 15206 { 15207 ASSERT(bp != NULL); 15208 ASSERT(un != NULL); 15209 ASSERT(mutex_owned(SD_MUTEX(un))); 15210 15211 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15212 "sd_return_failed_command: entry\n"); 15213 15214 /* 15215 * b_resid could already be nonzero due to a partial data 15216 * transfer, so do not change it here. 15217 */ 15218 SD_BIOERROR(bp, errcode); 15219 15220 sd_return_command(un, bp); 15221 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15222 "sd_return_failed_command: exit\n"); 15223 } 15224 15225 15226 /* 15227 * Function: sd_return_failed_command_no_restart 15228 * 15229 * Description: Same as sd_return_failed_command, but ensures that no 15230 * call back into sd_start_cmds will be issued. 15231 * 15232 * Context: May be called from interrupt context 15233 */ 15234 15235 static void 15236 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 15237 int errcode) 15238 { 15239 struct sd_xbuf *xp; 15240 15241 ASSERT(bp != NULL); 15242 ASSERT(un != NULL); 15243 ASSERT(mutex_owned(SD_MUTEX(un))); 15244 xp = SD_GET_XBUF(bp); 15245 ASSERT(xp != NULL); 15246 ASSERT(errcode != 0); 15247 15248 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15249 "sd_return_failed_command_no_restart: entry\n"); 15250 15251 /* 15252 * b_resid could already be nonzero due to a partial data 15253 * transfer, so do not change it here. 15254 */ 15255 SD_BIOERROR(bp, errcode); 15256 15257 /* 15258 * If this is the failfast bp, clear it. This can happen if the 15259 * failfast bp encountered a fatal error when we attempted to 15260 * re-try it (such as a scsi_transport(9F) failure). However 15261 * we should NOT be in an active failfast state if the failfast 15262 * bp is not NULL. 15263 */ 15264 if (bp == un->un_failfast_bp) { 15265 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 15266 un->un_failfast_bp = NULL; 15267 } 15268 15269 if (bp == un->un_retry_bp) { 15270 /* 15271 * This command was retried one or more times. Show that we are 15272 * done with it, and allow processing of the waitq to resume. 15273 */ 15274 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15275 "sd_return_failed_command_no_restart: " 15276 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 15277 un->un_retry_bp = NULL; 15278 un->un_retry_statp = NULL; 15279 } 15280 15281 SD_UPDATE_RDWR_STATS(un, bp); 15282 SD_UPDATE_PARTITION_STATS(un, bp); 15283 15284 mutex_exit(SD_MUTEX(un)); 15285 15286 if (xp->xb_pktp != NULL) { 15287 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 15288 xp->xb_pktp = NULL; 15289 } 15290 15291 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 15292 15293 mutex_enter(SD_MUTEX(un)); 15294 15295 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15296 "sd_return_failed_command_no_restart: exit\n"); 15297 } 15298 15299 15300 /* 15301 * Function: sd_retry_command 15302 * 15303 * Description: Queue up a command for retry, or (optionally) fail it 15304 * if retry counts are exhausted. 15305 * 15306 * Arguments: un - Pointer to the sd_lun struct for the target. 15307 * 15308 * bp - Pointer to the buf for the command to be retried. 15309 * 15310 * retry_check_flag - Flag to see which (if any) of the retry 15311 * counts should be decremented/checked. If the indicated 15312 * retry count is exhausted, then the command will not be 15313 * retried; it will be failed instead.
This should use a 15314 * value equal to one of the following: 15315 * 15316 * SD_RETRIES_NOCHECK 15317 * SD_RETRIES_STANDARD 15318 * SD_RETRIES_VICTIM 15319 * 15320 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 15321 * if the check should be made to see if FLAG_ISOLATE is set 15322 * in the pkt. If FLAG_ISOLATE is set, then the command is 15323 * not retried, it is simply failed. 15324 * 15325 * user_funcp - Ptr to function to call before dispatching the 15326 * command. May be NULL if no action needs to be performed. 15327 * (Primarily intended for printing messages.) 15328 * 15329 * user_arg - Optional argument to be passed along to 15330 * the user_funcp call. 15331 * 15332 * failure_code - errno return code to set in the bp if the 15333 * command is going to be failed. 15334 * 15335 * retry_delay - Retry delay interval in (clock_t) units. May 15336 * be zero, which indicates that the command should be retried 15337 * immediately (i.e., without an intervening delay). 15338 * 15339 * statp - Ptr to kstat function to be updated if the command 15340 * is queued for a delayed retry. May be NULL if no kstat 15341 * update is desired. 15342 * 15343 * Context: May be called from interrupt context. 15344 */ 15345 15346 static void 15347 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 15348 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int 15349 code), void *user_arg, int failure_code, clock_t retry_delay, 15350 void (*statp)(kstat_io_t *)) 15351 { 15352 struct sd_xbuf *xp; 15353 struct scsi_pkt *pktp; 15354 struct sd_fm_internal *sfip; 15355 15356 ASSERT(un != NULL); 15357 ASSERT(mutex_owned(SD_MUTEX(un))); 15358 ASSERT(bp != NULL); 15359 xp = SD_GET_XBUF(bp); 15360 ASSERT(xp != NULL); 15361 pktp = SD_GET_PKTP(bp); 15362 ASSERT(pktp != NULL); 15363 15364 sfip = (struct sd_fm_internal *)un->un_fm_private; 15365 ASSERT(sfip != NULL); 15366 15367 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15368 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 15369 15370 /* 15371 * If we are syncing or dumping, fail the command to avoid 15372 * recursively calling back into scsi_transport(). 15373 */ 15374 if (ddi_in_panic()) { 15375 goto fail_command_no_log; 15376 } 15377 15378 /* 15379 * We should never be retrying a command with FLAG_DIAGNOSE set, so 15380 * log an error and fail the command. 15381 */ 15382 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15383 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 15384 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 15385 sd_dump_memory(un, SD_LOG_IO, "CDB", 15386 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15387 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 15388 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15389 goto fail_command; 15390 } 15391 15392 /* 15393 * If we are suspended, then put the command onto the head of the 15394 * wait queue since we don't want to start more commands, and 15395 * clear the un_retry_bp. The next time we are resumed, we will 15396 * handle the command in the wait queue.
15397 */ 15398 switch (un->un_state) { 15399 case SD_STATE_SUSPENDED: 15400 case SD_STATE_DUMPING: 15401 bp->av_forw = un->un_waitq_headp; 15402 un->un_waitq_headp = bp; 15403 if (un->un_waitq_tailp == NULL) { 15404 un->un_waitq_tailp = bp; 15405 } 15406 if (bp == un->un_retry_bp) { 15407 un->un_retry_bp = NULL; 15408 un->un_retry_statp = NULL; 15409 } 15410 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 15411 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 15412 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 15413 return; 15414 default: 15415 break; 15416 } 15417 15418 /* 15419 * If the caller wants us to check FLAG_ISOLATE, then see if that 15420 * is set; if it is then we do not want to retry the command. 15421 * Normally, FLAG_ISOLATE is only used with USCSI cmds. 15422 */ 15423 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 15424 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 15425 goto fail_command; 15426 } 15427 } 15428 15429 15430 /* 15431 * If SD_RETRIES_FAILFAST is set, it indicates that either a 15432 * command timeout or a selection timeout has occurred. This means 15433 * that we were unable to establish any kind of communication with 15434 * the target, and subsequent retries and/or commands are likely 15435 * to encounter similar results and take a long time to complete. 15436 * 15437 * If this is a failfast error condition, we need to update the 15438 * failfast state, even if this bp does not have B_FAILFAST set. 15439 */ 15440 if (retry_check_flag & SD_RETRIES_FAILFAST) { 15441 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 15442 ASSERT(un->un_failfast_bp == NULL); 15443 /* 15444 * If we are already in the active failfast state, and 15445 * another failfast error condition has been detected, 15446 * then fail this command if it has B_FAILFAST set. 15447 * If B_FAILFAST is clear, then maintain the legacy 15448 * behavior of retrying heroically, even though this will 15449 * take a lot more time to fail the command. 15450 */ 15451 if (bp->b_flags & B_FAILFAST) { 15452 goto fail_command; 15453 } 15454 } else { 15455 /* 15456 * We're not in the active failfast state, but we 15457 * have a failfast error condition, so we must begin 15458 * transition to the next state. We do this regardless 15459 * of whether or not this bp has B_FAILFAST set. 15460 */ 15461 if (un->un_failfast_bp == NULL) { 15462 /* 15463 * This is the first bp to meet a failfast 15464 * condition so save it on un_failfast_bp & 15465 * do normal retry processing. Do not enter 15466 * active failfast state yet. This marks 15467 * entry into the "failfast pending" state. 15468 */ 15469 un->un_failfast_bp = bp; 15470 15471 } else if (un->un_failfast_bp == bp) { 15472 /* 15473 * This is the second time *this* bp has 15474 * encountered a failfast error condition, 15475 * so enter active failfast state & flush 15476 * queues as appropriate. 15477 */ 15478 un->un_failfast_state = SD_FAILFAST_ACTIVE; 15479 un->un_failfast_bp = NULL; 15480 sd_failfast_flushq(un); 15481 15482 /* 15483 * Fail this bp now if B_FAILFAST set; 15484 * otherwise continue with retries. (It would 15485 * be pretty ironic if this bp succeeded on a 15486 * subsequent retry after we just flushed all 15487 * the queues).
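 *
 * Summarized as a hedged sketch of the state machine
 * implemented here:
 *
 *	INACTIVE --(failfast error)--> pending (un_failfast_bp = bp)
 *	pending  --(same bp fails again)--> ACTIVE (queues flushed)
 *	ACTIVE   --(any command completes OK)--> INACTIVE
 *
 * where "pending" is the implicit state in which un_failfast_bp
 * is non-NULL but un_failfast_state is still SD_FAILFAST_INACTIVE.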
15488 */ 15489 if (bp->b_flags & B_FAILFAST) { 15490 goto fail_command; 15491 } 15492 15493 #if !defined(lint) && !defined(__lint) 15494 } else { 15495 /* 15496 * If neither of the preceding conditionals 15497 * was true, it means that there is some 15498 * *other* bp that has met an initial failfast 15499 * condition and is currently either being 15500 * retried or is waiting to be retried. In 15501 * that case we should perform normal retry 15502 * processing on *this* bp, since there is a 15503 * chance that the current failfast condition 15504 * is transient and recoverable. If that does 15505 * not turn out to be the case, then retries 15506 * will be cleared when the wait queue is 15507 * flushed anyway. 15508 */ 15509 #endif 15510 } 15511 } 15512 } else { 15513 /* 15514 * SD_RETRIES_FAILFAST is clear, which indicates that we 15515 * likely were able to at least establish some level of 15516 * communication with the target and subsequent commands 15517 * and/or retries are likely to get through to the target. 15518 * In this case we want to be aggressive about clearing 15519 * the failfast state. Note that this does not affect 15520 * the "failfast pending" condition. 15521 */ 15522 un->un_failfast_state = SD_FAILFAST_INACTIVE; 15523 } 15524 15525 15526 /* 15527 * Check the specified retry count to see if we can still do 15528 * any retries with this pkt before we should fail it. 15529 */ 15530 switch (retry_check_flag & SD_RETRIES_MASK) { 15531 case SD_RETRIES_VICTIM: 15532 /* 15533 * Check the victim retry count. If exhausted, then fall 15534 * thru & check against the standard retry count. 15535 */ 15536 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 15537 /* Increment count & proceed with the retry */ 15538 xp->xb_victim_retry_count++; 15539 break; 15540 } 15541 /* Victim retries exhausted, fall back to std. retries... */ 15542 /* FALLTHRU */ 15543 15544 case SD_RETRIES_STANDARD: 15545 if (xp->xb_retry_count >= un->un_retry_count) { 15546 /* Retries exhausted, fail the command */ 15547 SD_TRACE(SD_LOG_IO_CORE, un, 15548 "sd_retry_command: retries exhausted!\n"); 15549 /* 15550 * update b_resid for failed SCMD_READ & SCMD_WRITE 15551 * commands with nonzero pkt_resid. 15552 */ 15553 if ((pktp->pkt_reason == CMD_CMPLT) && 15554 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 15555 (pktp->pkt_resid != 0)) { 15556 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 15557 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 15558 SD_UPDATE_B_RESID(bp, pktp); 15559 } 15560 } 15561 goto fail_command; 15562 } 15563 xp->xb_retry_count++; 15564 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15565 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15566 break; 15567 15568 case SD_RETRIES_UA: 15569 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 15570 /* Retries exhausted, fail the command */ 15571 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15572 "Unit Attention retries exhausted.
" 15573 "Check the target.\n"); 15574 goto fail_command; 15575 } 15576 xp->xb_ua_retry_count++; 15577 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15578 "sd_retry_command: retry count:%d\n", 15579 xp->xb_ua_retry_count); 15580 break; 15581 15582 case SD_RETRIES_BUSY: 15583 if (xp->xb_retry_count >= un->un_busy_retry_count) { 15584 /* Retries exhausted, fail the command */ 15585 SD_TRACE(SD_LOG_IO_CORE, un, 15586 "sd_retry_command: retries exhausted!\n"); 15587 goto fail_command; 15588 } 15589 xp->xb_retry_count++; 15590 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15591 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15592 break; 15593 15594 case SD_RETRIES_NOCHECK: 15595 default: 15596 /* No retry count to check. Just proceed with the retry */ 15597 break; 15598 } 15599 15600 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15601 15602 /* 15603 * If this is a non-USCSI command being retried 15604 * during execution last time, we should post an ereport with 15605 * driver-assessment of the value "retry". 15606 * For partial DMA, request sense and STATUS_QFULL, there are no 15607 * hardware errors, we bypass ereport posting. 15608 */ 15609 if (failure_code != 0) { 15610 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 15611 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15612 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY); 15613 } 15614 } 15615 15616 /* 15617 * If we were given a zero timeout, we must attempt to retry the 15618 * command immediately (ie, without a delay). 15619 */ 15620 if (retry_delay == 0) { 15621 /* 15622 * Check some limiting conditions to see if we can actually 15623 * do the immediate retry. If we cannot, then we must 15624 * fall back to queueing up a delayed retry. 15625 */ 15626 if (un->un_ncmds_in_transport >= un->un_throttle) { 15627 /* 15628 * We are at the throttle limit for the target, 15629 * fall back to delayed retry. 15630 */ 15631 retry_delay = un->un_busy_timeout; 15632 statp = kstat_waitq_enter; 15633 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15634 "sd_retry_command: immed. retry hit " 15635 "throttle!\n"); 15636 } else { 15637 /* 15638 * We're clear to proceed with the immediate retry. 15639 * First call the user-provided function (if any) 15640 */ 15641 if (user_funcp != NULL) { 15642 (*user_funcp)(un, bp, user_arg, 15643 SD_IMMEDIATE_RETRY_ISSUED); 15644 #ifdef __lock_lint 15645 sd_print_incomplete_msg(un, bp, user_arg, 15646 SD_IMMEDIATE_RETRY_ISSUED); 15647 sd_print_cmd_incomplete_msg(un, bp, user_arg, 15648 SD_IMMEDIATE_RETRY_ISSUED); 15649 sd_print_sense_failed_msg(un, bp, user_arg, 15650 SD_IMMEDIATE_RETRY_ISSUED); 15651 #endif 15652 } 15653 15654 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15655 "sd_retry_command: issuing immediate retry\n"); 15656 15657 /* 15658 * Call sd_start_cmds() to transport the command to 15659 * the target. 15660 */ 15661 sd_start_cmds(un, bp); 15662 15663 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15664 "sd_retry_command exit\n"); 15665 return; 15666 } 15667 } 15668 15669 /* 15670 * Set up to retry the command after a delay. 
15671 * First call the user-provided function (if any) 15672 */ 15673 if (user_funcp != NULL) { 15674 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 15675 } 15676 15677 sd_set_retry_bp(un, bp, retry_delay, statp); 15678 15679 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15680 return; 15681 15682 fail_command: 15683 15684 if (user_funcp != NULL) { 15685 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 15686 } 15687 15688 fail_command_no_log: 15689 15690 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15691 "sd_retry_command: returning failed command\n"); 15692 15693 sd_return_failed_command(un, bp, failure_code); 15694 15695 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15696 } 15697 15698 15699 /* 15700 * Function: sd_set_retry_bp 15701 * 15702 * Description: Set up the given bp for retry. 15703 * 15704 * Arguments: un - ptr to associated softstate 15705 * bp - ptr to buf(9S) for the command 15706 * retry_delay - time interval before issuing retry (may be 0) 15707 * statp - optional pointer to kstat function 15708 * 15709 * Context: May be called under interrupt context 15710 */ 15711 15712 static void 15713 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 15714 void (*statp)(kstat_io_t *)) 15715 { 15716 ASSERT(un != NULL); 15717 ASSERT(mutex_owned(SD_MUTEX(un))); 15718 ASSERT(bp != NULL); 15719 15720 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15721 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 15722 15723 /* 15724 * Indicate that the command is being retried. This will not allow any 15725 * other commands on the wait queue to be transported to the target 15726 * until this command has been completed (success or failure). The 15727 * "retry command" is not transported to the target until the given 15728 * time delay expires, unless the user specified a 0 retry_delay. 15729 * 15730 * Note: the timeout(9F) callback routine is what actually calls 15731 * sd_start_cmds() to transport the command, with the exception of a 15732 * zero retry_delay. The only current implementor of a zero retry delay 15733 * is the case where a START_STOP_UNIT is sent to spin-up a device. 15734 */ 15735 if (un->un_retry_bp == NULL) { 15736 ASSERT(un->un_retry_statp == NULL); 15737 un->un_retry_bp = bp; 15738 15739 /* 15740 * If the user has not specified a delay the command should 15741 * be queued and no timeout should be scheduled. 15742 */ 15743 if (retry_delay == 0) { 15744 /* 15745 * Save the kstat pointer that will be used in the 15746 * call to SD_UPDATE_KSTATS() below, so that 15747 * sd_start_cmds() can correctly decrement the waitq 15748 * count when it is time to transport this command. 15749 */ 15750 un->un_retry_statp = statp; 15751 goto done; 15752 } 15753 } 15754 15755 if (un->un_retry_bp == bp) { 15756 /* 15757 * Save the kstat pointer that will be used in the call to 15758 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 15759 * correctly decrement the waitq count when it is time to 15760 * transport this command. 15761 */ 15762 un->un_retry_statp = statp; 15763 15764 /* 15765 * Schedule a timeout if: 15766 * 1) The user has specified a delay. 15767 * 2) There is not a START_STOP_UNIT callback pending. 15768 * 15769 * If no delay has been specified, then it is up to the caller 15770 * to ensure that IO processing continues without stalling. 15771 * Effectively, this means that the caller will issue the 15772 * required call to sd_start_cmds(). 
The START_STOP_UNIT 15773 * callback does this after the START STOP UNIT command has 15774 * completed. In either of these cases we should not schedule 15775 * a timeout callback here. Also don't schedule the timeout if 15776 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 15777 */ 15778 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 15779 (un->un_direct_priority_timeid == NULL)) { 15780 un->un_retry_timeid = 15781 timeout(sd_start_retry_command, un, retry_delay); 15782 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15783 "sd_set_retry_bp: setting timeout: un: 0x%p" 15784 " bp:0x%p un_retry_timeid:0x%p\n", 15785 un, bp, un->un_retry_timeid); 15786 } 15787 } else { 15788 /* 15789 * We only get in here if there is already another command 15790 * waiting to be retried. In this case, we just put the 15791 * given command onto the wait queue, so it can be transported 15792 * after the current retry command has completed. 15793 * 15794 * Also we have to make sure that if the command at the head 15795 * of the wait queue is the un_failfast_bp, that we do not 15796 * put ahead of it any other commands that are to be retried. 15797 */ 15798 if ((un->un_failfast_bp != NULL) && 15799 (un->un_failfast_bp == un->un_waitq_headp)) { 15800 /* 15801 * Enqueue this command AFTER the first command on 15802 * the wait queue (which is also un_failfast_bp). 15803 */ 15804 bp->av_forw = un->un_waitq_headp->av_forw; 15805 un->un_waitq_headp->av_forw = bp; 15806 if (un->un_waitq_headp == un->un_waitq_tailp) { 15807 un->un_waitq_tailp = bp; 15808 } 15809 } else { 15810 /* Enqueue this command at the head of the waitq. */ 15811 bp->av_forw = un->un_waitq_headp; 15812 un->un_waitq_headp = bp; 15813 if (un->un_waitq_tailp == NULL) { 15814 un->un_waitq_tailp = bp; 15815 } 15816 } 15817 15818 if (statp == NULL) { 15819 statp = kstat_waitq_enter; 15820 } 15821 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15822 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 15823 } 15824 15825 done: 15826 if (statp != NULL) { 15827 SD_UPDATE_KSTATS(un, statp, bp); 15828 } 15829 15830 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15831 "sd_set_retry_bp: exit un:0x%p\n", un); 15832 } 15833 15834 15835 /* 15836 * Function: sd_start_retry_command 15837 * 15838 * Description: Start the command that has been waiting on the target's 15839 * retry queue. Called from timeout(9F) context after the 15840 * retry delay interval has expired. 15841 * 15842 * Arguments: arg - pointer to associated softstate for the device. 15843 * 15844 * Context: timeout(9F) thread context. May not sleep. 15845 */ 15846 15847 static void 15848 sd_start_retry_command(void *arg) 15849 { 15850 struct sd_lun *un = arg; 15851 15852 ASSERT(un != NULL); 15853 ASSERT(!mutex_owned(SD_MUTEX(un))); 15854 15855 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15856 "sd_start_retry_command: entry\n"); 15857 15858 mutex_enter(SD_MUTEX(un)); 15859 15860 un->un_retry_timeid = NULL; 15861 15862 if (un->un_retry_bp != NULL) { 15863 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15864 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 15865 un, un->un_retry_bp); 15866 sd_start_cmds(un, un->un_retry_bp); 15867 } 15868 15869 mutex_exit(SD_MUTEX(un)); 15870 15871 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15872 "sd_start_retry_command: exit\n"); 15873 } 15874 15875 /* 15876 * Function: sd_rmw_msg_print_handler 15877 * 15878 * Description: If RMW mode is enabled and warning message is triggered 15879 * print I/O count during a fixed interval. 
15880 * 15881 * Arguments: arg - pointer to associated softstate for the device. 15882 * 15883 * Context: timeout(9F) thread context. May not sleep. 15884 */ 15885 static void 15886 sd_rmw_msg_print_handler(void *arg) 15887 { 15888 struct sd_lun *un = arg; 15889 15890 ASSERT(un != NULL); 15891 ASSERT(!mutex_owned(SD_MUTEX(un))); 15892 15893 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15894 "sd_rmw_msg_print_handler: entry\n"); 15895 15896 mutex_enter(SD_MUTEX(un)); 15897 15898 if (un->un_rmw_incre_count > 0) { 15899 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15900 "%"PRIu64" I/O requests are not aligned with %d disk " 15901 "sector size in %ld seconds. They are handled through " 15902 "Read Modify Write but the performance is very low!\n", 15903 un->un_rmw_incre_count, un->un_tgt_blocksize, 15904 drv_hztousec(SD_RMW_MSG_PRINT_TIMEOUT) / 1000000); 15905 un->un_rmw_incre_count = 0; 15906 un->un_rmw_msg_timeid = timeout(sd_rmw_msg_print_handler, 15907 un, SD_RMW_MSG_PRINT_TIMEOUT); 15908 } else { 15909 un->un_rmw_msg_timeid = NULL; 15910 } 15911 15912 mutex_exit(SD_MUTEX(un)); 15913 15914 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15915 "sd_rmw_msg_print_handler: exit\n"); 15916 } 15917 15918 /* 15919 * Function: sd_start_direct_priority_command 15920 * 15921 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 15922 * received TRAN_BUSY when we called scsi_transport() to send it 15923 * to the underlying HBA. This function is called from timeout(9F) 15924 * context after the delay interval has expired. 15925 * 15926 * Arguments: arg - pointer to associated buf(9S) to be restarted. 15927 * 15928 * Context: timeout(9F) thread context. May not sleep. 15929 */ 15930 15931 static void 15932 sd_start_direct_priority_command(void *arg) 15933 { 15934 struct buf *priority_bp = arg; 15935 struct sd_lun *un; 15936 15937 ASSERT(priority_bp != NULL); 15938 un = SD_GET_UN(priority_bp); 15939 ASSERT(un != NULL); 15940 ASSERT(!mutex_owned(SD_MUTEX(un))); 15941 15942 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15943 "sd_start_direct_priority_command: entry\n"); 15944 15945 mutex_enter(SD_MUTEX(un)); 15946 un->un_direct_priority_timeid = NULL; 15947 sd_start_cmds(un, priority_bp); 15948 mutex_exit(SD_MUTEX(un)); 15949 15950 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15951 "sd_start_direct_priority_command: exit\n"); 15952 } 15953 15954 15955 /* 15956 * Function: sd_send_request_sense_command 15957 * 15958 * Description: Sends a REQUEST SENSE command to the target 15959 * 15960 * Context: May be called from interrupt context. 15961 */ 15962 15963 static void 15964 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 15965 struct scsi_pkt *pktp) 15966 { 15967 ASSERT(bp != NULL); 15968 ASSERT(un != NULL); 15969 ASSERT(mutex_owned(SD_MUTEX(un))); 15970 15971 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 15972 "entry: buf:0x%p\n", bp); 15973 15974 /* 15975 * If we are syncing or dumping, then fail the command to avoid a 15976 * recursive callback into scsi_transport(). Also fail the command 15977 * if we are suspended (legacy behavior). 
15978 */ 15979 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 15980 (un->un_state == SD_STATE_DUMPING)) { 15981 sd_return_failed_command(un, bp, EIO); 15982 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15983 "sd_send_request_sense_command: syncing/dumping, exit\n"); 15984 return; 15985 } 15986 15987 /* 15988 * Retry the failed command and don't issue the request sense if: 15989 * 1) the sense buf is busy 15990 * 2) we have 1 or more outstanding commands on the target 15991 * (the sense data will be cleared or invalidated anyway) 15992 * 15993 * Note: There could be an issue with not checking a retry limit here; 15994 * the problem is determining which retry limit to check. 15995 */ 15996 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 15997 /* Don't retry if the command is flagged as non-retryable */ 15998 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15999 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 16000 NULL, NULL, 0, un->un_busy_timeout, 16001 kstat_waitq_enter); 16002 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16003 "sd_send_request_sense_command: " 16004 "at full throttle, retrying exit\n"); 16005 } else { 16006 sd_return_failed_command(un, bp, EIO); 16007 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16008 "sd_send_request_sense_command: " 16009 "at full throttle, non-retryable exit\n"); 16010 } 16011 return; 16012 } 16013 16014 sd_mark_rqs_busy(un, bp); 16015 sd_start_cmds(un, un->un_rqs_bp); 16016 16017 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16018 "sd_send_request_sense_command: exit\n"); 16019 } 16020 16021 16022 /* 16023 * Function: sd_mark_rqs_busy 16024 * 16025 * Description: Indicate that the request sense bp for this instance is 16026 * in use. 16027 * 16028 * Context: May be called under interrupt context 16029 */ 16030 16031 static void 16032 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 16033 { 16034 struct sd_xbuf *sense_xp; 16035 16036 ASSERT(un != NULL); 16037 ASSERT(bp != NULL); 16038 ASSERT(mutex_owned(SD_MUTEX(un))); 16039 ASSERT(un->un_sense_isbusy == 0); 16040 16041 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 16042 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 16043 16044 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 16045 ASSERT(sense_xp != NULL); 16046 16047 SD_INFO(SD_LOG_IO, un, 16048 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 16049 16050 ASSERT(sense_xp->xb_pktp != NULL); 16051 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 16052 == (FLAG_SENSING | FLAG_HEAD)); 16053 16054 un->un_sense_isbusy = 1; 16055 un->un_rqs_bp->b_resid = 0; 16056 sense_xp->xb_pktp->pkt_resid = 0; 16057 sense_xp->xb_pktp->pkt_reason = 0; 16058 16059 /* So we can get back the bp at interrupt time! */ 16060 sense_xp->xb_sense_bp = bp; 16061 16062 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 16063 16064 /* 16065 * Mark this buf as awaiting sense data. (This is already set in 16066 * the pkt_flags for the RQS packet.)
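 *
 * In sketch form, the FLAG_SENSING handshake is: sd_mark_rqs_busy()
 * sets FLAG_SENSING on the failed command's pkt (below) and saves
 * that bp in xb_sense_bp; sd_mark_rqs_idle() later clears the flag
 * and hands the bp back once the sense data has been retrieved.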
16067 */ 16068 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 16069 16070 /* Request sense down same path */ 16071 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) && 16072 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance) 16073 sense_xp->xb_pktp->pkt_path_instance = 16074 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance; 16075 16076 sense_xp->xb_retry_count = 0; 16077 sense_xp->xb_victim_retry_count = 0; 16078 sense_xp->xb_ua_retry_count = 0; 16079 sense_xp->xb_nr_retry_count = 0; 16080 sense_xp->xb_dma_resid = 0; 16081 16082 /* Clean up the fields for auto-request sense */ 16083 sense_xp->xb_sense_status = 0; 16084 sense_xp->xb_sense_state = 0; 16085 sense_xp->xb_sense_resid = 0; 16086 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 16087 16088 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 16089 } 16090 16091 16092 /* 16093 * Function: sd_mark_rqs_idle 16094 * 16095 * Description: SD_MUTEX must be held continuously through this routine 16096 * to prevent reuse of the rqs struct before the caller can 16097 * complete its processing. 16098 * 16099 * Return Code: Pointer to the RQS buf 16100 * 16101 * Context: May be called under interrupt context 16102 */ 16103 16104 static struct buf * 16105 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 16106 { 16107 struct buf *bp; 16108 ASSERT(un != NULL); 16109 ASSERT(sense_xp != NULL); 16110 ASSERT(mutex_owned(SD_MUTEX(un))); 16111 ASSERT(un->un_sense_isbusy != 0); 16112 16113 un->un_sense_isbusy = 0; 16114 bp = sense_xp->xb_sense_bp; 16115 sense_xp->xb_sense_bp = NULL; 16116 16117 /* This pkt is no longer interested in getting sense data */ 16118 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 16119 16120 return (bp); 16121 } 16122 16123 16124 16125 /* 16126 * Function: sd_alloc_rqs 16127 * 16128 * Description: Set up the unit to receive auto request sense data 16129 * 16130 * Return Code: DDI_SUCCESS or DDI_FAILURE 16131 * 16132 * Context: Called under attach(9E) context 16133 */ 16134 16135 static int 16136 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 16137 { 16138 struct sd_xbuf *xp; 16139 16140 ASSERT(un != NULL); 16141 ASSERT(!mutex_owned(SD_MUTEX(un))); 16142 ASSERT(un->un_rqs_bp == NULL); 16143 ASSERT(un->un_rqs_pktp == NULL); 16144 16145 /* 16146 * First allocate the required buf and scsi_pkt structs, then set up 16147 * the CDB in the scsi_pkt for a REQUEST SENSE command. 16148 */ 16149 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 16150 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 16151 if (un->un_rqs_bp == NULL) { 16152 return (DDI_FAILURE); 16153 } 16154 16155 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 16156 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 16157 16158 if (un->un_rqs_pktp == NULL) { 16159 sd_free_rqs(un); 16160 return (DDI_FAILURE); 16161 } 16162 16163 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 16164 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 16165 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 16166 16167 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 16168 16169 /* Set up the other needed members in the ARQ scsi_pkt. */ 16170 un->un_rqs_pktp->pkt_comp = sdintr; 16171 un->un_rqs_pktp->pkt_time = sd_io_time; 16172 un->un_rqs_pktp->pkt_flags |= 16173 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 16174 16175 /* 16176 * Allocate & init the sd_xbuf struct for the RQS command.
Do not 16177 * provide any initpkt or destroypkt routines, as we take care of 16178 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 16179 */ 16180 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 16181 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 16182 xp->xb_pktp = un->un_rqs_pktp; 16183 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16184 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 16185 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 16186 16187 /* 16188 * Save the pointer to the request sense private bp so it can 16189 * be retrieved in sdintr. 16190 */ 16191 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 16192 ASSERT(un->un_rqs_bp->b_private == xp); 16193 16194 /* 16195 * See if the HBA supports auto-request sense for the specified 16196 * target/lun. If it does, then try to enable it (if not already 16197 * enabled). 16198 * 16199 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 16200 * failure, while for other HBAs (pln) scsi_ifsetcap will always 16201 * return success. However, in both of these cases ARQ is always 16202 * enabled and scsi_ifgetcap will always return true. The best approach 16203 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 16204 * 16205 * The third case is an HBA (adp) that always returns enabled on 16206 * scsi_ifgetcap even when ARQ is not enabled; the best approach 16207 * there is to issue a scsi_ifsetcap and then a scsi_ifgetcap. 16208 * Note: this case is to circumvent an Adaptec bug (x86 only). 16209 */ 16210 16211 if (un->un_f_is_fibre == TRUE) { 16212 un->un_f_arq_enabled = TRUE; 16213 } else { 16214 #if defined(__i386) || defined(__amd64) 16215 /* 16216 * Circumvent the Adaptec bug, remove this code when 16217 * the bug is fixed 16218 */ 16219 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 16220 #endif 16221 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 16222 case 0: 16223 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16224 "sd_alloc_rqs: HBA supports ARQ\n"); 16225 /* 16226 * ARQ is supported by this HBA but currently is not 16227 * enabled. Attempt to enable it and if successful then 16228 * mark this instance as ARQ enabled. 16229 */ 16230 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 16231 == 1) { 16232 /* Successfully enabled ARQ in the HBA */ 16233 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16234 "sd_alloc_rqs: ARQ enabled\n"); 16235 un->un_f_arq_enabled = TRUE; 16236 } else { 16237 /* Could not enable ARQ in the HBA */ 16238 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16239 "sd_alloc_rqs: failed ARQ enable\n"); 16240 un->un_f_arq_enabled = FALSE; 16241 } 16242 break; 16243 case 1: 16244 /* 16245 * ARQ is supported by this HBA and is already enabled. 16246 * Just mark ARQ as enabled for this instance. 16247 */ 16248 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16249 "sd_alloc_rqs: ARQ already enabled\n"); 16250 un->un_f_arq_enabled = TRUE; 16251 break; 16252 default: 16253 /* 16254 * ARQ is not supported by this HBA; disable it for this 16255 * instance. 16256 */ 16257 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16258 "sd_alloc_rqs: HBA does not support ARQ\n"); 16259 un->un_f_arq_enabled = FALSE; 16260 break; 16261 } 16262 } 16263 16264 return (DDI_SUCCESS); 16265 } 16266 16267 16268 /* 16269 * Function: sd_free_rqs 16270 * 16271 * Description: Cleanup for the per-instance RQS command.
16272 * 16273 * Context: Kernel thread context 16274 */ 16275 16276 static void 16277 sd_free_rqs(struct sd_lun *un) 16278 { 16279 ASSERT(un != NULL); 16280 16281 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 16282 16283 /* 16284 * If consistent memory is bound to a scsi_pkt, the pkt 16285 * has to be destroyed *before* freeing the consistent memory. 16286 * Don't change the sequence of these operations. 16287 * scsi_destroy_pkt() might access memory after it was freed 16288 * in scsi_free_consistent_buf(), which isn't allowed. 16289 */ 16290 if (un->un_rqs_pktp != NULL) { 16291 scsi_destroy_pkt(un->un_rqs_pktp); 16292 un->un_rqs_pktp = NULL; 16293 } 16294 16295 if (un->un_rqs_bp != NULL) { 16296 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp); 16297 if (xp != NULL) { 16298 kmem_free(xp, sizeof (struct sd_xbuf)); 16299 } 16300 scsi_free_consistent_buf(un->un_rqs_bp); 16301 un->un_rqs_bp = NULL; 16302 } 16303 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 16304 } 16305 16306 16307 16308 /* 16309 * Function: sd_reduce_throttle 16310 * 16311 * Description: Reduces the maximum # of outstanding commands on a 16312 * target to the current number of outstanding commands. 16313 * Queues a timeout(9F) callback to restore the limit 16314 * after a specified interval has elapsed. 16315 * Typically used when we get a TRAN_BUSY return code 16316 * back from scsi_transport(). 16317 * 16318 * Arguments: un - ptr to the sd_lun softstate struct 16319 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 16320 * 16321 * Context: May be called from interrupt context 16322 */ 16323 16324 static void 16325 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 16326 { 16327 ASSERT(un != NULL); 16328 ASSERT(mutex_owned(SD_MUTEX(un))); 16329 ASSERT(un->un_ncmds_in_transport >= 0); 16330 16331 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 16332 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 16333 un, un->un_throttle, un->un_ncmds_in_transport); 16334 16335 if (un->un_throttle > 1) { 16336 if (un->un_f_use_adaptive_throttle == TRUE) { 16337 switch (throttle_type) { 16338 case SD_THROTTLE_TRAN_BUSY: 16339 if (un->un_busy_throttle == 0) { 16340 un->un_busy_throttle = un->un_throttle; 16341 } 16342 break; 16343 case SD_THROTTLE_QFULL: 16344 un->un_busy_throttle = 0; 16345 break; 16346 default: 16347 ASSERT(FALSE); 16348 } 16349 16350 if (un->un_ncmds_in_transport > 0) { 16351 un->un_throttle = un->un_ncmds_in_transport; 16352 } 16353 16354 } else { 16355 if (un->un_ncmds_in_transport == 0) { 16356 un->un_throttle = 1; 16357 } else { 16358 un->un_throttle = un->un_ncmds_in_transport; 16359 } 16360 } 16361 } 16362 16363 /* Reschedule the timeout if none is currently active */ 16364 if (un->un_reset_throttle_timeid == NULL) { 16365 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 16366 un, SD_THROTTLE_RESET_INTERVAL); 16367 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16368 "sd_reduce_throttle: timeout scheduled!\n"); 16369 } 16370 16371 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 16372 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 16373 } 16374 16375 16376 16377 /* 16378 * Function: sd_restore_throttle 16379 * 16380 * Description: Callback function for timeout(9F). Resets the current 16381 * value of un->un_throttle to its default. 16382 * 16383 * Arguments: arg - pointer to associated softstate for the device.
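 *
 *		Illustrative arithmetic (values assumed, not defaults of this
 *		driver): with un_saved_throttle == 256, sd_qfull_throttle_enable
 *		set, and a QFULL having dropped un_throttle to 8, each
 *		expiration raises the limit by max(un_throttle / 10, 1):
 *		8 -> 9 -> 10 -> 11 -> ... and, once past the 10% granularity,
 *		100 -> 110 -> 121 -> ..., capped at un_saved_throttle.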
16384 * 16385 * Context: May be called from interrupt context 16386 */ 16387 16388 static void 16389 sd_restore_throttle(void *arg) 16390 { 16391 struct sd_lun *un = arg; 16392 16393 ASSERT(un != NULL); 16394 ASSERT(!mutex_owned(SD_MUTEX(un))); 16395 16396 mutex_enter(SD_MUTEX(un)); 16397 16398 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 16399 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 16400 16401 un->un_reset_throttle_timeid = NULL; 16402 16403 if (un->un_f_use_adaptive_throttle == TRUE) { 16404 /* 16405 * If un_busy_throttle is nonzero, then it contains the 16406 * value that un_throttle was when we got a TRAN_BUSY back 16407 * from scsi_transport(). We want to revert back to this 16408 * value. 16409 * 16410 * In the QFULL case, the throttle limit will incrementally 16411 * increase until it reaches max throttle. 16412 */ 16413 if (un->un_busy_throttle > 0) { 16414 un->un_throttle = un->un_busy_throttle; 16415 un->un_busy_throttle = 0; 16416 } else { 16417 /* 16418 * Increase the throttle by 10% to open the gate 16419 * slowly; schedule another restore if the saved 16420 * throttle has not been reached yet. 16421 */ 16422 short throttle; 16423 if (sd_qfull_throttle_enable) { 16424 throttle = un->un_throttle + 16425 max((un->un_throttle / 10), 1); 16426 un->un_throttle = 16427 (throttle < un->un_saved_throttle) ? 16428 throttle : un->un_saved_throttle; 16429 if (un->un_throttle < un->un_saved_throttle) { 16430 un->un_reset_throttle_timeid = 16431 timeout(sd_restore_throttle, 16432 un, 16433 SD_QFULL_THROTTLE_RESET_INTERVAL); 16434 } 16435 } 16436 } 16437 16438 /* 16439 * If un_throttle has fallen below the low-water mark, we 16440 * restore the maximum value here (and allow it to ratchet 16441 * down again if necessary). 16442 */ 16443 if (un->un_throttle < un->un_min_throttle) { 16444 un->un_throttle = un->un_saved_throttle; 16445 } 16446 } else { 16447 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 16448 "restoring limit from 0x%x to 0x%x\n", 16449 un->un_throttle, un->un_saved_throttle); 16450 un->un_throttle = un->un_saved_throttle; 16451 } 16452 16453 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 16454 "sd_restore_throttle: calling sd_start_cmds!\n"); 16455 16456 sd_start_cmds(un, NULL); 16457 16458 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 16459 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 16460 un, un->un_throttle); 16461 16462 mutex_exit(SD_MUTEX(un)); 16463 16464 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 16465 } 16466 16467 /* 16468 * Function: sdrunout 16469 * 16470 * Description: Callback routine for scsi_init_pkt when a resource allocation 16471 * fails. 16472 * 16473 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 16474 * soft state instance. 16475 * 16476 * Return Code: The scsi_init_pkt routine allows for the callback function to 16477 * return a 0 indicating the callback should be rescheduled or a 1 16478 * indicating not to reschedule. This routine always returns 1 16479 * because the driver always provides a callback function to 16480 * scsi_init_pkt. This results in a callback always being scheduled 16481 * (via the scsi_init_pkt callback implementation) if a resource 16482 * failure occurs. 16483 * 16484 * Context: This callback function may not block or call routines that block 16485 * 16486 * Note: Using the scsi_init_pkt callback facility can result in an I/O 16487 * request persisting at the head of the list which cannot be 16488 * satisfied even after multiple retries.
In the future the driver 16489 * may implement some type of maximum runout count before failing 16490 * an I/O. 16491 */ 16492 16493 static int 16494 sdrunout(caddr_t arg) 16495 { 16496 struct sd_lun *un = (struct sd_lun *)arg; 16497 16498 ASSERT(un != NULL); 16499 ASSERT(!mutex_owned(SD_MUTEX(un))); 16500 16501 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 16502 16503 mutex_enter(SD_MUTEX(un)); 16504 sd_start_cmds(un, NULL); 16505 mutex_exit(SD_MUTEX(un)); 16506 /* 16507 * This callback routine always returns 1 (i.e. do not reschedule) 16508 * because we always specify sdrunout as the callback handler for 16509 * scsi_init_pkt inside the call to sd_start_cmds. 16510 */ 16511 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 16512 return (1); 16513 } 16514 16515 16516 /* 16517 * Function: sdintr 16518 * 16519 * Description: Completion callback routine for scsi_pkt(9S) structs 16520 * sent to the HBA driver via scsi_transport(9F). 16521 * 16522 * Context: Interrupt context 16523 */ 16524 16525 static void 16526 sdintr(struct scsi_pkt *pktp) 16527 { 16528 struct buf *bp; 16529 struct sd_xbuf *xp; 16530 struct sd_lun *un; 16531 size_t actual_len; 16532 sd_ssc_t *sscp; 16533 16534 ASSERT(pktp != NULL); 16535 bp = (struct buf *)pktp->pkt_private; 16536 ASSERT(bp != NULL); 16537 xp = SD_GET_XBUF(bp); 16538 ASSERT(xp != NULL); 16539 ASSERT(xp->xb_pktp != NULL); 16540 un = SD_GET_UN(bp); 16541 ASSERT(un != NULL); 16542 ASSERT(!mutex_owned(SD_MUTEX(un))); 16543 16544 #ifdef SD_FAULT_INJECTION 16545 16546 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 16547 /* SD FaultInjection */ 16548 sd_faultinjection(pktp); 16549 16550 #endif /* SD_FAULT_INJECTION */ 16551 16552 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 16553 " xp:0x%p, un:0x%p\n", bp, xp, un); 16554 16555 mutex_enter(SD_MUTEX(un)); 16556 16557 ASSERT(un->un_fm_private != NULL); 16558 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 16559 ASSERT(sscp != NULL); 16560 16561 /* Reduce the count of the #commands currently in transport */ 16562 un->un_ncmds_in_transport--; 16563 ASSERT(un->un_ncmds_in_transport >= 0); 16564 16565 /* Increment counter to indicate that the callback routine is active */ 16566 un->un_in_callback++; 16567 16568 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 16569 16570 #ifdef SDDEBUG 16571 if (bp == un->un_retry_bp) { 16572 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 16573 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 16574 un, un->un_retry_bp, un->un_ncmds_in_transport); 16575 } 16576 #endif 16577 16578 /* 16579 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 16580 * state if needed. 16581 */ 16582 if (pktp->pkt_reason == CMD_DEV_GONE) { 16583 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16584 "Command failed to complete...Device is gone\n"); 16585 if (un->un_mediastate != DKIO_DEV_GONE) { 16586 un->un_mediastate = DKIO_DEV_GONE; 16587 cv_broadcast(&un->un_state_cv); 16588 } 16589 sd_return_failed_command(un, bp, EIO); 16590 goto exit; 16591 } 16592 16593 if (pktp->pkt_state & STATE_XARQ_DONE) { 16594 SD_TRACE(SD_LOG_COMMON, un, 16595 "sdintr: extra sense data received. pkt=%p\n", pktp); 16596 } 16597 16598 /* 16599 * First see if the pkt has auto-request sense data with it.... 16600 * Look at the packet state first so we don't take a performance 16601 * hit looking at the arq enabled flag unless absolutely necessary.
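 *
 * As a sketch of the shape being tested (mirroring the code below): when
 * the HBA has done an auto request sense, pkt_scbp no longer points at a
 * bare status byte but at a struct scsi_arq_status, e.g.:
 *
 *	struct scsi_arq_status *asp =
 *	    (struct scsi_arq_status *)(pktp->pkt_scbp);
 *	uchar_t rq_status = *((uchar_t *)(&(asp->sts_rqpkt_status)));
 *
 * with the sense bytes themselves living in asp->sts_sensedata.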
16602 */ 16603 if ((pktp->pkt_state & STATE_ARQ_DONE) && 16604 (un->un_f_arq_enabled == TRUE)) { 16605 /* 16606 * The HBA did an auto request sense for this command so check 16607 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16608 * driver command that should not be retried. 16609 */ 16610 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16611 /* 16612 * Save the relevant sense info into the xp for the 16613 * original cmd. 16614 */ 16615 struct scsi_arq_status *asp; 16616 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16617 xp->xb_sense_status = 16618 *((uchar_t *)(&(asp->sts_rqpkt_status))); 16619 xp->xb_sense_state = asp->sts_rqpkt_state; 16620 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16621 if (pktp->pkt_state & STATE_XARQ_DONE) { 16622 actual_len = MAX_SENSE_LENGTH - 16623 xp->xb_sense_resid; 16624 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16625 MAX_SENSE_LENGTH); 16626 } else { 16627 if (xp->xb_sense_resid > SENSE_LENGTH) { 16628 actual_len = MAX_SENSE_LENGTH - 16629 xp->xb_sense_resid; 16630 } else { 16631 actual_len = SENSE_LENGTH - 16632 xp->xb_sense_resid; 16633 } 16634 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16635 if ((((struct uscsi_cmd *) 16636 (xp->xb_pktinfo))->uscsi_rqlen) > 16637 actual_len) { 16638 xp->xb_sense_resid = 16639 (((struct uscsi_cmd *) 16640 (xp->xb_pktinfo))-> 16641 uscsi_rqlen) - actual_len; 16642 } else { 16643 xp->xb_sense_resid = 0; 16644 } 16645 } 16646 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16647 SENSE_LENGTH); 16648 } 16649 16650 /* fail the command */ 16651 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16652 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 16653 sd_return_failed_command(un, bp, EIO); 16654 goto exit; 16655 } 16656 16657 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16658 /* 16659 * We want to either retry or fail this command, so free 16660 * the DMA resources here. If we retry the command then 16661 * the DMA resources will be reallocated in sd_start_cmds(). 16662 * Note that when PKT_DMA_PARTIAL is used, this reallocation 16663 * causes the *entire* transfer to start over again from the 16664 * beginning of the request, even for PARTIAL chunks that 16665 * have already transferred successfully. 16666 */ 16667 if ((un->un_f_is_fibre == TRUE) && 16668 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16669 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16670 scsi_dmafree(pktp); 16671 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16672 } 16673 #endif 16674 16675 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16676 "sdintr: arq done, sd_handle_auto_request_sense\n"); 16677 16678 sd_handle_auto_request_sense(un, bp, xp, pktp); 16679 goto exit; 16680 } 16681 16682 /* Next see if this is the REQUEST SENSE pkt for the instance */ 16683 if (pktp->pkt_flags & FLAG_SENSING) { 16684 /* This pktp is from the unit's REQUEST_SENSE command */ 16685 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16686 "sdintr: sd_handle_request_sense\n"); 16687 sd_handle_request_sense(un, bp, xp, pktp); 16688 goto exit; 16689 } 16690 16691 /* 16692 * Check to see if the command successfully completed as requested; 16693 * this is the most common case (and also the hot performance path). 16694 * 16695 * Requirements for successful completion are: 16696 * pkt_reason is CMD_CMPLT and packet status is status good. 16697 * In addition: 16698 * - A residual of zero indicates successful completion no matter what 16699 * the command is. 
16700 * - If the residual is not zero and the command is not a read or 16701 * write, then it's still defined as successful completion. In other 16702 * words, if the command is a read or write the residual must be 16703 * zero for successful completion. 16704 * - If the residual is not zero and the command is a read or 16705 * write, and it's a USCSICMD, then it's still defined as 16706 * successful completion. 16707 */ 16708 if ((pktp->pkt_reason == CMD_CMPLT) && 16709 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 16710 16711 /* 16712 * Since this command is returned with a good status, we 16713 * can reset the count for Sonoma failover. 16714 */ 16715 un->un_sonoma_failure_count = 0; 16716 16717 /* 16718 * Return all USCSI commands on good status 16719 */ 16720 if (pktp->pkt_resid == 0) { 16721 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16722 "sdintr: returning command for resid == 0\n"); 16723 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 16724 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 16725 SD_UPDATE_B_RESID(bp, pktp); 16726 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16727 "sdintr: returning command for resid != 0\n"); 16728 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16729 SD_UPDATE_B_RESID(bp, pktp); 16730 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16731 "sdintr: returning uscsi command\n"); 16732 } else { 16733 goto not_successful; 16734 } 16735 sd_return_command(un, bp); 16736 16737 /* 16738 * Decrement counter to indicate that the callback routine 16739 * is done. 16740 */ 16741 un->un_in_callback--; 16742 ASSERT(un->un_in_callback >= 0); 16743 mutex_exit(SD_MUTEX(un)); 16744 16745 return; 16746 } 16747 16748 not_successful: 16749 16750 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16751 /* 16752 * The following is based upon knowledge of the underlying transport 16753 * and its use of DMA resources. This code should be removed when 16754 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 16755 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 16756 * and sd_start_cmds(). 16757 * 16758 * Free any DMA resources associated with this command if there 16759 * is a chance it could be retried or enqueued for later retry. 16760 * If we keep the DMA binding then mpxio cannot reissue the 16761 * command on another path whenever a path failure occurs. 16762 * 16763 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 16764 * causes the *entire* transfer to start over again from the 16765 * beginning of the request, even for PARTIAL chunks that 16766 * have already transferred successfully. 16767 * 16768 * This is only done for non-uscsi commands (and also skipped for the 16769 * driver's internal RQS command). Also just do this for Fibre Channel 16770 * devices as these are the only ones that support mpxio. 16771 */ 16772 if ((un->un_f_is_fibre == TRUE) && 16773 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16774 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16775 scsi_dmafree(pktp); 16776 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16777 } 16778 #endif 16779 16780 /* 16781 * The command did not successfully complete as requested so check 16782 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16783 * driver command that should not be retried so just return. If 16784 * FLAG_DIAGNOSE is not set the error will be processed below. 
16785 */ 16786 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16787 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16788 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 16789 /* 16790 * Issue a request sense if a check condition caused the error 16791 * (we handle the auto request sense case above), otherwise 16792 * just fail the command. 16793 */ 16794 if ((pktp->pkt_reason == CMD_CMPLT) && 16795 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 16796 sd_send_request_sense_command(un, bp, pktp); 16797 } else { 16798 sd_return_failed_command(un, bp, EIO); 16799 } 16800 goto exit; 16801 } 16802 16803 /* 16804 * The command did not successfully complete as requested so process 16805 * the error, retry, and/or attempt recovery. 16806 */ 16807 switch (pktp->pkt_reason) { 16808 case CMD_CMPLT: 16809 switch (SD_GET_PKT_STATUS(pktp)) { 16810 case STATUS_GOOD: 16811 /* 16812 * The command completed successfully with a non-zero 16813 * residual 16814 */ 16815 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16816 "sdintr: STATUS_GOOD \n"); 16817 sd_pkt_status_good(un, bp, xp, pktp); 16818 break; 16819 16820 case STATUS_CHECK: 16821 case STATUS_TERMINATED: 16822 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16823 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 16824 sd_pkt_status_check_condition(un, bp, xp, pktp); 16825 break; 16826 16827 case STATUS_BUSY: 16828 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16829 "sdintr: STATUS_BUSY\n"); 16830 sd_pkt_status_busy(un, bp, xp, pktp); 16831 break; 16832 16833 case STATUS_RESERVATION_CONFLICT: 16834 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16835 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 16836 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16837 break; 16838 16839 case STATUS_QFULL: 16840 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16841 "sdintr: STATUS_QFULL\n"); 16842 sd_pkt_status_qfull(un, bp, xp, pktp); 16843 break; 16844 16845 case STATUS_MET: 16846 case STATUS_INTERMEDIATE: 16847 case STATUS_SCSI2: 16848 case STATUS_INTERMEDIATE_MET: 16849 case STATUS_ACA_ACTIVE: 16850 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16851 "Unexpected SCSI status received: 0x%x\n", 16852 SD_GET_PKT_STATUS(pktp)); 16853 /* 16854 * Mark the ssc_flags when an invalid status 16855 * code is detected for a non-USCSI command.
16856 */ 16857 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16858 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 16859 0, "stat-code"); 16860 } 16861 sd_return_failed_command(un, bp, EIO); 16862 break; 16863 16864 default: 16865 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16866 "Invalid SCSI status received: 0x%x\n", 16867 SD_GET_PKT_STATUS(pktp)); 16868 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16869 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 16870 0, "stat-code"); 16871 } 16872 sd_return_failed_command(un, bp, EIO); 16873 break; 16874 16875 } 16876 break; 16877 16878 case CMD_INCOMPLETE: 16879 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16880 "sdintr: CMD_INCOMPLETE\n"); 16881 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 16882 break; 16883 case CMD_TRAN_ERR: 16884 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16885 "sdintr: CMD_TRAN_ERR\n"); 16886 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 16887 break; 16888 case CMD_RESET: 16889 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16890 "sdintr: CMD_RESET \n"); 16891 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 16892 break; 16893 case CMD_ABORTED: 16894 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16895 "sdintr: CMD_ABORTED \n"); 16896 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 16897 break; 16898 case CMD_TIMEOUT: 16899 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16900 "sdintr: CMD_TIMEOUT\n"); 16901 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 16902 break; 16903 case CMD_UNX_BUS_FREE: 16904 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16905 "sdintr: CMD_UNX_BUS_FREE \n"); 16906 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 16907 break; 16908 case CMD_TAG_REJECT: 16909 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16910 "sdintr: CMD_TAG_REJECT\n"); 16911 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 16912 break; 16913 default: 16914 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16915 "sdintr: default\n"); 16916 /* 16917 * Mark the ssc_flags for detecting an invalid pkt_reason. 16918 */ 16919 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16920 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON, 16921 0, "pkt-reason"); 16922 } 16923 sd_pkt_reason_default(un, bp, xp, pktp); 16924 break; 16925 } 16926 16927 exit: 16928 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 16929 16930 /* Decrement counter to indicate that the callback routine is done. */ 16931 un->un_in_callback--; 16932 ASSERT(un->un_in_callback >= 0); 16933 16934 /* 16935 * At this point, the pkt has been dispatched, ie, it is either 16936 * being re-tried or has been returned to its caller and should 16937 * not be referenced. 16938 */ 16939 16940 mutex_exit(SD_MUTEX(un)); 16941 } 16942 16943 16944 /* 16945 * Function: sd_print_incomplete_msg 16946 * 16947 * Description: Prints the error message for a CMD_INCOMPLETE error. 16948 * 16949 * Arguments: un - ptr to associated softstate for the device. 16950 * bp - ptr to the buf(9S) for the command. 16951 * arg - message string ptr 16952 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 16953 * or SD_NO_RETRY_ISSUED.
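 *
 *		Example (hypothetical log output): for cmdp == "read" with a
 *		retry being issued, the warning below reads
 *		"incomplete read- retrying".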
16954 * 16955 * Context: May be called under interrupt context 16956 */ 16957 16958 static void 16959 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 16960 { 16961 struct scsi_pkt *pktp; 16962 char *msgp; 16963 char *cmdp = arg; 16964 16965 ASSERT(un != NULL); 16966 ASSERT(mutex_owned(SD_MUTEX(un))); 16967 ASSERT(bp != NULL); 16968 ASSERT(arg != NULL); 16969 pktp = SD_GET_PKTP(bp); 16970 ASSERT(pktp != NULL); 16971 16972 switch (code) { 16973 case SD_DELAYED_RETRY_ISSUED: 16974 case SD_IMMEDIATE_RETRY_ISSUED: 16975 msgp = "retrying"; 16976 break; 16977 case SD_NO_RETRY_ISSUED: 16978 default: 16979 msgp = "giving up"; 16980 break; 16981 } 16982 16983 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16984 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16985 "incomplete %s- %s\n", cmdp, msgp); 16986 } 16987 } 16988 16989 16990 16991 /* 16992 * Function: sd_pkt_status_good 16993 * 16994 * Description: Processing for a STATUS_GOOD code in pkt_status. 16995 * 16996 * Context: May be called under interrupt context 16997 */ 16998 16999 static void 17000 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 17001 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17002 { 17003 char *cmdp; 17004 17005 ASSERT(un != NULL); 17006 ASSERT(mutex_owned(SD_MUTEX(un))); 17007 ASSERT(bp != NULL); 17008 ASSERT(xp != NULL); 17009 ASSERT(pktp != NULL); 17010 ASSERT(pktp->pkt_reason == CMD_CMPLT); 17011 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 17012 ASSERT(pktp->pkt_resid != 0); 17013 17014 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 17015 17016 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17017 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 17018 case SCMD_READ: 17019 cmdp = "read"; 17020 break; 17021 case SCMD_WRITE: 17022 cmdp = "write"; 17023 break; 17024 default: 17025 SD_UPDATE_B_RESID(bp, pktp); 17026 sd_return_command(un, bp); 17027 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 17028 return; 17029 } 17030 17031 /* 17032 * See if we can retry the read/write, preferably immediately. 17033 * If retries are exhausted, then sd_retry_command() will update 17034 * the b_resid count. 17035 */ 17036 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 17037 cmdp, EIO, (clock_t)0, NULL); 17038 17039 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 17040 } 17041 17042 17043 17044 17045 17046 /* 17047 * Function: sd_handle_request_sense 17048 * 17049 * Description: Processing for non-auto Request Sense command. 17050 * 17051 * Arguments: un - ptr to associated softstate 17052 * sense_bp - ptr to buf(9S) for the RQS command 17053 * sense_xp - ptr to the sd_xbuf for the RQS command 17054 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 17055 * 17056 * Context: May be called under interrupt context 17057 */ 17058 17059 static void 17060 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 17061 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 17062 { 17063 struct buf *cmd_bp; /* buf for the original command */ 17064 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 17065 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 17066 size_t actual_len; /* actual sense data length */ 17067 17068 ASSERT(un != NULL); 17069 ASSERT(mutex_owned(SD_MUTEX(un))); 17070 ASSERT(sense_bp != NULL); 17071 ASSERT(sense_xp != NULL); 17072 ASSERT(sense_pktp != NULL); 17073 17074 /* 17075 * Note the sense_bp, sense_xp, and sense_pktp here are for the 17076 * RQS command and not the original command.
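 *
 * As a sketch of the linkage (names as used in the code below): the RQS
 * xbuf records the victim command, so
 *
 *	cmd_bp   == sense_xp->xb_sense_bp
 *	cmd_xp   == SD_GET_XBUF(cmd_bp)
 *	cmd_pktp == SD_GET_PKTP(cmd_bp)	(still carries FLAG_SENSING)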
17077 */ 17078 ASSERT(sense_pktp == un->un_rqs_pktp); 17079 ASSERT(sense_bp == un->un_rqs_bp); 17080 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 17081 (FLAG_SENSING | FLAG_HEAD)); 17082 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 17083 FLAG_SENSING) == FLAG_SENSING); 17084 17085 /* These are the bp, xp, and pktp for the original command */ 17086 cmd_bp = sense_xp->xb_sense_bp; 17087 cmd_xp = SD_GET_XBUF(cmd_bp); 17088 cmd_pktp = SD_GET_PKTP(cmd_bp); 17089 17090 if (sense_pktp->pkt_reason != CMD_CMPLT) { 17091 /* 17092 * The REQUEST SENSE command failed. Release the REQUEST 17093 * SENSE command for re-use, get back the bp for the original 17094 * command, and attempt to re-try the original command if 17095 * FLAG_DIAGNOSE is not set in the original packet. 17096 */ 17097 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17098 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 17099 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 17100 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 17101 NULL, NULL, EIO, (clock_t)0, NULL); 17102 return; 17103 } 17104 } 17105 17106 /* 17107 * Save the relevant sense info into the xp for the original cmd. 17108 * 17109 * Note: if the request sense failed the state info will be zero 17110 * as set in sd_mark_rqs_busy() 17111 */ 17112 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 17113 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 17114 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 17115 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 17116 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 17117 SENSE_LENGTH)) { 17118 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 17119 MAX_SENSE_LENGTH); 17120 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 17121 } else { 17122 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 17123 SENSE_LENGTH); 17124 if (actual_len < SENSE_LENGTH) { 17125 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 17126 } else { 17127 cmd_xp->xb_sense_resid = 0; 17128 } 17129 } 17130 17131 /* 17132 * Free up the RQS command.... 17133 * NOTE: 17134 * Must do this BEFORE calling sd_validate_sense_data! 17135 * sd_validate_sense_data may return the original command in 17136 * which case the pkt will be freed and the flags can no 17137 * longer be touched. 17138 * SD_MUTEX is held through this process until the command 17139 * is dispatched based upon the sense data, so there are 17140 * no race conditions. 17141 */ 17142 (void) sd_mark_rqs_idle(un, sense_xp); 17143 17144 /* 17145 * For a retryable command see if we have valid sense data, if so then 17146 * turn it over to sd_decode_sense() to figure out the right course of 17147 * action. Just fail a non-retryable command. 17148 */ 17149 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 17150 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 17151 SD_SENSE_DATA_IS_VALID) { 17152 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 17153 } 17154 } else { 17155 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 17156 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 17157 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 17158 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 17159 sd_return_failed_command(un, cmd_bp, EIO); 17160 } 17161 } 17162 17163 17164 17165 17166 /* 17167 * Function: sd_handle_auto_request_sense 17168 * 17169 * Description: Processing for auto-request sense information. 
17170 * 17171 * Arguments: un - ptr to associated softstate 17172 * bp - ptr to buf(9S) for the command 17173 * xp - ptr to the sd_xbuf for the command 17174 * pktp - ptr to the scsi_pkt(9S) for the command 17175 * 17176 * Context: May be called under interrupt context 17177 */ 17178 17179 static void 17180 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 17181 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17182 { 17183 struct scsi_arq_status *asp; 17184 size_t actual_len; 17185 17186 ASSERT(un != NULL); 17187 ASSERT(mutex_owned(SD_MUTEX(un))); 17188 ASSERT(bp != NULL); 17189 ASSERT(xp != NULL); 17190 ASSERT(pktp != NULL); 17191 ASSERT(pktp != un->un_rqs_pktp); 17192 ASSERT(bp != un->un_rqs_bp); 17193 17194 /* 17195 * For auto-request sense, we get a scsi_arq_status back from 17196 * the HBA, with the sense data in the sts_sensedata member. 17197 * The pkt_scbp of the packet points to this scsi_arq_status. 17198 */ 17199 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 17200 17201 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 17202 /* 17203 * The auto REQUEST SENSE failed; see if we can re-try 17204 * the original command. 17205 */ 17206 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17207 "auto request sense failed (reason=%s)\n", 17208 scsi_rname(asp->sts_rqpkt_reason)); 17209 17210 sd_reset_target(un, pktp); 17211 17212 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17213 NULL, NULL, EIO, (clock_t)0, NULL); 17214 return; 17215 } 17216 17217 /* Save the relevant sense info into the xp for the original cmd. */ 17218 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 17219 xp->xb_sense_state = asp->sts_rqpkt_state; 17220 xp->xb_sense_resid = asp->sts_rqpkt_resid; 17221 if (xp->xb_sense_state & STATE_XARQ_DONE) { 17222 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 17223 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 17224 MAX_SENSE_LENGTH); 17225 } else { 17226 if (xp->xb_sense_resid > SENSE_LENGTH) { 17227 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 17228 } else { 17229 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 17230 } 17231 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 17232 if ((((struct uscsi_cmd *) 17233 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 17234 xp->xb_sense_resid = (((struct uscsi_cmd *) 17235 (xp->xb_pktinfo))->uscsi_rqlen) - 17236 actual_len; 17237 } else { 17238 xp->xb_sense_resid = 0; 17239 } 17240 } 17241 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 17242 } 17243 17244 /* 17245 * See if we have valid sense data, if so then turn it over to 17246 * sd_decode_sense() to figure out the right course of action. 17247 */ 17248 if (sd_validate_sense_data(un, bp, xp, actual_len) == 17249 SD_SENSE_DATA_IS_VALID) { 17250 sd_decode_sense(un, bp, xp, pktp); 17251 } 17252 } 17253 17254 17255 /* 17256 * Function: sd_print_sense_failed_msg 17257 * 17258 * Description: Print log message when RQS has failed. 
17259 * 17260 * Arguments: un - ptr to associated softstate 17261 * bp - ptr to buf(9S) for the command 17262 * arg - generic message string ptr 17263 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17264 * or SD_NO_RETRY_ISSUED 17265 * 17266 * Context: May be called from interrupt context 17267 */ 17268 17269 static void 17270 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 17271 int code) 17272 { 17273 char *msgp = arg; 17274 17275 ASSERT(un != NULL); 17276 ASSERT(mutex_owned(SD_MUTEX(un))); 17277 ASSERT(bp != NULL); 17278 17279 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 17280 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 17281 } 17282 } 17283 17284 17285 /* 17286 * Function: sd_validate_sense_data 17287 * 17288 * Description: Check the given sense data for validity. 17289 * If the sense data is not valid, the command will 17290 * be either failed or retried! 17291 * 17292 * Return Code: SD_SENSE_DATA_IS_INVALID 17293 * SD_SENSE_DATA_IS_VALID 17294 * 17295 * Context: May be called from interrupt context 17296 */ 17297 17298 static int 17299 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17300 size_t actual_len) 17301 { 17302 struct scsi_extended_sense *esp; 17303 struct scsi_pkt *pktp; 17304 char *msgp = NULL; 17305 sd_ssc_t *sscp; 17306 17307 ASSERT(un != NULL); 17308 ASSERT(mutex_owned(SD_MUTEX(un))); 17309 ASSERT(bp != NULL); 17310 ASSERT(bp != un->un_rqs_bp); 17311 ASSERT(xp != NULL); 17312 ASSERT(un->un_fm_private != NULL); 17313 17314 pktp = SD_GET_PKTP(bp); 17315 ASSERT(pktp != NULL); 17316 17317 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 17318 ASSERT(sscp != NULL); 17319 17320 /* 17321 * Check the status of the RQS command (auto or manual). 17322 */ 17323 switch (xp->xb_sense_status & STATUS_MASK) { 17324 case STATUS_GOOD: 17325 break; 17326 17327 case STATUS_RESERVATION_CONFLICT: 17328 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 17329 return (SD_SENSE_DATA_IS_INVALID); 17330 17331 case STATUS_BUSY: 17332 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17333 "Busy Status on REQUEST SENSE\n"); 17334 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 17335 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 17336 return (SD_SENSE_DATA_IS_INVALID); 17337 17338 case STATUS_QFULL: 17339 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17340 "QFULL Status on REQUEST SENSE\n"); 17341 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 17342 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 17343 return (SD_SENSE_DATA_IS_INVALID); 17344 17345 case STATUS_CHECK: 17346 case STATUS_TERMINATED: 17347 msgp = "Check Condition on REQUEST SENSE\n"; 17348 goto sense_failed; 17349 17350 default: 17351 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 17352 goto sense_failed; 17353 } 17354 17355 /* 17356 * See if we got the minimum required amount of sense data. 17357 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 17358 * or less. 
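 *
 * As an illustration (fixed-format layout; coverage assumed): a usable
 * transfer must at least include byte 0 (the response code) through
 * byte 2 (the sense key) before the data can be decoded, which is what
 * the minimum-length check below enforces.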
17359 */ 17360 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 17361 (actual_len == 0)) { 17362 msgp = "Request Sense couldn't get sense data\n"; 17363 goto sense_failed; 17364 } 17365 17366 if (actual_len < SUN_MIN_SENSE_LENGTH) { 17367 msgp = "Not enough sense information\n"; 17368 /* Mark the ssc_flags for detecting invalid sense data */ 17369 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17370 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17371 "sense-data"); 17372 } 17373 goto sense_failed; 17374 } 17375 17376 /* 17377 * We require the extended sense data 17378 */ 17379 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 17380 if (esp->es_class != CLASS_EXTENDED_SENSE) { 17381 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17382 static char tmp[8]; 17383 static char buf[148]; 17384 char *p = (char *)(xp->xb_sense_data); 17385 int i; 17386 17387 mutex_enter(&sd_sense_mutex); 17388 (void) strcpy(buf, "undecodable sense information:"); 17389 for (i = 0; i < actual_len; i++) { 17390 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 17391 (void) strcpy(&buf[strlen(buf)], tmp); 17392 } 17393 i = strlen(buf); 17394 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 17395 17396 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) { 17397 scsi_log(SD_DEVINFO(un), sd_label, 17398 CE_WARN, buf); 17399 } 17400 mutex_exit(&sd_sense_mutex); 17401 } 17402 17403 /* Mark the ssc_flags for detecting invalid sense data */ 17404 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17405 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17406 "sense-data"); 17407 } 17408 17409 /* Note: Legacy behavior, fail the command with no retry */ 17410 sd_return_failed_command(un, bp, EIO); 17411 return (SD_SENSE_DATA_IS_INVALID); 17412 } 17413 17414 /* 17415 * Check that es_code is valid (es_class concatenated with es_code 17416 * make up the "response code" field). es_class will always be 7, so 17417 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 17418 * format. 17419 */ 17420 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 17421 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 17422 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 17423 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 17424 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 17425 /* Mark the ssc_flags for detecting invalid sense data */ 17426 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17427 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17428 "sense-data"); 17429 } 17430 goto sense_failed; 17431 } 17432 17433 return (SD_SENSE_DATA_IS_VALID); 17434 17435 sense_failed: 17436 /* 17437 * If the request sense failed (for whatever reason), attempt 17438 * to retry the original command. 17439 */ 17440 #if defined(__i386) || defined(__amd64) 17441 /* 17442 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 17443 * sddef.h for the SPARC platform, while x86 uses one binary 17444 * for both SCSI and FC. 17445 * The SD_RETRY_DELAY value here needs to be adjusted 17446 * whenever SD_RETRY_DELAY changes in sddef.h 17447 */ 17448 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17449 sd_print_sense_failed_msg, msgp, EIO, 17450 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 17451 #else 17452 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17453 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 17454 #endif 17455 17456 return (SD_SENSE_DATA_IS_INVALID); 17457 } 17458 17459 /* 17460 * Function: sd_decode_sense 17461 * 17462 * Description: Take recovery action(s) when SCSI Sense Data is received. 17463 * 17464 * Context: Interrupt context.
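 *
 *		For reference (hypothetical fixed-format sense): given the
 *		bytes 70 00 05 ..., the response code is 0x70 and
 *		scsi_sense_key() extracts 0x5 (KEY_ILLEGAL_REQUEST) from the
 *		low nibble of byte 2, which selects
 *		sd_sense_key_illegal_request() in the switch below.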
17465 */ 17466 17467 static void 17468 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17469 struct scsi_pkt *pktp) 17470 { 17471 uint8_t sense_key; 17472 17473 ASSERT(un != NULL); 17474 ASSERT(mutex_owned(SD_MUTEX(un))); 17475 ASSERT(bp != NULL); 17476 ASSERT(bp != un->un_rqs_bp); 17477 ASSERT(xp != NULL); 17478 ASSERT(pktp != NULL); 17479 17480 sense_key = scsi_sense_key(xp->xb_sense_data); 17481 17482 switch (sense_key) { 17483 case KEY_NO_SENSE: 17484 sd_sense_key_no_sense(un, bp, xp, pktp); 17485 break; 17486 case KEY_RECOVERABLE_ERROR: 17487 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 17488 bp, xp, pktp); 17489 break; 17490 case KEY_NOT_READY: 17491 sd_sense_key_not_ready(un, xp->xb_sense_data, 17492 bp, xp, pktp); 17493 break; 17494 case KEY_MEDIUM_ERROR: 17495 case KEY_HARDWARE_ERROR: 17496 sd_sense_key_medium_or_hardware_error(un, 17497 xp->xb_sense_data, bp, xp, pktp); 17498 break; 17499 case KEY_ILLEGAL_REQUEST: 17500 sd_sense_key_illegal_request(un, bp, xp, pktp); 17501 break; 17502 case KEY_UNIT_ATTENTION: 17503 sd_sense_key_unit_attention(un, xp->xb_sense_data, 17504 bp, xp, pktp); 17505 break; 17506 case KEY_WRITE_PROTECT: 17507 case KEY_VOLUME_OVERFLOW: 17508 case KEY_MISCOMPARE: 17509 sd_sense_key_fail_command(un, bp, xp, pktp); 17510 break; 17511 case KEY_BLANK_CHECK: 17512 sd_sense_key_blank_check(un, bp, xp, pktp); 17513 break; 17514 case KEY_ABORTED_COMMAND: 17515 sd_sense_key_aborted_command(un, bp, xp, pktp); 17516 break; 17517 case KEY_VENDOR_UNIQUE: 17518 case KEY_COPY_ABORTED: 17519 case KEY_EQUAL: 17520 case KEY_RESERVED: 17521 default: 17522 sd_sense_key_default(un, xp->xb_sense_data, 17523 bp, xp, pktp); 17524 break; 17525 } 17526 } 17527 17528 17529 /* 17530 * Function: sd_dump_memory 17531 * 17532 * Description: Debug logging routine to print the contents of a user provided 17533 * buffer. The output of the buffer is broken up into 256 byte 17534 * segments due to a size constraint of the scsi_log 17535 * implementation. 17536 * 17537 * Arguments: un - ptr to softstate 17538 * comp - component mask 17539 * title - "title" string to precede data when printed 17540 * data - ptr to data block to be printed 17541 * len - size of data block to be printed 17542 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 17543 * 17544 * Context: May be called from interrupt context 17545 */ 17546 17547 #define SD_DUMP_MEMORY_BUF_SIZE 256 17548 17549 static char *sd_dump_format_string[] = { 17550 " 0x%02x", 17551 " %c" 17552 }; 17553 17554 static void 17555 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 17556 int len, int fmt) 17557 { 17558 int i, j; 17559 int avail_count; 17560 int start_offset; 17561 int end_offset; 17562 size_t entry_len; 17563 char *bufp; 17564 char *local_buf; 17565 char *format_string; 17566 17567 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 17568 17569 /* 17570 * In the debug version of the driver, this function is called from a 17571 * number of places which are NOPs in the release driver. 17572 * The debug driver therefore has additional methods of filtering 17573 * debug output. 17574 */ 17575 #ifdef SDDEBUG 17576 /* 17577 * In the debug version of the driver we can reduce the amount of debug 17578 * messages by setting sd_error_level to something other than 17579 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 17580 * sd_component_mask.
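 *
 * Example (assumed tunable settings): in the debug driver the dump below
 * is produced only when sd_error_level == SCSI_ERR_ALL, sd_level_mask
 * has SD_LOGMASK_DUMP_MEM or SD_LOGMASK_DIAG set, and sd_component_mask
 * includes the caller's component; any other combination returns early.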
17581 */ 17582 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 17583 (sd_error_level != SCSI_ERR_ALL)) { 17584 return; 17585 } 17586 if (((sd_component_mask & comp) == 0) || 17587 (sd_error_level != SCSI_ERR_ALL)) { 17588 return; 17589 } 17590 #else 17591 if (sd_error_level != SCSI_ERR_ALL) { 17592 return; 17593 } 17594 #endif 17595 17596 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 17597 bufp = local_buf; 17598 /* 17599 * Available length is the length of local_buf[], minus the 17600 * length of the title string, minus one for the ":", minus 17601 * one for the newline, minus one for the NULL terminator. 17602 * This gives the #bytes available for holding the printed 17603 * values from the given data buffer. 17604 */ 17605 if (fmt == SD_LOG_HEX) { 17606 format_string = sd_dump_format_string[0]; 17607 } else /* SD_LOG_CHAR */ { 17608 format_string = sd_dump_format_string[1]; 17609 } 17610 /* 17611 * Available count is the number of elements from the given 17612 * data buffer that we can fit into the available length. 17613 * This is based upon the size of the format string used. 17614 * Make one entry and find its size. 17615 */ 17616 (void) sprintf(bufp, format_string, data[0]); 17617 entry_len = strlen(bufp); 17618 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 17619 17620 j = 0; 17621 while (j < len) { 17622 bufp = local_buf; 17623 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 17624 start_offset = j; 17625 17626 end_offset = start_offset + avail_count; 17627 17628 (void) sprintf(bufp, "%s:", title); 17629 bufp += strlen(bufp); 17630 for (i = start_offset; ((i < end_offset) && (j < len)); 17631 i++, j++) { 17632 (void) sprintf(bufp, format_string, data[i]); 17633 bufp += entry_len; 17634 } 17635 (void) sprintf(bufp, "\n"); 17636 17637 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 17638 } 17639 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 17640 } 17641 17642 /* 17643 * Function: sd_print_sense_msg 17644 * 17645 * Description: Log a message based upon the given sense data.
17646 * 17647 * Arguments: un - ptr to associated softstate 17648 * bp - ptr to buf(9S) for the command 17649 * arg - ptr to the associated sd_sense_info struct 17650 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17651 * or SD_NO_RETRY_ISSUED 17652 * 17653 * Context: May be called from interrupt context 17654 */ 17655 17656 static void 17657 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 17658 { 17659 struct sd_xbuf *xp; 17660 struct scsi_pkt *pktp; 17661 uint8_t *sensep; 17662 daddr_t request_blkno; 17663 diskaddr_t err_blkno; 17664 int severity; 17665 int pfa_flag; 17666 extern struct scsi_key_strings scsi_cmds[]; 17667 17668 ASSERT(un != NULL); 17669 ASSERT(mutex_owned(SD_MUTEX(un))); 17670 ASSERT(bp != NULL); 17671 xp = SD_GET_XBUF(bp); 17672 ASSERT(xp != NULL); 17673 pktp = SD_GET_PKTP(bp); 17674 ASSERT(pktp != NULL); 17675 ASSERT(arg != NULL); 17676 17677 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 17678 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 17679 17680 if ((code == SD_DELAYED_RETRY_ISSUED) || 17681 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 17682 severity = SCSI_ERR_RETRYABLE; 17683 } 17684 17685 /* Use absolute block number for the request block number */ 17686 request_blkno = xp->xb_blkno; 17687 17688 /* 17689 * Now try to get the error block number from the sense data 17690 */ 17691 sensep = xp->xb_sense_data; 17692 17693 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 17694 (uint64_t *)&err_blkno)) { 17695 /* 17696 * We retrieved the error block number from the information 17697 * portion of the sense data. 17698 * 17699 * For USCSI commands we are better off using the error 17700 * block no. as the requested block no. (This is the best 17701 * we can estimate.) 17702 */ 17703 if ((SD_IS_BUFIO(xp) == FALSE) && 17704 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 17705 request_blkno = err_blkno; 17706 } 17707 } else { 17708 /* 17709 * Without the es_valid bit set (for fixed format) or an 17710 * information descriptor (for descriptor format) we cannot 17711 * be certain of the error blkno, so just use the 17712 * request_blkno. 17713 */ 17714 err_blkno = (diskaddr_t)request_blkno; 17715 } 17716 17717 /* 17718 * The following will log the buffer contents for the release driver 17719 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 17720 * level is set to verbose.
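 *
 * A dump line emitted below looks like (bytes hypothetical):
 *
 *	Failed CDB: 0x28 0x00 0x00 0x12 0xd6 0x87 0x00 0x00 0x10 0x00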
17721 */ 17722 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 17723 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 17724 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 17725 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 17726 17727 if (pfa_flag == FALSE) { 17728 /* This is normally only set for USCSI */ 17729 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 17730 return; 17731 } 17732 17733 if ((SD_IS_BUFIO(xp) == TRUE) && 17734 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 17735 (severity < sd_error_level))) { 17736 return; 17737 } 17738 } 17739 /* 17740 * Check for Sonoma Failover and keep a count of how many failed I/Os 17741 */ 17742 if ((SD_IS_LSI(un)) && 17743 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 17744 (scsi_sense_asc(sensep) == 0x94) && 17745 (scsi_sense_ascq(sensep) == 0x01)) { 17746 un->un_sonoma_failure_count++; 17747 if (un->un_sonoma_failure_count > 1) { 17748 return; 17749 } 17750 } 17751 17752 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP || 17753 ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) && 17754 (pktp->pkt_resid == 0))) { 17755 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 17756 request_blkno, err_blkno, scsi_cmds, 17757 (struct scsi_extended_sense *)sensep, 17758 un->un_additional_codes, NULL); 17759 } 17760 } 17761 17762 /* 17763 * Function: sd_sense_key_no_sense 17764 * 17765 * Description: Recovery action when sense data was not received. 17766 * 17767 * Context: May be called from interrupt context 17768 */ 17769 17770 static void 17771 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 17772 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17773 { 17774 struct sd_sense_info si; 17775 17776 ASSERT(un != NULL); 17777 ASSERT(mutex_owned(SD_MUTEX(un))); 17778 ASSERT(bp != NULL); 17779 ASSERT(xp != NULL); 17780 ASSERT(pktp != NULL); 17781 17782 si.ssi_severity = SCSI_ERR_FATAL; 17783 si.ssi_pfa_flag = FALSE; 17784 17785 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17786 17787 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17788 &si, EIO, (clock_t)0, NULL); 17789 } 17790 17791 17792 /* 17793 * Function: sd_sense_key_recoverable_error 17794 * 17795 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 17796 * 17797 * Context: May be called from interrupt context 17798 */ 17799 17800 static void 17801 sd_sense_key_recoverable_error(struct sd_lun *un, 17802 uint8_t *sense_datap, 17803 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17804 { 17805 struct sd_sense_info si; 17806 uint8_t asc = scsi_sense_asc(sense_datap); 17807 17808 ASSERT(un != NULL); 17809 ASSERT(mutex_owned(SD_MUTEX(un))); 17810 ASSERT(bp != NULL); 17811 ASSERT(xp != NULL); 17812 ASSERT(pktp != NULL); 17813 17814 /* 17815 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 17816 */ 17817 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 17818 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17819 si.ssi_severity = SCSI_ERR_INFO; 17820 si.ssi_pfa_flag = TRUE; 17821 } else { 17822 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17823 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 17824 si.ssi_severity = SCSI_ERR_RECOVERED; 17825 si.ssi_pfa_flag = FALSE; 17826 } 17827 17828 if (pktp->pkt_resid == 0) { 17829 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17830 sd_return_command(un, bp); 17831 return; 17832 } 17833 17834 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17835 &si, EIO, (clock_t)0, NULL); 17836 } 17837 17838 17839 17840 17841 /* 17842 * Function: sd_sense_key_not_ready 17843 * 17844 * Description: Recovery actions for a SCSI "Not Ready" sense key.
17845 * 17846 * Context: May be called from interrupt context 17847 */ 17848 17849 static void 17850 sd_sense_key_not_ready(struct sd_lun *un, 17851 uint8_t *sense_datap, 17852 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17853 { 17854 struct sd_sense_info si; 17855 uint8_t asc = scsi_sense_asc(sense_datap); 17856 uint8_t ascq = scsi_sense_ascq(sense_datap); 17857 17858 ASSERT(un != NULL); 17859 ASSERT(mutex_owned(SD_MUTEX(un))); 17860 ASSERT(bp != NULL); 17861 ASSERT(xp != NULL); 17862 ASSERT(pktp != NULL); 17863 17864 si.ssi_severity = SCSI_ERR_FATAL; 17865 si.ssi_pfa_flag = FALSE; 17866 17867 /* 17868 * Update error stats after first NOT READY error. Disks may have 17869 * been powered down and may need to be restarted. For CDROMs, 17870 * report NOT READY errors only if media is present. 17871 */ 17872 if ((ISCD(un) && (asc == 0x3A)) || 17873 (xp->xb_nr_retry_count > 0)) { 17874 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17875 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 17876 } 17877 17878 /* 17879 * Just fail if the "not ready" retry limit has been reached. 17880 */ 17881 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) { 17882 /* Special check for error message printing for removables. */ 17883 if (un->un_f_has_removable_media && (asc == 0x04) && 17884 (ascq >= 0x04)) { 17885 si.ssi_severity = SCSI_ERR_ALL; 17886 } 17887 goto fail_command; 17888 } 17889 17890 /* 17891 * Check the ASC and ASCQ in the sense data as needed, to determine 17892 * what to do. 17893 */ 17894 switch (asc) { 17895 case 0x04: /* LOGICAL UNIT NOT READY */ 17896 /* 17897 * Disk drives that don't spin up result in a very long delay 17898 * in format without warning messages. We will log a message 17899 * if the error level is set to verbose. 17900 */ 17901 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17902 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17903 "logical unit not ready, resetting disk\n"); 17904 } 17905 17906 /* 17907 * There are different requirements for CDROMs and disks for 17908 * the number of retries. If a CD-ROM is reporting this, it is 17909 * probably reading TOC and is in the process of getting 17910 * ready, so we should keep on trying for a long time to make 17911 * sure that all types of media are taken into account (for 17912 * some media the drive takes a long time to read TOC). For 17913 * disks we do not want to retry this too many times as this 17914 * can cause a long hang in format when the drive refuses to 17915 * spin up (a very common failure). 17916 */ 17917 switch (ascq) { 17918 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 17919 /* 17920 * Disk drives frequently refuse to spin up which 17921 * results in a very long hang in format without 17922 * warning messages. 17923 * 17924 * Note: This code preserves the legacy behavior of 17925 * comparing xb_nr_retry_count against zero for fibre 17926 * channel targets instead of comparing against the 17927 * un_reset_retry_count value. The reason for this 17928 * discrepancy has been so utterly lost beneath the 17929 * Sands of Time that even Indiana Jones could not 17930 * find it.
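 *
 * Net effect of the branches below: with no START STOP recovery
 * pending (un_startstop_timeid == NULL), a fibre target is reset
 * once xb_nr_retry_count > 0, while a parallel target waits until
 * xb_nr_retry_count exceeds un_reset_retry_count; setting
 * SD_LOGMASK_DIAG in sd_level_mask triggers the reset on the
 * first pass as well.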
17931 */ 17932 if (un->un_f_is_fibre == TRUE) { 17933 if (((sd_level_mask & SD_LOGMASK_DIAG) || 17934 (xp->xb_nr_retry_count > 0)) && 17935 (un->un_startstop_timeid == NULL)) { 17936 scsi_log(SD_DEVINFO(un), sd_label, 17937 CE_WARN, "logical unit not ready, " 17938 "resetting disk\n"); 17939 sd_reset_target(un, pktp); 17940 } 17941 } else { 17942 if (((sd_level_mask & SD_LOGMASK_DIAG) || 17943 (xp->xb_nr_retry_count > 17944 un->un_reset_retry_count)) && 17945 (un->un_startstop_timeid == NULL)) { 17946 scsi_log(SD_DEVINFO(un), sd_label, 17947 CE_WARN, "logical unit not ready, " 17948 "resetting disk\n"); 17949 sd_reset_target(un, pktp); 17950 } 17951 } 17952 break; 17953 17954 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 17955 /* 17956 * If the target is in the process of becoming 17957 * ready, just proceed with the retry. This can 17958 * happen with CD-ROMs that take a long time to 17959 * read TOC after a power cycle or reset. 17960 */ 17961 goto do_retry; 17962 17963 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 17964 break; 17965 17966 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 17967 /* 17968 * Retries cannot help here so just fail right away. 17969 */ 17970 goto fail_command; 17971 17972 case 0x88: 17973 /* 17974 * Vendor-unique code for T3/T4: it indicates a 17975 * path problem in a multipathed config, but as far as 17976 * the target driver is concerned it equates to a fatal 17977 * error, so we should just fail the command right away 17978 * (without printing anything to the console). If this 17979 * is not a T3/T4, fall thru to the default recovery 17980 * action. 17981 * T3/T4 is FC only; no need to check is_fibre 17982 */ 17983 if (SD_IS_T3(un) || SD_IS_T4(un)) { 17984 sd_return_failed_command(un, bp, EIO); 17985 return; 17986 } 17987 /* FALLTHRU */ 17988 17989 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 17990 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 17991 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 17992 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 17993 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 17994 default: /* Possible future codes in SCSI spec? */ 17995 /* 17996 * For removable-media devices, do not retry if 17997 * ASCQ > 2 as these result mostly from USCSI commands 17998 * on MMC devices issued to check status of an 17999 * operation initiated in immediate mode. Also for 18000 * ASCQ >= 4 do not print console messages as these 18001 * mainly represent a user-initiated operation 18002 * instead of a system failure. 18003 */ 18004 if (un->un_f_has_removable_media) { 18005 si.ssi_severity = SCSI_ERR_ALL; 18006 goto fail_command; 18007 } 18008 break; 18009 } 18010 18011 /* 18012 * As part of our recovery attempt for the NOT READY 18013 * condition, we issue a START STOP UNIT command. However 18014 * we want to wait for a short delay before attempting this 18015 * as there may still be more commands coming back from the 18016 * target with the check condition. To do this we use 18017 * timeout(9F) to call sd_start_stop_unit_callback() after 18018 * the delay interval expires. (sd_start_stop_unit_callback() 18019 * dispatches sd_start_stop_unit_task(), which will issue 18020 * the actual START STOP UNIT command.) The delay interval 18021 * is one-half of the delay that we will use to retry the 18022 * command that generated the NOT READY condition.
18023 *
18024 * Note that we could just dispatch sd_start_stop_unit_task()
18025 * from here and allow it to sleep for the delay interval,
18026 * but then we would be tying up the taskq thread
18027 * unnecessarily for the duration of the delay.
18028 *
18029 * Do not issue the START STOP UNIT if the current command
18030 * is already a START STOP UNIT.
18031 */
18032 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
18033 break;
18034 }
18035 
18036 /*
18037 * Do not schedule the timeout if one is already pending.
18038 */
18039 if (un->un_startstop_timeid != NULL) {
18040 SD_INFO(SD_LOG_ERROR, un,
18041 "sd_sense_key_not_ready: restart already issued to"
18042 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
18043 ddi_get_instance(SD_DEVINFO(un)));
18044 break;
18045 }
18046 
18047 /*
18048 * Schedule the START STOP UNIT command, then queue the command
18049 * for a retry.
18050 *
18051 * Note: A timeout is not scheduled for this retry because we
18052 * want the retry to be serial with the START_STOP_UNIT. The
18053 * retry will be started when the START_STOP_UNIT is completed
18054 * in sd_start_stop_unit_task.
18055 */
18056 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
18057 un, un->un_busy_timeout / 2);
18058 xp->xb_nr_retry_count++;
18059 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
18060 return;
18061 
18062 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
18063 if (sd_error_level < SCSI_ERR_RETRYABLE) {
18064 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18065 "unit does not respond to selection\n");
18066 }
18067 break;
18068 
18069 case 0x3A: /* MEDIUM NOT PRESENT */
18070 if (sd_error_level >= SCSI_ERR_FATAL) {
18071 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18072 "Caddy not inserted in drive\n");
18073 }
18074 
18075 sr_ejected(un);
18076 un->un_mediastate = DKIO_EJECTED;
18077 /* The state has changed, inform the media watch routines */
18078 cv_broadcast(&un->un_state_cv);
18079 /* Just fail if no media is present in the drive. */
18080 goto fail_command;
18081 
18082 default:
18083 if (sd_error_level < SCSI_ERR_RETRYABLE) {
18084 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
18085 "Unit not Ready. Additional sense code 0x%x\n",
18086 asc);
18087 }
18088 break;
18089 }
18090 
18091 do_retry:
18092 
18093 /*
18094 * Retry the command, as some targets may report NOT READY for
18095 * several seconds after being reset.
18096 */
18097 xp->xb_nr_retry_count++;
18098 si.ssi_severity = SCSI_ERR_RETRYABLE;
18099 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
18100 &si, EIO, un->un_busy_timeout, NULL);
18101 
18102 return;
18103 
18104 fail_command:
18105 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18106 sd_return_failed_command(un, bp, EIO);
18107 }
18108 
18109 
18110 
18111 /*
18112 * Function: sd_sense_key_medium_or_hardware_error
18113 *
18114 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
18115 * sense key.
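 * (These are sense keys 0x3 and 0x4, respectively, in SPC terms.)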
18116 *
18117 * Context: May be called from interrupt context
18118 */
18119 
18120 static void
18121 sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
18122 uint8_t *sense_datap,
18123 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18124 {
18125 struct sd_sense_info si;
18126 uint8_t sense_key = scsi_sense_key(sense_datap);
18127 uint8_t asc = scsi_sense_asc(sense_datap);
18128 
18129 ASSERT(un != NULL);
18130 ASSERT(mutex_owned(SD_MUTEX(un)));
18131 ASSERT(bp != NULL);
18132 ASSERT(xp != NULL);
18133 ASSERT(pktp != NULL);
18134 
18135 si.ssi_severity = SCSI_ERR_FATAL;
18136 si.ssi_pfa_flag = FALSE;
18137 
18138 if (sense_key == KEY_MEDIUM_ERROR) {
18139 SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
18140 }
18141 
18142 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18143 
18144 if ((un->un_reset_retry_count != 0) &&
18145 (xp->xb_retry_count == un->un_reset_retry_count)) {
18146 mutex_exit(SD_MUTEX(un));
18147 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
18148 if (un->un_f_allow_bus_device_reset == TRUE) {
18149 
18150 boolean_t try_resetting_target = B_TRUE;
18151 
18152 /*
18153 * We need to be able to handle specific ASC values when we
18154 * are handling a KEY_HARDWARE_ERROR. In particular,
18155 * taking the default action of resetting the target may
18156 * not be the appropriate way to attempt recovery.
18157 * Resetting a target because of a single LUN failure
18158 * victimizes all LUNs on that target.
18159 *
18160 * This is true for LSI arrays: if an LSI
18161 * array controller returns an ASC of 0x84 (LUN Dead), we
18162 * should trust it.
18163 */
18164 
18165 if (sense_key == KEY_HARDWARE_ERROR) {
18166 switch (asc) {
18167 case 0x84:
18168 if (SD_IS_LSI(un)) {
18169 try_resetting_target = B_FALSE;
18170 }
18171 break;
18172 default:
18173 break;
18174 }
18175 }
18176 
18177 if (try_resetting_target == B_TRUE) {
18178 int reset_retval = 0;
18179 if (un->un_f_lun_reset_enabled == TRUE) {
18180 SD_TRACE(SD_LOG_IO_CORE, un,
18181 "sd_sense_key_medium_or_hardware_"
18182 "error: issuing RESET_LUN\n");
18183 reset_retval =
18184 scsi_reset(SD_ADDRESS(un),
18185 RESET_LUN);
18186 }
18187 if (reset_retval == 0) {
18188 SD_TRACE(SD_LOG_IO_CORE, un,
18189 "sd_sense_key_medium_or_hardware_"
18190 "error: issuing RESET_TARGET\n");
18191 (void) scsi_reset(SD_ADDRESS(un),
18192 RESET_TARGET);
18193 }
18194 }
18195 }
18196 mutex_enter(SD_MUTEX(un));
18197 }
18198 
18199 /*
18200 * This really ought to be a fatal error, but we will retry anyway
18201 * as some drives report this as a spurious error.
18202 */
18203 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18204 &si, EIO, (clock_t)0, NULL);
18205 }
18206 
18207 
18208 
18209 /*
18210 * Function: sd_sense_key_illegal_request
18211 *
18212 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
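 * (An illegal request, e.g. an unsupported opcode or an
 * out-of-range CDB field, will fail identically on every retry,
 * which is why the routine below fails the command immediately.)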
18213 * 18214 * Context: May be called from interrupt context 18215 */ 18216 18217 static void 18218 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 18219 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18220 { 18221 struct sd_sense_info si; 18222 18223 ASSERT(un != NULL); 18224 ASSERT(mutex_owned(SD_MUTEX(un))); 18225 ASSERT(bp != NULL); 18226 ASSERT(xp != NULL); 18227 ASSERT(pktp != NULL); 18228 18229 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 18230 18231 si.ssi_severity = SCSI_ERR_INFO; 18232 si.ssi_pfa_flag = FALSE; 18233 18234 /* Pointless to retry if the target thinks it's an illegal request */ 18235 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18236 sd_return_failed_command(un, bp, EIO); 18237 } 18238 18239 18240 18241 18242 /* 18243 * Function: sd_sense_key_unit_attention 18244 * 18245 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 18246 * 18247 * Context: May be called from interrupt context 18248 */ 18249 18250 static void 18251 sd_sense_key_unit_attention(struct sd_lun *un, 18252 uint8_t *sense_datap, 18253 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18254 { 18255 /* 18256 * For UNIT ATTENTION we allow retries for one minute. Devices 18257 * like Sonoma can return UNIT ATTENTION close to a minute 18258 * under certain conditions. 18259 */ 18260 int retry_check_flag = SD_RETRIES_UA; 18261 boolean_t kstat_updated = B_FALSE; 18262 struct sd_sense_info si; 18263 uint8_t asc = scsi_sense_asc(sense_datap); 18264 uint8_t ascq = scsi_sense_ascq(sense_datap); 18265 18266 ASSERT(un != NULL); 18267 ASSERT(mutex_owned(SD_MUTEX(un))); 18268 ASSERT(bp != NULL); 18269 ASSERT(xp != NULL); 18270 ASSERT(pktp != NULL); 18271 18272 si.ssi_severity = SCSI_ERR_INFO; 18273 si.ssi_pfa_flag = FALSE; 18274 18275 18276 switch (asc) { 18277 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 18278 if (sd_report_pfa != 0) { 18279 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 18280 si.ssi_pfa_flag = TRUE; 18281 retry_check_flag = SD_RETRIES_STANDARD; 18282 goto do_retry; 18283 } 18284 18285 break; 18286 18287 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 18288 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 18289 un->un_resvd_status |= 18290 (SD_LOST_RESERVE | SD_WANT_RESERVE); 18291 } 18292 #ifdef _LP64 18293 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 18294 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 18295 un, KM_NOSLEEP) == 0) { 18296 /* 18297 * If we can't dispatch the task we'll just 18298 * live without descriptor sense. We can 18299 * try again on the next "unit attention" 18300 */ 18301 SD_ERROR(SD_LOG_ERROR, un, 18302 "sd_sense_key_unit_attention: " 18303 "Could not dispatch " 18304 "sd_reenable_dsense_task\n"); 18305 } 18306 } 18307 #endif /* _LP64 */ 18308 /* FALLTHRU */ 18309 18310 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 18311 if (!un->un_f_has_removable_media) { 18312 break; 18313 } 18314 18315 /* 18316 * When we get a unit attention from a removable-media device, 18317 * it may be in a state that will take a long time to recover 18318 * (e.g., from a reset). Since we are executing in interrupt 18319 * context here, we cannot wait around for the device to come 18320 * back. So hand this command off to sd_media_change_task() 18321 * for deferred processing under taskq thread context. (Note 18322 * that the command still may be failed if a problem is 18323 * encountered at a later time.) 
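 *
 * (The dispatch below uses KM_NOSLEEP for exactly this reason:
 * in interrupt context we must not block for memory, so on
 * allocation failure the command is failed immediately instead.)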
18324 */
18325 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
18326 KM_NOSLEEP) == 0) {
18327 /*
18328 * Cannot dispatch the request so fail the command.
18329 */
18330 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18331 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
18332 si.ssi_severity = SCSI_ERR_FATAL;
18333 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18334 sd_return_failed_command(un, bp, EIO);
18335 }
18336 
18337 /*
18338 * If the dispatch of sd_media_change_task() failed, the kstats
18339 * were already updated above. If the dispatch succeeded, the
18340 * kstats will be updated later by that task if it encounters an
18341 * error. Either way, set the kstat_updated flag here.
18342 */
18343 kstat_updated = B_TRUE;
18344 
18345 /*
18346 * Either the command has been successfully dispatched to a
18347 * taskq for retrying, or the dispatch failed. In either case
18348 * do NOT retry again by calling sd_retry_command. This sets up
18349 * two retries of the same command and when one completes and
18350 * frees the resources the other will access freed memory,
18351 * a bad thing.
18352 */
18353 return;
18354 
18355 default:
18356 break;
18357 }
18358 
18359 /*
18360 * ASC ASCQ
18361 * 2A 09 Capacity data has changed
18362 * 2A 01 Mode parameters changed
18363 * 3F 0E Reported luns data has changed
18364 * Arrays that support logical unit expansion should report
18365 * capacity changes (2Ah/09). "Mode parameters changed" and
18366 * "reported luns data has changed" are used as approximations.
18367 */
18368 if (((asc == 0x2a) && (ascq == 0x09)) ||
18369 ((asc == 0x2a) && (ascq == 0x01)) ||
18370 ((asc == 0x3f) && (ascq == 0x0e))) {
18371 if (taskq_dispatch(sd_tq, sd_target_change_task, un,
18372 KM_NOSLEEP) == 0) {
18373 SD_ERROR(SD_LOG_ERROR, un,
18374 "sd_sense_key_unit_attention: "
18375 "Could not dispatch sd_target_change_task\n");
18376 }
18377 }
18378 
18379 /*
18380 * Update kstat if we haven't done that.
18381 */
18382 if (!kstat_updated) {
18383 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18384 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
18385 }
18386 
18387 do_retry:
18388 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
18389 EIO, SD_UA_RETRY_DELAY, NULL);
18390 }
18391 
18392 
18393 
18394 /*
18395 * Function: sd_sense_key_fail_command
18396 *
18397 * Description: Used to fail a command when we don't like the sense key that
18398 * was returned.
18399 *
18400 * Context: May be called from interrupt context
18401 */
18402 
18403 static void
18404 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
18405 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18406 {
18407 struct sd_sense_info si;
18408 
18409 ASSERT(un != NULL);
18410 ASSERT(mutex_owned(SD_MUTEX(un)));
18411 ASSERT(bp != NULL);
18412 ASSERT(xp != NULL);
18413 ASSERT(pktp != NULL);
18414 
18415 si.ssi_severity = SCSI_ERR_FATAL;
18416 si.ssi_pfa_flag = FALSE;
18417 
18418 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18419 sd_return_failed_command(un, bp, EIO);
18420 }
18421 
18422 
18423 
18424 /*
18425 * Function: sd_sense_key_blank_check
18426 *
18427 * Description: Recovery actions for a SCSI "Blank Check" sense key.
18428 * Has no monetary connotation.
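 * (BLANK CHECK is sense key 0x8; it is typically returned when
 * reading blank or unwritten areas of write-once or CD media.)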
18429 * 18430 * Context: May be called from interrupt context 18431 */ 18432 18433 static void 18434 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 18435 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18436 { 18437 struct sd_sense_info si; 18438 18439 ASSERT(un != NULL); 18440 ASSERT(mutex_owned(SD_MUTEX(un))); 18441 ASSERT(bp != NULL); 18442 ASSERT(xp != NULL); 18443 ASSERT(pktp != NULL); 18444 18445 /* 18446 * Blank check is not fatal for removable devices, therefore 18447 * it does not require a console message. 18448 */ 18449 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 18450 SCSI_ERR_FATAL; 18451 si.ssi_pfa_flag = FALSE; 18452 18453 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18454 sd_return_failed_command(un, bp, EIO); 18455 } 18456 18457 18458 18459 18460 /* 18461 * Function: sd_sense_key_aborted_command 18462 * 18463 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 18464 * 18465 * Context: May be called from interrupt context 18466 */ 18467 18468 static void 18469 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 18470 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18471 { 18472 struct sd_sense_info si; 18473 18474 ASSERT(un != NULL); 18475 ASSERT(mutex_owned(SD_MUTEX(un))); 18476 ASSERT(bp != NULL); 18477 ASSERT(xp != NULL); 18478 ASSERT(pktp != NULL); 18479 18480 si.ssi_severity = SCSI_ERR_FATAL; 18481 si.ssi_pfa_flag = FALSE; 18482 18483 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18484 18485 /* 18486 * This really ought to be a fatal error, but we will retry anyway 18487 * as some drives report this as a spurious error. 18488 */ 18489 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18490 &si, EIO, drv_usectohz(100000), NULL); 18491 } 18492 18493 18494 18495 /* 18496 * Function: sd_sense_key_default 18497 * 18498 * Description: Default recovery action for several SCSI sense keys (basically 18499 * attempts a retry). 18500 * 18501 * Context: May be called from interrupt context 18502 */ 18503 18504 static void 18505 sd_sense_key_default(struct sd_lun *un, 18506 uint8_t *sense_datap, 18507 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18508 { 18509 struct sd_sense_info si; 18510 uint8_t sense_key = scsi_sense_key(sense_datap); 18511 18512 ASSERT(un != NULL); 18513 ASSERT(mutex_owned(SD_MUTEX(un))); 18514 ASSERT(bp != NULL); 18515 ASSERT(xp != NULL); 18516 ASSERT(pktp != NULL); 18517 18518 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18519 18520 /* 18521 * Undecoded sense key. Attempt retries and hope that will fix 18522 * the problem. Otherwise, we're dead. 18523 */ 18524 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 18525 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18526 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 18527 } 18528 18529 si.ssi_severity = SCSI_ERR_FATAL; 18530 si.ssi_pfa_flag = FALSE; 18531 18532 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18533 &si, EIO, (clock_t)0, NULL); 18534 } 18535 18536 18537 18538 /* 18539 * Function: sd_print_retry_msg 18540 * 18541 * Description: Print a message indicating the retry action being taken. 18542 * 18543 * Arguments: un - ptr to associated softstate 18544 * bp - ptr to buf(9S) for the command 18545 * arg - not used. 
18546 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18547 * or SD_NO_RETRY_ISSUED 18548 * 18549 * Context: May be called from interrupt context 18550 */ 18551 /* ARGSUSED */ 18552 static void 18553 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 18554 { 18555 struct sd_xbuf *xp; 18556 struct scsi_pkt *pktp; 18557 char *reasonp; 18558 char *msgp; 18559 18560 ASSERT(un != NULL); 18561 ASSERT(mutex_owned(SD_MUTEX(un))); 18562 ASSERT(bp != NULL); 18563 pktp = SD_GET_PKTP(bp); 18564 ASSERT(pktp != NULL); 18565 xp = SD_GET_XBUF(bp); 18566 ASSERT(xp != NULL); 18567 18568 ASSERT(!mutex_owned(&un->un_pm_mutex)); 18569 mutex_enter(&un->un_pm_mutex); 18570 if ((un->un_state == SD_STATE_SUSPENDED) || 18571 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 18572 (pktp->pkt_flags & FLAG_SILENT)) { 18573 mutex_exit(&un->un_pm_mutex); 18574 goto update_pkt_reason; 18575 } 18576 mutex_exit(&un->un_pm_mutex); 18577 18578 /* 18579 * Suppress messages if they are all the same pkt_reason; with 18580 * TQ, many (up to 256) are returned with the same pkt_reason. 18581 * If we are in panic, then suppress the retry messages. 18582 */ 18583 switch (flag) { 18584 case SD_NO_RETRY_ISSUED: 18585 msgp = "giving up"; 18586 break; 18587 case SD_IMMEDIATE_RETRY_ISSUED: 18588 case SD_DELAYED_RETRY_ISSUED: 18589 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 18590 ((pktp->pkt_reason == un->un_last_pkt_reason) && 18591 (sd_error_level != SCSI_ERR_ALL))) { 18592 return; 18593 } 18594 msgp = "retrying command"; 18595 break; 18596 default: 18597 goto update_pkt_reason; 18598 } 18599 18600 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 18601 scsi_rname(pktp->pkt_reason)); 18602 18603 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) { 18604 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18605 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 18606 } 18607 18608 update_pkt_reason: 18609 /* 18610 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 18611 * This is to prevent multiple console messages for the same failure 18612 * condition. Note that un->un_last_pkt_reason is NOT restored if & 18613 * when the command is retried successfully because there still may be 18614 * more commands coming back with the same value of pktp->pkt_reason. 18615 */ 18616 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 18617 un->un_last_pkt_reason = pktp->pkt_reason; 18618 } 18619 } 18620 18621 18622 /* 18623 * Function: sd_print_cmd_incomplete_msg 18624 * 18625 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 18626 * 18627 * Arguments: un - ptr to associated softstate 18628 * bp - ptr to buf(9S) for the command 18629 * arg - passed to sd_print_retry_msg() 18630 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18631 * or SD_NO_RETRY_ISSUED 18632 * 18633 * Context: May be called from interrupt context 18634 */ 18635 18636 static void 18637 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 18638 int code) 18639 { 18640 dev_info_t *dip; 18641 18642 ASSERT(un != NULL); 18643 ASSERT(mutex_owned(SD_MUTEX(un))); 18644 ASSERT(bp != NULL); 18645 18646 switch (code) { 18647 case SD_NO_RETRY_ISSUED: 18648 /* Command was failed. Someone turned off this target? 
*/ 18649 if (un->un_state != SD_STATE_OFFLINE) { 18650 /* 18651 * Suppress message if we are detaching and 18652 * device has been disconnected 18653 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 18654 * private interface and not part of the DDI 18655 */ 18656 dip = un->un_sd->sd_dev; 18657 if (!(DEVI_IS_DETACHING(dip) && 18658 DEVI_IS_DEVICE_REMOVED(dip))) { 18659 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18660 "disk not responding to selection\n"); 18661 } 18662 New_state(un, SD_STATE_OFFLINE); 18663 } 18664 break; 18665 18666 case SD_DELAYED_RETRY_ISSUED: 18667 case SD_IMMEDIATE_RETRY_ISSUED: 18668 default: 18669 /* Command was successfully queued for retry */ 18670 sd_print_retry_msg(un, bp, arg, code); 18671 break; 18672 } 18673 } 18674 18675 18676 /* 18677 * Function: sd_pkt_reason_cmd_incomplete 18678 * 18679 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 18680 * 18681 * Context: May be called from interrupt context 18682 */ 18683 18684 static void 18685 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 18686 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18687 { 18688 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 18689 18690 ASSERT(un != NULL); 18691 ASSERT(mutex_owned(SD_MUTEX(un))); 18692 ASSERT(bp != NULL); 18693 ASSERT(xp != NULL); 18694 ASSERT(pktp != NULL); 18695 18696 /* Do not do a reset if selection did not complete */ 18697 /* Note: Should this not just check the bit? */ 18698 if (pktp->pkt_state != STATE_GOT_BUS) { 18699 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18700 sd_reset_target(un, pktp); 18701 } 18702 18703 /* 18704 * If the target was not successfully selected, then set 18705 * SD_RETRIES_FAILFAST to indicate that we lost communication 18706 * with the target, and further retries and/or commands are 18707 * likely to take a long time. 18708 */ 18709 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 18710 flag |= SD_RETRIES_FAILFAST; 18711 } 18712 18713 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18714 18715 sd_retry_command(un, bp, flag, 18716 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18717 } 18718 18719 18720 18721 /* 18722 * Function: sd_pkt_reason_cmd_tran_err 18723 * 18724 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 18725 * 18726 * Context: May be called from interrupt context 18727 */ 18728 18729 static void 18730 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 18731 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18732 { 18733 ASSERT(un != NULL); 18734 ASSERT(mutex_owned(SD_MUTEX(un))); 18735 ASSERT(bp != NULL); 18736 ASSERT(xp != NULL); 18737 ASSERT(pktp != NULL); 18738 18739 /* 18740 * Do not reset if we got a parity error, or if 18741 * selection did not complete. 18742 */ 18743 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18744 /* Note: Should this not just check the bit for pkt_state? */ 18745 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 18746 (pktp->pkt_state != STATE_GOT_BUS)) { 18747 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18748 sd_reset_target(un, pktp); 18749 } 18750 18751 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18752 18753 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18754 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18755 } 18756 18757 18758 18759 /* 18760 * Function: sd_pkt_reason_cmd_reset 18761 * 18762 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
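 * (The retry below passes SD_RETRIES_VICTIM because, as noted in
 * the function body, this packet was most likely a victim of a
 * reset aimed at another target on the same bus.)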
18763 * 18764 * Context: May be called from interrupt context 18765 */ 18766 18767 static void 18768 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 18769 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18770 { 18771 ASSERT(un != NULL); 18772 ASSERT(mutex_owned(SD_MUTEX(un))); 18773 ASSERT(bp != NULL); 18774 ASSERT(xp != NULL); 18775 ASSERT(pktp != NULL); 18776 18777 /* The target may still be running the command, so try to reset. */ 18778 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18779 sd_reset_target(un, pktp); 18780 18781 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18782 18783 /* 18784 * If pkt_reason is CMD_RESET chances are that this pkt got 18785 * reset because another target on this bus caused it. The target 18786 * that caused it should get CMD_TIMEOUT with pkt_statistics 18787 * of STAT_TIMEOUT/STAT_DEV_RESET. 18788 */ 18789 18790 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18791 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18792 } 18793 18794 18795 18796 18797 /* 18798 * Function: sd_pkt_reason_cmd_aborted 18799 * 18800 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 18801 * 18802 * Context: May be called from interrupt context 18803 */ 18804 18805 static void 18806 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 18807 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18808 { 18809 ASSERT(un != NULL); 18810 ASSERT(mutex_owned(SD_MUTEX(un))); 18811 ASSERT(bp != NULL); 18812 ASSERT(xp != NULL); 18813 ASSERT(pktp != NULL); 18814 18815 /* The target may still be running the command, so try to reset. */ 18816 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18817 sd_reset_target(un, pktp); 18818 18819 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18820 18821 /* 18822 * If pkt_reason is CMD_ABORTED chances are that this pkt got 18823 * aborted because another target on this bus caused it. The target 18824 * that caused it should get CMD_TIMEOUT with pkt_statistics 18825 * of STAT_TIMEOUT/STAT_DEV_RESET. 18826 */ 18827 18828 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18829 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18830 } 18831 18832 18833 18834 /* 18835 * Function: sd_pkt_reason_cmd_timeout 18836 * 18837 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 18838 * 18839 * Context: May be called from interrupt context 18840 */ 18841 18842 static void 18843 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 18844 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18845 { 18846 ASSERT(un != NULL); 18847 ASSERT(mutex_owned(SD_MUTEX(un))); 18848 ASSERT(bp != NULL); 18849 ASSERT(xp != NULL); 18850 ASSERT(pktp != NULL); 18851 18852 18853 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18854 sd_reset_target(un, pktp); 18855 18856 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18857 18858 /* 18859 * A command timeout indicates that we could not establish 18860 * communication with the target, so set SD_RETRIES_FAILFAST 18861 * as further retries/commands are likely to take a long time. 18862 */ 18863 sd_retry_command(un, bp, 18864 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 18865 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18866 } 18867 18868 18869 18870 /* 18871 * Function: sd_pkt_reason_cmd_unx_bus_free 18872 * 18873 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
18874 * 18875 * Context: May be called from interrupt context 18876 */ 18877 18878 static void 18879 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 18880 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18881 { 18882 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 18883 18884 ASSERT(un != NULL); 18885 ASSERT(mutex_owned(SD_MUTEX(un))); 18886 ASSERT(bp != NULL); 18887 ASSERT(xp != NULL); 18888 ASSERT(pktp != NULL); 18889 18890 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18891 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18892 18893 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 18894 sd_print_retry_msg : NULL; 18895 18896 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18897 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18898 } 18899 18900 18901 /* 18902 * Function: sd_pkt_reason_cmd_tag_reject 18903 * 18904 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 18905 * 18906 * Context: May be called from interrupt context 18907 */ 18908 18909 static void 18910 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 18911 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18912 { 18913 ASSERT(un != NULL); 18914 ASSERT(mutex_owned(SD_MUTEX(un))); 18915 ASSERT(bp != NULL); 18916 ASSERT(xp != NULL); 18917 ASSERT(pktp != NULL); 18918 18919 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18920 pktp->pkt_flags = 0; 18921 un->un_tagflags = 0; 18922 if (un->un_f_opt_queueing == TRUE) { 18923 un->un_throttle = min(un->un_throttle, 3); 18924 } else { 18925 un->un_throttle = 1; 18926 } 18927 mutex_exit(SD_MUTEX(un)); 18928 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 18929 mutex_enter(SD_MUTEX(un)); 18930 18931 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18932 18933 /* Legacy behavior not to check retry counts here. */ 18934 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 18935 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18936 } 18937 18938 18939 /* 18940 * Function: sd_pkt_reason_default 18941 * 18942 * Description: Default recovery actions for SCSA pkt_reason values that 18943 * do not have more explicit recovery actions. 18944 * 18945 * Context: May be called from interrupt context 18946 */ 18947 18948 static void 18949 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 18950 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18951 { 18952 ASSERT(un != NULL); 18953 ASSERT(mutex_owned(SD_MUTEX(un))); 18954 ASSERT(bp != NULL); 18955 ASSERT(xp != NULL); 18956 ASSERT(pktp != NULL); 18957 18958 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18959 sd_reset_target(un, pktp); 18960 18961 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18962 18963 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18964 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18965 } 18966 18967 18968 18969 /* 18970 * Function: sd_pkt_status_check_condition 18971 * 18972 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
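 * (A CHECK CONDITION status means the target is holding sense
 * data. With auto request sense (ARQ) the HBA has already
 * retrieved it, so the command is simply retried; without ARQ
 * the driver must first issue its own REQUEST SENSE. Both paths
 * appear below.)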
18973 *
18974 * Context: May be called from interrupt context
18975 */
18976 
18977 static void
18978 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
18979 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18980 {
18981 ASSERT(un != NULL);
18982 ASSERT(mutex_owned(SD_MUTEX(un)));
18983 ASSERT(bp != NULL);
18984 ASSERT(xp != NULL);
18985 ASSERT(pktp != NULL);
18986 
18987 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
18988 "entry: buf:0x%p xp:0x%p\n", bp, xp);
18989 
18990 /*
18991 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
18992 * command will be retried after the request sense). Otherwise, retry
18993 * the command. Note: we are issuing the request sense even though the
18994 * retry limit may have been reached for the failed command.
18995 */
18996 if (un->un_f_arq_enabled == FALSE) {
18997 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
18998 "no ARQ, sending request sense command\n");
18999 sd_send_request_sense_command(un, bp, pktp);
19000 } else {
19001 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
19002 "ARQ, retrying request sense command\n");
19003 #if defined(__i386) || defined(__amd64)
19004 /*
19005 * The SD_RETRY_DELAY value needs to be adjusted here
19006 * if SD_RETRY_DELAY changes in sddef.h.
19007 */
19008 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
19009 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
19010 NULL);
19011 #else
19012 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
19013 EIO, SD_RETRY_DELAY, NULL);
19014 #endif
19015 }
19016 
19017 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
19018 }
19019 
19020 
19021 /*
19022 * Function: sd_pkt_status_busy
19023 *
19024 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
19025 *
19026 * Context: May be called from interrupt context
19027 */
19028 
19029 static void
19030 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
19031 struct scsi_pkt *pktp)
19032 {
19033 ASSERT(un != NULL);
19034 ASSERT(mutex_owned(SD_MUTEX(un)));
19035 ASSERT(bp != NULL);
19036 ASSERT(xp != NULL);
19037 ASSERT(pktp != NULL);
19038 
19039 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19040 "sd_pkt_status_busy: entry\n");
19041 
19042 /* If retries are exhausted, just fail the command. */
19043 if (xp->xb_retry_count >= un->un_busy_retry_count) {
19044 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
19045 "device busy too long\n");
19046 sd_return_failed_command(un, bp, EIO);
19047 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19048 "sd_pkt_status_busy: exit\n");
19049 return;
19050 }
19051 xp->xb_retry_count++;
19052 
19053 /*
19054 * Try to reset the target. However, we do not want to perform
19055 * more than one reset if the device continues to fail. The reset
19056 * will be performed when the retry count reaches the reset
19057 * threshold. This threshold should be set such that at least
19058 * one retry is issued before the reset is performed.
19059 */
19060 if (xp->xb_retry_count ==
19061 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
19062 int rval = 0;
19063 mutex_exit(SD_MUTEX(un));
19064 if (un->un_f_allow_bus_device_reset == TRUE) {
19065 /*
19066 * First try to reset the LUN; if we cannot then
19067 * try to reset the target.
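 *
 * In outline, the escalation implemented here is:
 *
 *	rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
 *	if (rval == 0)
 *		rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
 *	if (rval == 0)
 *		rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);
 *	if (rval == 0)
 *		fail the command with EIO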
19068 */ 19069 if (un->un_f_lun_reset_enabled == TRUE) { 19070 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19071 "sd_pkt_status_busy: RESET_LUN\n"); 19072 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 19073 } 19074 if (rval == 0) { 19075 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19076 "sd_pkt_status_busy: RESET_TARGET\n"); 19077 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 19078 } 19079 } 19080 if (rval == 0) { 19081 /* 19082 * If the RESET_LUN and/or RESET_TARGET failed, 19083 * try RESET_ALL 19084 */ 19085 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19086 "sd_pkt_status_busy: RESET_ALL\n"); 19087 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 19088 } 19089 mutex_enter(SD_MUTEX(un)); 19090 if (rval == 0) { 19091 /* 19092 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 19093 * At this point we give up & fail the command. 19094 */ 19095 sd_return_failed_command(un, bp, EIO); 19096 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19097 "sd_pkt_status_busy: exit (failed cmd)\n"); 19098 return; 19099 } 19100 } 19101 19102 /* 19103 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 19104 * we have already checked the retry counts above. 19105 */ 19106 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 19107 EIO, un->un_busy_timeout, NULL); 19108 19109 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19110 "sd_pkt_status_busy: exit\n"); 19111 } 19112 19113 19114 /* 19115 * Function: sd_pkt_status_reservation_conflict 19116 * 19117 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 19118 * command status. 19119 * 19120 * Context: May be called from interrupt context 19121 */ 19122 19123 static void 19124 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 19125 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19126 { 19127 ASSERT(un != NULL); 19128 ASSERT(mutex_owned(SD_MUTEX(un))); 19129 ASSERT(bp != NULL); 19130 ASSERT(xp != NULL); 19131 ASSERT(pktp != NULL); 19132 19133 /* 19134 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 19135 * conflict could be due to various reasons like incorrect keys, not 19136 * registered or not reserved etc. So, we return EACCES to the caller. 19137 */ 19138 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 19139 int cmd = SD_GET_PKT_OPCODE(pktp); 19140 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 19141 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 19142 sd_return_failed_command(un, bp, EACCES); 19143 return; 19144 } 19145 } 19146 19147 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 19148 19149 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 19150 if (sd_failfast_enable != 0) { 19151 /* By definition, we must panic here.... */ 19152 sd_panic_for_res_conflict(un); 19153 /*NOTREACHED*/ 19154 } 19155 SD_ERROR(SD_LOG_IO, un, 19156 "sd_handle_resv_conflict: Disk Reserved\n"); 19157 sd_return_failed_command(un, bp, EACCES); 19158 return; 19159 } 19160 19161 /* 19162 * 1147670: retry only if sd_retry_on_reservation_conflict 19163 * property is set (default is 1). Retries will not succeed 19164 * on a disk reserved by another initiator. HA systems 19165 * may reset this via sd.conf to avoid these retries. 19166 * 19167 * Note: The legacy return code for this failure is EIO, however EACCES 19168 * seems more appropriate for a reservation conflict. 
19169 */ 19170 if (sd_retry_on_reservation_conflict == 0) { 19171 SD_ERROR(SD_LOG_IO, un, 19172 "sd_handle_resv_conflict: Device Reserved\n"); 19173 sd_return_failed_command(un, bp, EIO); 19174 return; 19175 } 19176 19177 /* 19178 * Retry the command if we can. 19179 * 19180 * Note: The legacy return code for this failure is EIO, however EACCES 19181 * seems more appropriate for a reservation conflict. 19182 */ 19183 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 19184 (clock_t)2, NULL); 19185 } 19186 19187 19188 19189 /* 19190 * Function: sd_pkt_status_qfull 19191 * 19192 * Description: Handle a QUEUE FULL condition from the target. This can 19193 * occur if the HBA does not handle the queue full condition. 19194 * (Basically this means third-party HBAs as Sun HBAs will 19195 * handle the queue full condition.) Note that if there are 19196 * some commands already in the transport, then the queue full 19197 * has occurred because the queue for this nexus is actually 19198 * full. If there are no commands in the transport, then the 19199 * queue full is resulting from some other initiator or lun 19200 * consuming all the resources at the target. 19201 * 19202 * Context: May be called from interrupt context 19203 */ 19204 19205 static void 19206 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 19207 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19208 { 19209 ASSERT(un != NULL); 19210 ASSERT(mutex_owned(SD_MUTEX(un))); 19211 ASSERT(bp != NULL); 19212 ASSERT(xp != NULL); 19213 ASSERT(pktp != NULL); 19214 19215 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19216 "sd_pkt_status_qfull: entry\n"); 19217 19218 /* 19219 * Just lower the QFULL throttle and retry the command. Note that 19220 * we do not limit the number of retries here. 19221 */ 19222 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 19223 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 19224 SD_RESTART_TIMEOUT, NULL); 19225 19226 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19227 "sd_pkt_status_qfull: exit\n"); 19228 } 19229 19230 19231 /* 19232 * Function: sd_reset_target 19233 * 19234 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 19235 * RESET_TARGET, or RESET_ALL. 19236 * 19237 * Context: May be called under interrupt context. 19238 */ 19239 19240 static void 19241 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 19242 { 19243 int rval = 0; 19244 19245 ASSERT(un != NULL); 19246 ASSERT(mutex_owned(SD_MUTEX(un))); 19247 ASSERT(pktp != NULL); 19248 19249 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 19250 19251 /* 19252 * No need to reset if the transport layer has already done so. 
19253 */ 19254 if ((pktp->pkt_statistics & 19255 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 19256 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19257 "sd_reset_target: no reset\n"); 19258 return; 19259 } 19260 19261 mutex_exit(SD_MUTEX(un)); 19262 19263 if (un->un_f_allow_bus_device_reset == TRUE) { 19264 if (un->un_f_lun_reset_enabled == TRUE) { 19265 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19266 "sd_reset_target: RESET_LUN\n"); 19267 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 19268 } 19269 if (rval == 0) { 19270 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19271 "sd_reset_target: RESET_TARGET\n"); 19272 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 19273 } 19274 } 19275 19276 if (rval == 0) { 19277 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19278 "sd_reset_target: RESET_ALL\n"); 19279 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 19280 } 19281 19282 mutex_enter(SD_MUTEX(un)); 19283 19284 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 19285 } 19286 19287 /* 19288 * Function: sd_target_change_task 19289 * 19290 * Description: Handle dynamic target change 19291 * 19292 * Context: Executes in a taskq() thread context 19293 */ 19294 static void 19295 sd_target_change_task(void *arg) 19296 { 19297 struct sd_lun *un = arg; 19298 uint64_t capacity; 19299 diskaddr_t label_cap; 19300 uint_t lbasize; 19301 sd_ssc_t *ssc; 19302 19303 ASSERT(un != NULL); 19304 ASSERT(!mutex_owned(SD_MUTEX(un))); 19305 19306 if ((un->un_f_blockcount_is_valid == FALSE) || 19307 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 19308 return; 19309 } 19310 19311 ssc = sd_ssc_init(un); 19312 19313 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity, 19314 &lbasize, SD_PATH_DIRECT) != 0) { 19315 SD_ERROR(SD_LOG_ERROR, un, 19316 "sd_target_change_task: fail to read capacity\n"); 19317 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19318 goto task_exit; 19319 } 19320 19321 mutex_enter(SD_MUTEX(un)); 19322 if (capacity <= un->un_blockcount) { 19323 mutex_exit(SD_MUTEX(un)); 19324 goto task_exit; 19325 } 19326 19327 sd_update_block_info(un, lbasize, capacity); 19328 mutex_exit(SD_MUTEX(un)); 19329 19330 /* 19331 * If lun is EFI labeled and lun capacity is greater than the 19332 * capacity contained in the label, log a sys event. 
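 *
 * (The sysevent logged is EC_DEV_STATUS / ESC_DEV_DLE, carrying
 * the DEV_PHYS_PATH of minor node 'a'; see
 * sd_log_lun_expansion_event() below.)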
19333 */ 19334 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 19335 (void*)SD_PATH_DIRECT) == 0) { 19336 mutex_enter(SD_MUTEX(un)); 19337 if (un->un_f_blockcount_is_valid && 19338 un->un_blockcount > label_cap) { 19339 mutex_exit(SD_MUTEX(un)); 19340 sd_log_lun_expansion_event(un, KM_SLEEP); 19341 } else { 19342 mutex_exit(SD_MUTEX(un)); 19343 } 19344 } 19345 19346 task_exit: 19347 sd_ssc_fini(ssc); 19348 } 19349 19350 /* 19351 * Function: sd_log_lun_expansion_event 19352 * 19353 * Description: Log lun expansion sys event 19354 * 19355 * Context: Never called from interrupt context 19356 */ 19357 static void 19358 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag) 19359 { 19360 int err; 19361 char *path; 19362 nvlist_t *dle_attr_list; 19363 19364 /* Allocate and build sysevent attribute list */ 19365 err = nvlist_alloc(&dle_attr_list, NV_UNIQUE_NAME_TYPE, km_flag); 19366 if (err != 0) { 19367 SD_ERROR(SD_LOG_ERROR, un, 19368 "sd_log_lun_expansion_event: fail to allocate space\n"); 19369 return; 19370 } 19371 19372 path = kmem_alloc(MAXPATHLEN, km_flag); 19373 if (path == NULL) { 19374 nvlist_free(dle_attr_list); 19375 SD_ERROR(SD_LOG_ERROR, un, 19376 "sd_log_lun_expansion_event: fail to allocate space\n"); 19377 return; 19378 } 19379 /* 19380 * Add path attribute to identify the lun. 19381 * We are using minor node 'a' as the sysevent attribute. 19382 */ 19383 (void) snprintf(path, MAXPATHLEN, "/devices"); 19384 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path)); 19385 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path), 19386 ":a"); 19387 19388 err = nvlist_add_string(dle_attr_list, DEV_PHYS_PATH, path); 19389 if (err != 0) { 19390 nvlist_free(dle_attr_list); 19391 kmem_free(path, MAXPATHLEN); 19392 SD_ERROR(SD_LOG_ERROR, un, 19393 "sd_log_lun_expansion_event: fail to add attribute\n"); 19394 return; 19395 } 19396 19397 /* Log dynamic lun expansion sysevent */ 19398 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS, 19399 ESC_DEV_DLE, dle_attr_list, NULL, km_flag); 19400 if (err != DDI_SUCCESS) { 19401 SD_ERROR(SD_LOG_ERROR, un, 19402 "sd_log_lun_expansion_event: fail to log sysevent\n"); 19403 } 19404 19405 nvlist_free(dle_attr_list); 19406 kmem_free(path, MAXPATHLEN); 19407 } 19408 19409 /* 19410 * Function: sd_media_change_task 19411 * 19412 * Description: Recovery action for CDROM to become available. 19413 * 19414 * Context: Executes in a taskq() thread context 19415 */ 19416 19417 static void 19418 sd_media_change_task(void *arg) 19419 { 19420 struct scsi_pkt *pktp = arg; 19421 struct sd_lun *un; 19422 struct buf *bp; 19423 struct sd_xbuf *xp; 19424 int err = 0; 19425 int retry_count = 0; 19426 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 19427 struct sd_sense_info si; 19428 19429 ASSERT(pktp != NULL); 19430 bp = (struct buf *)pktp->pkt_private; 19431 ASSERT(bp != NULL); 19432 xp = SD_GET_XBUF(bp); 19433 ASSERT(xp != NULL); 19434 un = SD_GET_UN(bp); 19435 ASSERT(un != NULL); 19436 ASSERT(!mutex_owned(SD_MUTEX(un))); 19437 ASSERT(un->un_f_monitor_media_state); 19438 19439 si.ssi_severity = SCSI_ERR_INFO; 19440 si.ssi_pfa_flag = FALSE; 19441 19442 /* 19443 * When a reset is issued on a CDROM, it takes a long time to 19444 * recover. First few attempts to read capacity and other things 19445 * related to handling unit attention fail (with a ASC 0x4 and 19446 * ASCQ 0x1). In that case we want to do enough retries and we want 19447 * to limit the retries in other cases of genuine failures like 19448 * no media in drive. 
19449 */ 19450 while (retry_count++ < retry_limit) { 19451 if ((err = sd_handle_mchange(un)) == 0) { 19452 break; 19453 } 19454 if (err == EAGAIN) { 19455 retry_limit = SD_UNIT_ATTENTION_RETRY; 19456 } 19457 /* Sleep for 0.5 sec. & try again */ 19458 delay(drv_usectohz(500000)); 19459 } 19460 19461 /* 19462 * Dispatch (retry or fail) the original command here, 19463 * along with appropriate console messages.... 19464 * 19465 * Must grab the mutex before calling sd_retry_command, 19466 * sd_print_sense_msg and sd_return_failed_command. 19467 */ 19468 mutex_enter(SD_MUTEX(un)); 19469 if (err != SD_CMD_SUCCESS) { 19470 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19471 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 19472 si.ssi_severity = SCSI_ERR_FATAL; 19473 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 19474 sd_return_failed_command(un, bp, EIO); 19475 } else { 19476 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 19477 &si, EIO, (clock_t)0, NULL); 19478 } 19479 mutex_exit(SD_MUTEX(un)); 19480 } 19481 19482 19483 19484 /* 19485 * Function: sd_handle_mchange 19486 * 19487 * Description: Perform geometry validation & other recovery when CDROM 19488 * has been removed from drive. 19489 * 19490 * Return Code: 0 for success 19491 * errno-type return code of either sd_send_scsi_DOORLOCK() or 19492 * sd_send_scsi_READ_CAPACITY() 19493 * 19494 * Context: Executes in a taskq() thread context 19495 */ 19496 19497 static int 19498 sd_handle_mchange(struct sd_lun *un) 19499 { 19500 uint64_t capacity; 19501 uint32_t lbasize; 19502 int rval; 19503 sd_ssc_t *ssc; 19504 19505 ASSERT(!mutex_owned(SD_MUTEX(un))); 19506 ASSERT(un->un_f_monitor_media_state); 19507 19508 ssc = sd_ssc_init(un); 19509 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 19510 SD_PATH_DIRECT_PRIORITY); 19511 19512 if (rval != 0) 19513 goto failed; 19514 19515 mutex_enter(SD_MUTEX(un)); 19516 sd_update_block_info(un, lbasize, capacity); 19517 19518 if (un->un_errstats != NULL) { 19519 struct sd_errstats *stp = 19520 (struct sd_errstats *)un->un_errstats->ks_data; 19521 stp->sd_capacity.value.ui64 = (uint64_t) 19522 ((uint64_t)un->un_blockcount * 19523 (uint64_t)un->un_tgt_blocksize); 19524 } 19525 19526 /* 19527 * Check if the media in the device is writable or not 19528 */ 19529 if (ISCD(un)) { 19530 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY); 19531 } 19532 19533 /* 19534 * Note: Maybe let the strategy/partitioning chain worry about getting 19535 * valid geometry. 19536 */ 19537 mutex_exit(SD_MUTEX(un)); 19538 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 19539 19540 19541 if (cmlb_validate(un->un_cmlbhandle, 0, 19542 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 19543 sd_ssc_fini(ssc); 19544 return (EIO); 19545 } else { 19546 if (un->un_f_pkstats_enabled) { 19547 sd_set_pstats(un); 19548 SD_TRACE(SD_LOG_IO_PARTITION, un, 19549 "sd_handle_mchange: un:0x%p pstats created and " 19550 "set\n", un); 19551 } 19552 } 19553 19554 /* 19555 * Try to lock the door 19556 */ 19557 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 19558 SD_PATH_DIRECT_PRIORITY); 19559 failed: 19560 if (rval != 0) 19561 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19562 sd_ssc_fini(ssc); 19563 return (rval); 19564 } 19565 19566 19567 /* 19568 * Function: sd_send_scsi_DOORLOCK 19569 * 19570 * Description: Issue the scsi DOOR LOCK command 19571 * 19572 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19573 * structure for this target. 
19574 * flag - SD_REMOVAL_ALLOW
19575 * SD_REMOVAL_PREVENT
19576 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19577 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19578 * to use the USCSI "direct" chain and bypass the normal
19579 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19580 * command is issued as part of an error recovery action.
19581 *
19582 * Return Code: 0 - Success
19583 * errno return code from sd_ssc_send()
19584 *
19585 * Context: Can sleep.
19586 */
19587 
19588 static int
19589 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag)
19590 {
19591 struct scsi_extended_sense sense_buf;
19592 union scsi_cdb cdb;
19593 struct uscsi_cmd ucmd_buf;
19594 int status;
19595 struct sd_lun *un;
19596 
19597 ASSERT(ssc != NULL);
19598 un = ssc->ssc_un;
19599 ASSERT(un != NULL);
19600 ASSERT(!mutex_owned(SD_MUTEX(un)));
19601 
19602 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un);
19603 
19604 /* already determined doorlock is not supported, fake success */
19605 if (un->un_f_doorlock_supported == FALSE) {
19606 return (0);
19607 }
19608 
19609 /*
19610 * If we are ejecting and see an SD_REMOVAL_PREVENT,
19611 * ignore the command so we can complete the eject
19612 * operation.
19613 */
19614 if (flag == SD_REMOVAL_PREVENT) {
19615 mutex_enter(SD_MUTEX(un));
19616 if (un->un_f_ejecting == TRUE) {
19617 mutex_exit(SD_MUTEX(un));
19618 return (EAGAIN);
19619 }
19620 mutex_exit(SD_MUTEX(un));
19621 }
19622 
19623 bzero(&cdb, sizeof (cdb));
19624 bzero(&ucmd_buf, sizeof (ucmd_buf));
19625 
19626 cdb.scc_cmd = SCMD_DOORLOCK;
19627 cdb.cdb_opaque[4] = (uchar_t)flag;
19628 
19629 ucmd_buf.uscsi_cdb = (char *)&cdb;
19630 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
19631 ucmd_buf.uscsi_bufaddr = NULL;
19632 ucmd_buf.uscsi_buflen = 0;
19633 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19634 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
19635 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
19636 ucmd_buf.uscsi_timeout = 15;
19637 
19638 SD_TRACE(SD_LOG_IO, un,
19639 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n");
19640 
19641 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19642 UIO_SYSSPACE, path_flag);
19643 
19644 if (status == 0)
19645 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
19646 
19647 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
19648 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19649 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) {
19650 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19651 
19652 /* fake success and skip subsequent doorlock commands */
19653 un->un_f_doorlock_supported = FALSE;
19654 return (0);
19655 }
19656 
19657 return (status);
19658 }
19659 
19660 /*
19661 * Function: sd_send_scsi_READ_CAPACITY
19662 *
19663 * Description: This routine uses the scsi READ CAPACITY command to determine
19664 * the device capacity in number of blocks and the device native
19665 * block size. If this function returns a failure, then the
19666 * values in *capp and *lbap are undefined. If the capacity
19667 * returned is 0xffffffff then the lun is too large for a
19668 * normal READ CAPACITY command and the results of a
19669 * READ CAPACITY 16 will be used instead.
19670 *
19671 * Arguments: ssc - ssc contains ptr to soft state struct for the target
19672 * capp - ptr to unsigned 64-bit variable to receive the
19673 * capacity value from the command.
19674 * lbap - ptr to unsigned 32-bit variable to receive the
19675 * block size value from the command
19676 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19677 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19678 * to use the USCSI "direct" chain and bypass the normal
19679 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19680 * command is issued as part of an error recovery action.
19681 *
19682 * Return Code: 0 - Success
19683 * EIO - IO error
19684 * EACCES - Reservation conflict detected
19685 * EAGAIN - Device is becoming ready
19686 * errno return code from sd_ssc_send()
19687 *
19688 * Context: Can sleep. Blocks until command completes.
19689 */
19690 
19691 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity)
19692 
19693 static int
19694 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap,
19695 int path_flag)
19696 {
19697 struct scsi_extended_sense sense_buf;
19698 struct uscsi_cmd ucmd_buf;
19699 union scsi_cdb cdb;
19700 uint32_t *capacity_buf;
19701 uint64_t capacity;
19702 uint32_t lbasize;
19703 uint32_t pbsize;
19704 int status;
19705 struct sd_lun *un;
19706 
19707 ASSERT(ssc != NULL);
19708 
19709 un = ssc->ssc_un;
19710 ASSERT(un != NULL);
19711 ASSERT(!mutex_owned(SD_MUTEX(un)));
19712 ASSERT(capp != NULL);
19713 ASSERT(lbap != NULL);
19714 
19715 SD_TRACE(SD_LOG_IO, un,
19716 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
19717 
19718 /*
19719 * First send a READ_CAPACITY command to the target.
19720 * (This command is mandatory under SCSI-2.)
19721 *
19722 * Set up the CDB for the READ_CAPACITY command. The Partial
19723 * Medium Indicator bit is cleared. The address field must be
19724 * zero if the PMI bit is zero.
19725 */
19726 bzero(&cdb, sizeof (cdb));
19727 bzero(&ucmd_buf, sizeof (ucmd_buf));
19728 
19729 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP);
19730 
19731 cdb.scc_cmd = SCMD_READ_CAPACITY;
19732 
19733 ucmd_buf.uscsi_cdb = (char *)&cdb;
19734 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
19735 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf;
19736 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE;
19737 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19738 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
19739 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
19740 ucmd_buf.uscsi_timeout = 60;
19741 
19742 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19743 UIO_SYSSPACE, path_flag);
19744 
19745 switch (status) {
19746 case 0:
19747 /* Return failure if we did not get valid capacity data. */
19748 if (ucmd_buf.uscsi_resid != 0) {
19749 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
19750 "sd_send_scsi_READ_CAPACITY received invalid "
19751 "capacity data");
19752 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
19753 return (EIO);
19754 }
19755 /*
19756 * Read capacity and block size from the READ CAPACITY 10 data.
19757 * This data may be adjusted later due to device specific
19758 * issues.
19759 *
19760 * According to the SCSI spec, the READ CAPACITY 10
19761 * command returns the following:
19762 *
19763 * bytes 0-3: Maximum logical block address available.
19764 * (MSB in byte:0 & LSB in byte:3) 19765 * 19766 * bytes 4-7: Block length in bytes 19767 * (MSB in byte:4 & LSB in byte:7) 19768 * 19769 */ 19770 capacity = BE_32(capacity_buf[0]); 19771 lbasize = BE_32(capacity_buf[1]); 19772 19773 /* 19774 * Done with capacity_buf 19775 */ 19776 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19777 19778 /* 19779 * if the reported capacity is set to all 0xf's, then 19780 * this disk is too large and requires SBC-2 commands. 19781 * Reissue the request using READ CAPACITY 16. 19782 */ 19783 if (capacity == 0xffffffff) { 19784 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19785 status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, 19786 &lbasize, &pbsize, path_flag); 19787 if (status != 0) { 19788 return (status); 19789 } 19790 } 19791 break; /* Success! */ 19792 case EIO: 19793 switch (ucmd_buf.uscsi_status) { 19794 case STATUS_RESERVATION_CONFLICT: 19795 status = EACCES; 19796 break; 19797 case STATUS_CHECK: 19798 /* 19799 * Check condition; look for ASC/ASCQ of 0x04/0x01 19800 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 19801 */ 19802 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19803 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 19804 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 19805 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19806 return (EAGAIN); 19807 } 19808 break; 19809 default: 19810 break; 19811 } 19812 /* FALLTHRU */ 19813 default: 19814 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19815 return (status); 19816 } 19817 19818 /* 19819 * Some ATAPI CD-ROM drives report inaccurate LBA size values 19820 * (2352 and 0 are common) so for these devices always force the value 19821 * to 2048 as required by the ATAPI specs. 19822 */ 19823 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 19824 lbasize = 2048; 19825 } 19826 19827 /* 19828 * Get the maximum LBA value from the READ CAPACITY data. 19829 * Here we assume that the Partial Medium Indicator (PMI) bit 19830 * was cleared when issuing the command. This means that the LBA 19831 * returned from the device is the LBA of the last logical block 19832 * on the logical unit. The actual logical block count will be 19833 * this value plus one. 19834 * 19835 * Currently, for removable media, the capacity is saved in terms 19836 * of un->un_sys_blocksize, so scale the capacity value to reflect this. 19837 */ 19838 if (un->un_f_has_removable_media) 19839 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize); 19840 19841 /* 19842 * Copy the values from the READ CAPACITY command into the space 19843 * provided by the caller. 19844 */ 19845 *capp = capacity; 19846 *lbap = lbasize; 19847 19848 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 19849 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 19850 19851 /* 19852 * Both the lbasize and capacity from the device must be nonzero, 19853 * otherwise we assume that the values are not valid and return 19854 * failure to the caller. (4203735) 19855 */ 19856 if ((capacity == 0) || (lbasize == 0)) { 19857 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 19858 "sd_send_scsi_READ_CAPACITY received invalid value " 19859 "capacity %llu lbasize %d", capacity, lbasize); 19860 return (EIO); 19861 } 19862 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19863 return (0); 19864 } 19865 19866 /* 19867 * Function: sd_send_scsi_READ_CAPACITY_16 19868 * 19869 * Description: This routine uses the scsi READ CAPACITY 16 command to 19870 * determine the device capacity in number of blocks and the 19871 * device native block size. 
If this function returns a failure, 19872 * then the values in *capp and *lbap are undefined. 19873 * This routine should be called by sd_send_scsi_READ_CAPACITY 19874 * which will apply any device specific adjustments to capacity 19875 * and lbasize. One exception is that it is also called by 19876 * sd_get_media_info_ext. In that function, there is no need to 19877 * adjust the capacity and lbasize. 19878 * 19879 * Arguments: ssc - ssc contains ptr to soft state struct for the target 19880 * capp - ptr to unsigned 64-bit variable to receive the 19881 * capacity value from the command. 19882 * lbap - ptr to unsigned 32-bit variable to receive the 19883 * block size value from the command 19884 * psp - ptr to unsigned 32-bit variable to receive the 19885 * physical block size value from the command 19886 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19887 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19888 * to use the USCSI "direct" chain and bypass the normal 19889 * command waitq. SD_PATH_DIRECT_PRIORITY is used when 19890 * this command is issued as part of an error recovery 19891 * action. 19892 * 19893 * Return Code: 0 - Success 19894 * EIO - IO error 19895 * EACCES - Reservation conflict detected 19896 * EAGAIN - Device is becoming ready 19897 * errno return code from sd_ssc_send() 19898 * 19899 * Context: Can sleep. Blocks until command completes. 19900 */ 19901 19902 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16) 19903 19904 static int 19905 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, 19906 uint32_t *lbap, uint32_t *psp, int path_flag) 19907 { 19908 struct scsi_extended_sense sense_buf; 19909 struct uscsi_cmd ucmd_buf; 19910 union scsi_cdb cdb; 19911 uint64_t *capacity16_buf; 19912 uint64_t capacity; 19913 uint32_t lbasize; 19914 uint32_t pbsize; 19915 uint32_t lbpb_exp; 19916 int status; 19917 struct sd_lun *un; 19918 19919 ASSERT(ssc != NULL); 19920 19921 un = ssc->ssc_un; 19922 ASSERT(un != NULL); 19923 ASSERT(!mutex_owned(SD_MUTEX(un))); 19924 ASSERT(capp != NULL); 19925 ASSERT(lbap != NULL); 19926 19927 SD_TRACE(SD_LOG_IO, un, 19928 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un); 19929 19930 /* 19931 * First send a READ_CAPACITY_16 command to the target. 19932 * 19933 * Set up the CDB for the READ_CAPACITY_16 command. The Partial 19934 * Medium Indicator bit is cleared. The address field must be 19935 * zero if the PMI bit is zero. 19936 */ 19937 bzero(&cdb, sizeof (cdb)); 19938 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19939 19940 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP); 19941 19942 ucmd_buf.uscsi_cdb = (char *)&cdb; 19943 ucmd_buf.uscsi_cdblen = CDB_GROUP4; 19944 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; 19945 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; 19946 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19947 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19948 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19949 ucmd_buf.uscsi_timeout = 60; 19950 19951 /* 19952 * Read Capacity (16) is a Service Action In command.
One 19953 * command byte (0x9E) is overloaded for multiple operations, 19954 * with the second CDB byte specifying the desired operation. 19955 */ 19956 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4; 19957 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4; 19958 19959 /* 19960 * Fill in allocation length field 19961 */ 19962 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen); 19963 19964 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19965 UIO_SYSSPACE, path_flag); 19966 19967 switch (status) { 19968 case 0: 19969 /* Return failure if we did not get valid capacity data. */ 19970 if (ucmd_buf.uscsi_resid > 20) { 19971 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 19972 "sd_send_scsi_READ_CAPACITY_16 received invalid " 19973 "capacity data"); 19974 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19975 return (EIO); 19976 } 19977 19978 /* 19979 * Read capacity and block size from the READ CAPACITY 16 data. 19980 * This data may be adjusted later due to device specific 19981 * issues. 19982 * 19983 * According to the SCSI spec, the READ CAPACITY 16 19984 * command returns the following: 19985 * 19986 * bytes 0-7: Maximum logical block address available. 19987 * (MSB in byte:0 & LSB in byte:7) 19988 * 19989 * bytes 8-11: Block length in bytes 19990 * (MSB in byte:8 & LSB in byte:11) 19991 * 19992 * byte 13: LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT 19993 */ 19994 capacity = BE_64(capacity16_buf[0]); 19995 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 19996 lbpb_exp = (BE_64(capacity16_buf[1]) >> 16) & 0x0f; 19997 19998 pbsize = lbasize << lbpb_exp; 19999 20000 /* 20001 * Done with capacity16_buf 20002 */ 20003 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20004 20005 /* 20006 * if the reported capacity is set to all 0xf's, then 20007 * this disk is too large. This could only happen with 20008 * a device that supports LBAs larger than 64 bits which 20009 * are not defined by any current T10 standards. 20010 */ 20011 if (capacity == 0xffffffffffffffff) { 20012 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20013 "disk is too large"); 20014 return (EIO); 20015 } 20016 break; /* Success! */ 20017 case EIO: 20018 switch (ucmd_buf.uscsi_status) { 20019 case STATUS_RESERVATION_CONFLICT: 20020 status = EACCES; 20021 break; 20022 case STATUS_CHECK: 20023 /* 20024 * Check condition; look for ASC/ASCQ of 0x04/0x01 20025 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 20026 */ 20027 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20028 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 20029 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 20030 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20031 return (EAGAIN); 20032 } 20033 break; 20034 default: 20035 break; 20036 } 20037 /* FALLTHRU */ 20038 default: 20039 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20040 return (status); 20041 } 20042 20043 *capp = capacity; 20044 *lbap = lbasize; 20045 *psp = pbsize; 20046 20047 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 20048 "capacity:0x%llx lbasize:0x%x, pbsize: 0x%x\n", 20049 capacity, lbasize, pbsize); 20050 20051 return (0); 20052 } 20053 20054 20055 /* 20056 * Function: sd_send_scsi_START_STOP_UNIT 20057 * 20058 * Description: Issue a scsi START STOP UNIT command to the target. 20059 * 20060 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20061 * structure for this target.
20062 * flag - SD_TARGET_START 20063 * SD_TARGET_STOP 20064 * SD_TARGET_EJECT 20065 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20066 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20067 * to use the USCSI "direct" chain and bypass the normal 20068 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 20069 * command is issued as part of an error recovery action. 20070 * 20071 * Return Code: 0 - Success 20072 * EIO - IO error 20073 * EACCES - Reservation conflict detected 20074 * ENXIO - Not Ready, medium not present 20075 * errno return code from sd_ssc_send() 20076 * 20077 * Context: Can sleep. 20078 */ 20079 20080 static int 20081 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, int path_flag) 20082 { 20083 struct scsi_extended_sense sense_buf; 20084 union scsi_cdb cdb; 20085 struct uscsi_cmd ucmd_buf; 20086 int status; 20087 struct sd_lun *un; 20088 20089 ASSERT(ssc != NULL); 20090 un = ssc->ssc_un; 20091 ASSERT(un != NULL); 20092 ASSERT(!mutex_owned(SD_MUTEX(un))); 20093 20094 SD_TRACE(SD_LOG_IO, un, 20095 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 20096 20097 if (un->un_f_check_start_stop && 20098 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 20099 (un->un_f_start_stop_supported != TRUE)) { 20100 return (0); 20101 } 20102 20103 /* 20104 * If we are performing an eject operation and 20105 * we receive any command other than SD_TARGET_EJECT 20106 * we should immediately return. 20107 */ 20108 if (flag != SD_TARGET_EJECT) { 20109 mutex_enter(SD_MUTEX(un)); 20110 if (un->un_f_ejecting == TRUE) { 20111 mutex_exit(SD_MUTEX(un)); 20112 return (EAGAIN); 20113 } 20114 mutex_exit(SD_MUTEX(un)); 20115 } 20116 20117 bzero(&cdb, sizeof (cdb)); 20118 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20119 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20120 20121 cdb.scc_cmd = SCMD_START_STOP; 20122 cdb.cdb_opaque[4] = (uchar_t)flag; 20123 20124 ucmd_buf.uscsi_cdb = (char *)&cdb; 20125 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20126 ucmd_buf.uscsi_bufaddr = NULL; 20127 ucmd_buf.uscsi_buflen = 0; 20128 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20129 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20130 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20131 ucmd_buf.uscsi_timeout = 200; 20132 20133 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20134 UIO_SYSSPACE, path_flag); 20135 20136 switch (status) { 20137 case 0: 20138 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20139 break; /* Success! */ 20140 case EIO: 20141 switch (ucmd_buf.uscsi_status) { 20142 case STATUS_RESERVATION_CONFLICT: 20143 status = EACCES; 20144 break; 20145 case STATUS_CHECK: 20146 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 20147 switch (scsi_sense_key( 20148 (uint8_t *)&sense_buf)) { 20149 case KEY_ILLEGAL_REQUEST: 20150 status = ENOTSUP; 20151 break; 20152 case KEY_NOT_READY: 20153 if (scsi_sense_asc( 20154 (uint8_t *)&sense_buf) 20155 == 0x3A) { 20156 status = ENXIO; 20157 } 20158 break; 20159 default: 20160 break; 20161 } 20162 } 20163 break; 20164 default: 20165 break; 20166 } 20167 break; 20168 default: 20169 break; 20170 } 20171 20172 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 20173 20174 return (status); 20175 } 20176 20177 20178 /* 20179 * Function: sd_start_stop_unit_callback 20180 * 20181 * Description: timeout(9F) callback to begin recovery process for a 20182 * device that has spun down. 20183 * 20184 * Arguments: arg - pointer to associated softstate struct. 
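 * * (Illustrative sketch, not code from this driver: the callback is armed with timeout(9F) while SD_MUTEX is held, along the lines of un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, un, hz); where the one-second delay shown is hypothetical.)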
20185 * 20186 * Context: Executes in a timeout(9F) thread context 20187 */ 20188 20189 static void 20190 sd_start_stop_unit_callback(void *arg) 20191 { 20192 struct sd_lun *un = arg; 20193 ASSERT(un != NULL); 20194 ASSERT(!mutex_owned(SD_MUTEX(un))); 20195 20196 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 20197 20198 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 20199 } 20200 20201 20202 /* 20203 * Function: sd_start_stop_unit_task 20204 * 20205 * Description: Recovery procedure when a drive is spun down. 20206 * 20207 * Arguments: arg - pointer to associated softstate struct. 20208 * 20209 * Context: Executes in a taskq() thread context 20210 */ 20211 20212 static void 20213 sd_start_stop_unit_task(void *arg) 20214 { 20215 struct sd_lun *un = arg; 20216 sd_ssc_t *ssc; 20217 int rval; 20218 20219 ASSERT(un != NULL); 20220 ASSERT(!mutex_owned(SD_MUTEX(un))); 20221 20222 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 20223 20224 /* 20225 * Some unformatted drives report not ready error, no need to 20226 * restart if format has been initiated. 20227 */ 20228 mutex_enter(SD_MUTEX(un)); 20229 if (un->un_f_format_in_progress == TRUE) { 20230 mutex_exit(SD_MUTEX(un)); 20231 return; 20232 } 20233 mutex_exit(SD_MUTEX(un)); 20234 20235 /* 20236 * When a START STOP command is issued from here, it is part of a 20237 * failure recovery operation and must be issued before any other 20238 * commands, including any pending retries. Thus it must be sent 20239 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 20240 * succeeds or not, we will start I/O after the attempt. 20241 */ 20242 ssc = sd_ssc_init(un); 20243 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 20244 SD_PATH_DIRECT_PRIORITY); 20245 if (rval != 0) 20246 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 20247 sd_ssc_fini(ssc); 20248 /* 20249 * The above call blocks until the START_STOP_UNIT command completes. 20250 * Now that it has completed, we must re-try the original IO that 20251 * received the NOT READY condition in the first place. There are 20252 * three possible conditions here: 20253 * 20254 * (1) The original IO is on un_retry_bp. 20255 * (2) The original IO is on the regular wait queue, and un_retry_bp 20256 * is NULL. 20257 * (3) The original IO is on the regular wait queue, and un_retry_bp 20258 * points to some other, unrelated bp. 20259 * 20260 * For each case, we must call sd_start_cmds() with un_retry_bp 20261 * as the argument. If un_retry_bp is NULL, this will initiate 20262 * processing of the regular wait queue. If un_retry_bp is not NULL, 20263 * then this will process the bp on un_retry_bp. That may or may not 20264 * be the original IO, but that does not matter: the important thing 20265 * is to keep the IO processing going at this point. 20266 * 20267 * Note: This is a very specific error recovery sequence associated 20268 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 20269 * serialize the I/O with completion of the spin-up. 
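 * * Illustrative restatement of the dispatch below (no additional logic): sd_start_cmds() treats a NULL bp argument as "start from the wait queue", so the call is equivalent to: if (un->un_retry_bp != NULL) restart the command on un_retry_bp; else drain the regular wait queue. Either way, IO processing resumes.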
20270 */ 20271 mutex_enter(SD_MUTEX(un)); 20272 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 20273 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 20274 un, un->un_retry_bp); 20275 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 20276 sd_start_cmds(un, un->un_retry_bp); 20277 mutex_exit(SD_MUTEX(un)); 20278 20279 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 20280 } 20281 20282 20283 /* 20284 * Function: sd_send_scsi_INQUIRY 20285 * 20286 * Description: Issue the scsi INQUIRY command. 20287 * 20288 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20289 * structure for this target. 20290 * bufaddr - buffer to receive the INQUIRY data 20291 * buflen - size of the buffer at bufaddr 20292 * evpd - EVPD bit for byte 1 of the CDB 20293 * page_code - VPD page code for byte 2 of the CDB 20294 * residp - ptr to receive the residual count (may be NULL) 20295 * 20296 * Return Code: 0 - Success 20297 * errno return code from sd_ssc_send() 20298 * 20299 * Context: Can sleep. Does not return until command is completed. 20300 */ 20301 20302 static int 20303 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen, 20304 uchar_t evpd, uchar_t page_code, size_t *residp) 20305 { 20306 union scsi_cdb cdb; 20307 struct uscsi_cmd ucmd_buf; 20308 int status; 20309 struct sd_lun *un; 20310 20311 ASSERT(ssc != NULL); 20312 un = ssc->ssc_un; 20313 ASSERT(un != NULL); 20314 ASSERT(!mutex_owned(SD_MUTEX(un))); 20315 ASSERT(bufaddr != NULL); 20316 20317 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 20318 20319 bzero(&cdb, sizeof (cdb)); 20320 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20321 bzero(bufaddr, buflen); 20322 20323 cdb.scc_cmd = SCMD_INQUIRY; 20324 cdb.cdb_opaque[1] = evpd; 20325 cdb.cdb_opaque[2] = page_code; 20326 FORMG0COUNT(&cdb, buflen); 20327 20328 ucmd_buf.uscsi_cdb = (char *)&cdb; 20329 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20330 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20331 ucmd_buf.uscsi_buflen = buflen; 20332 ucmd_buf.uscsi_rqbuf = NULL; 20333 ucmd_buf.uscsi_rqlen = 0; 20334 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 20335 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 20336 20337 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20338 UIO_SYSSPACE, SD_PATH_DIRECT); 20339 20340 /* 20341 * Only handle status == 0; the upper-level caller 20342 * will make a different assessment based on the context. 20343 */ 20344 if (status == 0) 20345 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20346 20347 if ((status == 0) && (residp != NULL)) { 20348 *residp = ucmd_buf.uscsi_resid; 20349 } 20350 20351 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 20352 20353 return (status); 20354 } 20355 20356 20357 /* 20358 * Function: sd_send_scsi_TEST_UNIT_READY 20359 * 20360 * Description: Issue the scsi TEST UNIT READY command. 20361 * This routine can be told to set the flag USCSI_DIAGNOSE to 20362 * prevent retrying failed commands. Use this when the intent 20363 * is either to check for device readiness, to clear a Unit 20364 * Attention, or to clear any outstanding sense data. 20365 * However under specific conditions the expected behavior 20366 * is for retries to bring a device ready, so use the flag 20367 * with caution. 20368 * 20369 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20370 * structure for this target. 20371 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 20372 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 20373 * 0: don't check for media present, do retries on cmd.
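 * * (Illustrative usage, assuming an initialized sd_ssc_t: a media-presence poll is issued as sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA), while a one-shot probe that must not retry uses sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR).)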
20374 * 20375 * Return Code: 0 - Success 20376 * EIO - IO error 20377 * EACCES - Reservation conflict detected 20378 * ENXIO - Not Ready, medium not present 20379 * errno return code from sd_ssc_send() 20380 * 20381 * Context: Can sleep. Does not return until command is completed. 20382 */ 20383 20384 static int 20385 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag) 20386 { 20387 struct scsi_extended_sense sense_buf; 20388 union scsi_cdb cdb; 20389 struct uscsi_cmd ucmd_buf; 20390 int status; 20391 struct sd_lun *un; 20392 20393 ASSERT(ssc != NULL); 20394 un = ssc->ssc_un; 20395 ASSERT(un != NULL); 20396 ASSERT(!mutex_owned(SD_MUTEX(un))); 20397 20398 SD_TRACE(SD_LOG_IO, un, 20399 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 20400 20401 /* 20402 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 20403 * timeouts when they receive a TUR and the queue is not empty. Check 20404 * the configuration flag set during attach (indicating the drive has 20405 * this firmware bug) and un_ncmds_in_transport before issuing the 20406 * TUR. If there are 20407 * pending commands, return success; this is a bit arbitrary but is ok 20408 * for non-removables (i.e. the elite1 disks) and non-clustering 20409 * configurations. 20410 */ 20411 if (un->un_f_cfg_tur_check == TRUE) { 20412 mutex_enter(SD_MUTEX(un)); 20413 if (un->un_ncmds_in_transport != 0) { 20414 mutex_exit(SD_MUTEX(un)); 20415 return (0); 20416 } 20417 mutex_exit(SD_MUTEX(un)); 20418 } 20419 20420 bzero(&cdb, sizeof (cdb)); 20421 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20422 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20423 20424 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 20425 20426 ucmd_buf.uscsi_cdb = (char *)&cdb; 20427 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20428 ucmd_buf.uscsi_bufaddr = NULL; 20429 ucmd_buf.uscsi_buflen = 0; 20430 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20431 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20432 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20433 20434 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 20435 if ((flag & SD_DONT_RETRY_TUR) != 0) { 20436 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 20437 } 20438 ucmd_buf.uscsi_timeout = 60; 20439 20440 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20441 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 20442 SD_PATH_STANDARD)); 20443 20444 switch (status) { 20445 case 0: 20446 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20447 break; /* Success! */ 20448 case EIO: 20449 switch (ucmd_buf.uscsi_status) { 20450 case STATUS_RESERVATION_CONFLICT: 20451 status = EACCES; 20452 break; 20453 case STATUS_CHECK: 20454 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 20455 break; 20456 } 20457 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20458 (scsi_sense_key((uint8_t *)&sense_buf) == 20459 KEY_NOT_READY) && 20460 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 20461 status = ENXIO; 20462 } 20463 break; 20464 default: 20465 break; 20466 } 20467 break; 20468 default: 20469 break; 20470 } 20471 20472 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 20473 20474 return (status); 20475 } 20476 20477 /* 20478 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 20479 * 20480 * Description: Issue the scsi PERSISTENT RESERVE IN command. 20481 * 20482 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20483 * structure for this target. usr_cmd - SD_READ_KEYS or SD_READ_RESV (service action for byte 1 of the CDB); data_len - size of the buffer at data_bufp; data_bufp - buffer for the returned parameter data, or NULL to have an internal MHIOC_RESV_KEY_SIZE buffer allocated 20484 * 20485 * Return Code: 0 - Success 20486 * EACCES 20487 * ENOTSUP 20488 * errno return code from sd_ssc_send() 20489 * 20490 * Context: Can sleep.
Does not return until command is completed. 20491 */ 20492 20493 static int 20494 sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd, 20495 uint16_t data_len, uchar_t *data_bufp) 20496 { 20497 struct scsi_extended_sense sense_buf; 20498 union scsi_cdb cdb; 20499 struct uscsi_cmd ucmd_buf; 20500 int status; 20501 int no_caller_buf = FALSE; 20502 struct sd_lun *un; 20503 20504 ASSERT(ssc != NULL); 20505 un = ssc->ssc_un; 20506 ASSERT(un != NULL); 20507 ASSERT(!mutex_owned(SD_MUTEX(un))); 20508 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 20509 20510 SD_TRACE(SD_LOG_IO, un, 20511 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 20512 20513 bzero(&cdb, sizeof (cdb)); 20514 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20515 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20516 if (data_bufp == NULL) { 20517 /* Allocate a default buf if the caller did not give one */ 20518 ASSERT(data_len == 0); 20519 data_len = MHIOC_RESV_KEY_SIZE; 20520 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 20521 no_caller_buf = TRUE; 20522 } 20523 20524 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 20525 cdb.cdb_opaque[1] = usr_cmd; 20526 FORMG1COUNT(&cdb, data_len); 20527 20528 ucmd_buf.uscsi_cdb = (char *)&cdb; 20529 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20530 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 20531 ucmd_buf.uscsi_buflen = data_len; 20532 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20533 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20534 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20535 ucmd_buf.uscsi_timeout = 60; 20536 20537 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20538 UIO_SYSSPACE, SD_PATH_STANDARD); 20539 20540 switch (status) { 20541 case 0: 20542 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20543 20544 break; /* Success! */ 20545 case EIO: 20546 switch (ucmd_buf.uscsi_status) { 20547 case STATUS_RESERVATION_CONFLICT: 20548 status = EACCES; 20549 break; 20550 case STATUS_CHECK: 20551 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20552 (scsi_sense_key((uint8_t *)&sense_buf) == 20553 KEY_ILLEGAL_REQUEST)) { 20554 status = ENOTSUP; 20555 } 20556 break; 20557 default: 20558 break; 20559 } 20560 break; 20561 default: 20562 break; 20563 } 20564 20565 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 20566 20567 if (no_caller_buf == TRUE) { 20568 kmem_free(data_bufp, data_len); 20569 } 20570 20571 return (status); 20572 } 20573 20574 20575 /* 20576 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 20577 * 20578 * Description: This routine is the driver entry point for handling CD-ROM 20579 * multi-host persistent reservation requests (MHIOCGRP_REGISTER, 20580 * MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT, MHIOCGRP_REGISTERANDIGNOREKEY) by sending the SCSI-3 PROUT commands to the 20581 * device. 20582 * 20583 * Arguments: ssc - ssc contains un - pointer to soft state struct 20584 * for the target. 20585 * usr_cmd SCSI-3 reservation facility command (one of 20586 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 20587 * SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_REGISTERANDIGNOREKEY) 20588 * usr_bufp - user provided pointer to a register, reserve descriptor or 20589 * preempt and abort structure (mhioc_register_t, 20590 * mhioc_resv_desc_t, mhioc_preemptandabort_t, mhioc_registerandignorekey_t) 20591 * 20592 * Return Code: 0 - Success 20593 * EACCES 20594 * ENOTSUP 20595 * errno return code from sd_ssc_send() 20596 * 20597 * Context: Can sleep. Does not return until command is completed.
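 * * (Illustrative usage, not code from this file: a MHIOCGRP_REGISTER request carrying a caller-supplied mhioc_register_t "reg" would be forwarded as status = sd_send_scsi_PERSISTENT_RESERVE_OUT(ssc, SD_SCSI3_REGISTER, (uchar_t *)&reg); with EACCES mapped back to the caller on a reservation conflict.)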
20598 */ 20599 20600 static int 20601 sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd, 20602 uchar_t *usr_bufp) 20603 { 20604 struct scsi_extended_sense sense_buf; 20605 union scsi_cdb cdb; 20606 struct uscsi_cmd ucmd_buf; 20607 int status; 20608 uchar_t data_len = sizeof (sd_prout_t); 20609 sd_prout_t *prp; 20610 struct sd_lun *un; 20611 20612 ASSERT(ssc != NULL); 20613 un = ssc->ssc_un; 20614 ASSERT(un != NULL); 20615 ASSERT(!mutex_owned(SD_MUTEX(un))); 20616 ASSERT(data_len == 24); /* required by scsi spec */ 20617 20618 SD_TRACE(SD_LOG_IO, un, 20619 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 20620 20621 if (usr_bufp == NULL) { 20622 return (EINVAL); 20623 } 20624 20625 bzero(&cdb, sizeof (cdb)); 20626 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20627 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20628 prp = kmem_zalloc(data_len, KM_SLEEP); 20629 20630 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 20631 cdb.cdb_opaque[1] = usr_cmd; 20632 FORMG1COUNT(&cdb, data_len); 20633 20634 ucmd_buf.uscsi_cdb = (char *)&cdb; 20635 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20636 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 20637 ucmd_buf.uscsi_buflen = data_len; 20638 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20639 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20640 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 20641 ucmd_buf.uscsi_timeout = 60; 20642 20643 switch (usr_cmd) { 20644 case SD_SCSI3_REGISTER: { 20645 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 20646 20647 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20648 bcopy(ptr->newkey.key, prp->service_key, 20649 MHIOC_RESV_KEY_SIZE); 20650 prp->aptpl = ptr->aptpl; 20651 break; 20652 } 20653 case SD_SCSI3_RESERVE: 20654 case SD_SCSI3_RELEASE: { 20655 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 20656 20657 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20658 prp->scope_address = BE_32(ptr->scope_specific_addr); 20659 cdb.cdb_opaque[2] = ptr->type; 20660 break; 20661 } 20662 case SD_SCSI3_PREEMPTANDABORT: { 20663 mhioc_preemptandabort_t *ptr = 20664 (mhioc_preemptandabort_t *)usr_bufp; 20665 20666 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20667 bcopy(ptr->victim_key.key, prp->service_key, 20668 MHIOC_RESV_KEY_SIZE); 20669 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 20670 cdb.cdb_opaque[2] = ptr->resvdesc.type; 20671 ucmd_buf.uscsi_flags |= USCSI_HEAD; 20672 break; 20673 } 20674 case SD_SCSI3_REGISTERANDIGNOREKEY: 20675 { 20676 mhioc_registerandignorekey_t *ptr; 20677 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 20678 bcopy(ptr->newkey.key, 20679 prp->service_key, MHIOC_RESV_KEY_SIZE); 20680 prp->aptpl = ptr->aptpl; 20681 break; 20682 } 20683 default: 20684 ASSERT(FALSE); 20685 break; 20686 } 20687 20688 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20689 UIO_SYSSPACE, SD_PATH_STANDARD); 20690 20691 switch (status) { 20692 case 0: 20693 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20694 break; /* Success! 
*/ 20695 case EIO: 20696 switch (ucmd_buf.uscsi_status) { 20697 case STATUS_RESERVATION_CONFLICT: 20698 status = EACCES; 20699 break; 20700 case STATUS_CHECK: 20701 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20702 (scsi_sense_key((uint8_t *)&sense_buf) == 20703 KEY_ILLEGAL_REQUEST)) { 20704 status = ENOTSUP; 20705 } 20706 break; 20707 default: 20708 break; 20709 } 20710 break; 20711 default: 20712 break; 20713 } 20714 20715 kmem_free(prp, data_len); 20716 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 20717 return (status); 20718 } 20719 20720 20721 /* 20722 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 20723 * 20724 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 20725 * 20726 * Arguments: un - pointer to the target's soft state struct 20727 * dkc - pointer to the callback structure 20728 * 20729 * Return Code: 0 - success 20730 * errno-type error code 20731 * 20732 * Context: kernel thread context only. 20733 * 20734 * _______________________________________________________________ 20735 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 20736 * |FLUSH_VOLATILE| | operation | 20737 * |______________|______________|_________________________________| 20738 * | 0 | NULL | Synchronous flush on both | 20739 * | | | volatile and non-volatile cache | 20740 * |______________|______________|_________________________________| 20741 * | 1 | NULL | Synchronous flush on volatile | 20742 * | | | cache; disk drivers may suppress| 20743 * | | | flush if disk table indicates | 20744 * | | | non-volatile cache | 20745 * |______________|______________|_________________________________| 20746 * | 0 | !NULL | Asynchronous flush on both | 20747 * | | | volatile and non-volatile cache;| 20748 * |______________|______________|_________________________________| 20749 * | 1 | !NULL | Asynchronous flush on volatile | 20750 * | | | cache; disk drivers may suppress| 20751 * | | | flush if disk table indicates | 20752 * | | | non-volatile cache | 20753 * |______________|______________|_________________________________| 20754 * 20755 */ 20756 20757 static int 20758 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 20759 { 20760 struct sd_uscsi_info *uip; 20761 struct uscsi_cmd *uscmd; 20762 union scsi_cdb *cdb; 20763 struct buf *bp; 20764 int rval = 0; 20765 int is_async; 20766 20767 SD_TRACE(SD_LOG_IO, un, 20768 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 20769 20770 ASSERT(un != NULL); 20771 ASSERT(!mutex_owned(SD_MUTEX(un))); 20772 20773 if (dkc == NULL || dkc->dkc_callback == NULL) { 20774 is_async = FALSE; 20775 } else { 20776 is_async = TRUE; 20777 } 20778 20779 mutex_enter(SD_MUTEX(un)); 20780 /* check whether cache flush should be suppressed */ 20781 if (un->un_f_suppress_cache_flush == TRUE) { 20782 mutex_exit(SD_MUTEX(un)); 20783 /* 20784 * suppress the cache flush if the device is told to do 20785 * so by sd.conf or disk table 20786 */ 20787 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 20788 skip the cache flush since suppress_cache_flush is %d!\n", 20789 un->un_f_suppress_cache_flush); 20790 20791 if (is_async == TRUE) { 20792 /* invoke callback for asynchronous flush */ 20793 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 20794 } 20795 return (rval); 20796 } 20797 mutex_exit(SD_MUTEX(un)); 20798 20799 /* 20800 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 20801 * set properly 20802 */ 20803 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 20804 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 20805 20806 
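/* (Illustrative aside, no additional logic: SYNCHRONIZE CACHE(10) is opcode 0x35, and per SBC-2 the SYNC_NV bit is bit 2 of CDB byte 1; assuming SD_SYNC_NV_BIT encodes that bit, OR-ing it in below turns the CDB from 0x35 0x00 ... into 0x35 0x04 ....) */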
mutex_enter(SD_MUTEX(un)); 20807 if (dkc != NULL && un->un_f_sync_nv_supported && 20808 (dkc->dkc_flag & FLUSH_VOLATILE)) { 20809 /* 20810 * if the device supports SYNC_NV bit, turn on 20811 * the SYNC_NV bit to only flush volatile cache 20812 */ 20813 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 20814 } 20815 mutex_exit(SD_MUTEX(un)); 20816 20817 /* 20818 * First get some memory for the uscsi_cmd struct and cdb 20819 * and initialize for SYNCHRONIZE_CACHE cmd. 20820 */ 20821 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 20822 uscmd->uscsi_cdblen = CDB_GROUP1; 20823 uscmd->uscsi_cdb = (caddr_t)cdb; 20824 uscmd->uscsi_bufaddr = NULL; 20825 uscmd->uscsi_buflen = 0; 20826 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 20827 uscmd->uscsi_rqlen = SENSE_LENGTH; 20828 uscmd->uscsi_rqresid = SENSE_LENGTH; 20829 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20830 uscmd->uscsi_timeout = sd_io_time; 20831 20832 /* 20833 * Allocate an sd_uscsi_info struct and fill it with the info 20834 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 20835 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 20836 * since we allocate the buf here in this function, we do not 20837 * need to preserve the prior contents of b_private. 20838 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 20839 */ 20840 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 20841 uip->ui_flags = SD_PATH_DIRECT; 20842 uip->ui_cmdp = uscmd; 20843 20844 bp = getrbuf(KM_SLEEP); 20845 bp->b_private = uip; 20846 20847 /* 20848 * Setup buffer to carry uscsi request. 20849 */ 20850 bp->b_flags = B_BUSY; 20851 bp->b_bcount = 0; 20852 bp->b_blkno = 0; 20853 20854 if (is_async == TRUE) { 20855 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 20856 uip->ui_dkc = *dkc; 20857 } 20858 20859 bp->b_edev = SD_GET_DEV(un); 20860 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 20861 20862 /* 20863 * Unset un_f_sync_cache_required flag 20864 */ 20865 mutex_enter(SD_MUTEX(un)); 20866 un->un_f_sync_cache_required = FALSE; 20867 mutex_exit(SD_MUTEX(un)); 20868 20869 (void) sd_uscsi_strategy(bp); 20870 20871 /* 20872 * If synchronous request, wait for completion 20873 * If async just return and let b_iodone callback 20874 * cleanup. 20875 * NOTE: On return, u_ncmds_in_driver will be decremented, 20876 * but it was also incremented in sd_uscsi_strategy(), so 20877 * we should be ok. 20878 */ 20879 if (is_async == FALSE) { 20880 (void) biowait(bp); 20881 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 20882 } 20883 20884 return (rval); 20885 } 20886 20887 20888 static int 20889 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 20890 { 20891 struct sd_uscsi_info *uip; 20892 struct uscsi_cmd *uscmd; 20893 uint8_t *sense_buf; 20894 struct sd_lun *un; 20895 int status; 20896 union scsi_cdb *cdb; 20897 20898 uip = (struct sd_uscsi_info *)(bp->b_private); 20899 ASSERT(uip != NULL); 20900 20901 uscmd = uip->ui_cmdp; 20902 ASSERT(uscmd != NULL); 20903 20904 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 20905 ASSERT(sense_buf != NULL); 20906 20907 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 20908 ASSERT(un != NULL); 20909 20910 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 20911 20912 status = geterror(bp); 20913 switch (status) { 20914 case 0: 20915 break; /* Success! 
*/ 20916 case EIO: 20917 switch (uscmd->uscsi_status) { 20918 case STATUS_RESERVATION_CONFLICT: 20919 /* Ignore reservation conflict */ 20920 status = 0; 20921 goto done; 20922 20923 case STATUS_CHECK: 20924 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 20925 (scsi_sense_key(sense_buf) == 20926 KEY_ILLEGAL_REQUEST)) { 20927 /* Ignore Illegal Request error */ 20928 if (cdb->cdb_un.tag&SD_SYNC_NV_BIT) { 20929 mutex_enter(SD_MUTEX(un)); 20930 un->un_f_sync_nv_supported = FALSE; 20931 mutex_exit(SD_MUTEX(un)); 20932 status = 0; 20933 SD_TRACE(SD_LOG_IO, un, 20934 "un_f_sync_nv_supported \ 20935 is set to false.\n"); 20936 goto done; 20937 } 20938 20939 mutex_enter(SD_MUTEX(un)); 20940 un->un_f_sync_cache_supported = FALSE; 20941 mutex_exit(SD_MUTEX(un)); 20942 SD_TRACE(SD_LOG_IO, un, 20943 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \ 20944 un_f_sync_cache_supported set to false \ 20945 with asc = %x, ascq = %x\n", 20946 scsi_sense_asc(sense_buf), 20947 scsi_sense_ascq(sense_buf)); 20948 status = ENOTSUP; 20949 goto done; 20950 } 20951 break; 20952 default: 20953 break; 20954 } 20955 /* FALLTHRU */ 20956 default: 20957 /* 20958 * Turn on the un_f_sync_cache_required flag 20959 * since the SYNC CACHE command failed 20960 */ 20961 mutex_enter(SD_MUTEX(un)); 20962 un->un_f_sync_cache_required = TRUE; 20963 mutex_exit(SD_MUTEX(un)); 20964 20965 /* 20966 * Don't log an error message if this device 20967 * has removable media. 20968 */ 20969 if (!un->un_f_has_removable_media) { 20970 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 20971 "SYNCHRONIZE CACHE command failed (%d)\n", status); 20972 } 20973 break; 20974 } 20975 20976 done: 20977 if (uip->ui_dkc.dkc_callback != NULL) { 20978 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 20979 } 20980 20981 ASSERT((bp->b_flags & B_REMAPPED) == 0); 20982 freerbuf(bp); 20983 kmem_free(uip, sizeof (struct sd_uscsi_info)); 20984 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 20985 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 20986 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 20987 20988 return (status); 20989 } 20990 20991 20992 /* 20993 * Function: sd_send_scsi_GET_CONFIGURATION 20994 * 20995 * Description: Issues the get configuration command to the device. 20996 * Called from sd_check_for_writable_cd & sd_get_media_info; 20997 * the caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN. 20998 * Arguments: ssc - ssc contains pointer to driver soft state (unit) structure for this target 20999 * ucmdbuf - uscsi command struct to be set up and issued 21000 * rqbuf - buffer for request sense data 21001 * rqbuflen - size of the buffer at rqbuf 21002 * bufaddr - buffer to receive the configuration header 21003 * buflen - size of the buffer at bufaddr 21004 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain, or SD_PATH_DIRECT_PRIORITY to also bypass the normal command waitq 21005 * 21006 * Return Code: 0 - Success 21007 * errno return code from sd_ssc_send() 21008 * 21009 * Context: Can sleep. Does not return until command is completed. 21010 * 21011 */ 21012 21013 static int 21014 sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf, 21015 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 21016 int path_flag) 21017 { 21018 char cdb[CDB_GROUP1]; 21019 int status; 21020 struct sd_lun *un; 21021 21022 ASSERT(ssc != NULL); 21023 un = ssc->ssc_un; 21024 ASSERT(un != NULL); 21025 ASSERT(!mutex_owned(SD_MUTEX(un))); 21026 ASSERT(bufaddr != NULL); 21027 ASSERT(ucmdbuf != NULL); 21028 ASSERT(rqbuf != NULL); 21029 21030 SD_TRACE(SD_LOG_IO, un, 21031 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 21032 21033 bzero(cdb, sizeof (cdb)); 21034 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 21035 bzero(rqbuf, rqbuflen); 21036 bzero(bufaddr, buflen); 21037 21038 /* 21039 * Set up cdb field for the get configuration command.
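 * * (Illustrative restatement: the bytes below form the 10-byte CDB 0x46 0x02 0x00 0x00 0x00 0x00 0x00 0x00 <SD_PROFILE_HEADER_LEN> 0x00, i.e. opcode GET CONFIGURATION with the RT field set to 2 in byte 1 and the allocation length in byte 8.)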
21040 */ 21041 cdb[0] = SCMD_GET_CONFIGURATION; 21042 cdb[1] = 0x02; /* Requested Type */ 21043 cdb[8] = SD_PROFILE_HEADER_LEN; 21044 ucmdbuf->uscsi_cdb = cdb; 21045 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 21046 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 21047 ucmdbuf->uscsi_buflen = buflen; 21048 ucmdbuf->uscsi_timeout = sd_io_time; 21049 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 21050 ucmdbuf->uscsi_rqlen = rqbuflen; 21051 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 21052 21053 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 21054 UIO_SYSSPACE, path_flag); 21055 21056 switch (status) { 21057 case 0: 21058 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21059 break; /* Success! */ 21060 case EIO: 21061 switch (ucmdbuf->uscsi_status) { 21062 case STATUS_RESERVATION_CONFLICT: 21063 status = EACCES; 21064 break; 21065 default: 21066 break; 21067 } 21068 break; 21069 default: 21070 break; 21071 } 21072 21073 if (status == 0) { 21074 SD_DUMP_MEMORY(un, SD_LOG_IO, 21075 "sd_send_scsi_GET_CONFIGURATION: data", 21076 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 21077 } 21078 21079 SD_TRACE(SD_LOG_IO, un, 21080 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 21081 21082 return (status); 21083 } 21084 21085 /* 21086 * Function: sd_send_scsi_feature_GET_CONFIGURATION 21087 * 21088 * Description: Issues the get configuration command to the device to 21089 * retrieve a specific feature. Called from 21090 * sd_check_for_writable_cd & sd_set_mmc_caps. 21091 * Arguments: ssc - ssc contains pointer to driver soft state (unit) structure for this target 21092 * ucmdbuf - uscsi command struct to be set up and issued 21093 * rqbuf - buffer for request sense data 21094 * rqbuflen - size of the buffer at rqbuf 21095 * bufaddr - buffer to receive the feature data 21096 * buflen - size of the buffer at bufaddr 21097 * feature - feature number for byte 3 of the CDB (path_flag selects the USCSI chain, as above) 21098 * 21099 * Return Code: 0 - Success 21100 * errno return code from sd_ssc_send() 21101 * 21102 * Context: Can sleep. Does not return until command is completed. 21103 * 21104 */ 21105 static int 21106 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 21107 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 21108 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 21109 { 21110 char cdb[CDB_GROUP1]; 21111 int status; 21112 struct sd_lun *un; 21113 21114 ASSERT(ssc != NULL); 21115 un = ssc->ssc_un; 21116 ASSERT(un != NULL); 21117 ASSERT(!mutex_owned(SD_MUTEX(un))); 21118 ASSERT(bufaddr != NULL); 21119 ASSERT(ucmdbuf != NULL); 21120 ASSERT(rqbuf != NULL); 21121 21122 SD_TRACE(SD_LOG_IO, un, 21123 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 21124 21125 bzero(cdb, sizeof (cdb)); 21126 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 21127 bzero(rqbuf, rqbuflen); 21128 bzero(bufaddr, buflen); 21129 21130 /* 21131 * Set up cdb field for the get configuration command. 21132 */ 21133 cdb[0] = SCMD_GET_CONFIGURATION; 21134 cdb[1] = 0x02; /* Requested Type */ 21135 cdb[3] = feature; 21136 cdb[8] = buflen; 21137 ucmdbuf->uscsi_cdb = cdb; 21138 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 21139 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 21140 ucmdbuf->uscsi_buflen = buflen; 21141 ucmdbuf->uscsi_timeout = sd_io_time; 21142 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 21143 ucmdbuf->uscsi_rqlen = rqbuflen; 21144 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 21145 21146 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 21147 UIO_SYSSPACE, path_flag); 21148 21149 switch (status) { 21150 case 0: 21151 21152 break; /* Success!
*/ 21153 case EIO: 21154 switch (ucmdbuf->uscsi_status) { 21155 case STATUS_RESERVATION_CONFLICT: 21156 status = EACCES; 21157 break; 21158 default: 21159 break; 21160 } 21161 break; 21162 default: 21163 break; 21164 } 21165 21166 if (status == 0) { 21167 SD_DUMP_MEMORY(un, SD_LOG_IO, 21168 "sd_send_scsi_feature_GET_CONFIGURATION: data", 21169 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 21170 } 21171 21172 SD_TRACE(SD_LOG_IO, un, 21173 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 21174 21175 return (status); 21176 } 21177 21178 21179 /* 21180 * Function: sd_send_scsi_MODE_SENSE 21181 * 21182 * Description: Utility function for issuing a scsi MODE SENSE command. 21183 * Note: This routine uses a consistent implementation for Group0, 21184 * Group1, and Group2 commands across all platforms. ATAPI devices 21185 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 21186 * 21187 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21188 * structure for this target. 21189 * cdbsize - size of the CDB to be used (CDB_GROUP0 (6 byte) or 21190 * CDB_GROUP[1|2] (10 byte)). 21191 * bufaddr - buffer for page data retrieved from the target. 21192 * buflen - size of page to be retrieved. 21193 * page_code - page code of data to be retrieved from the target. 21194 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 21195 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 21196 * to use the USCSI "direct" chain and bypass the normal 21197 * command waitq. 21198 * 21199 * Return Code: 0 - Success 21200 * errno return code from sd_ssc_send() 21201 * 21202 * Context: Can sleep. Does not return until command is completed. 21203 */ 21204 21205 static int 21206 sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 21207 size_t buflen, uchar_t page_code, int path_flag) 21208 { 21209 struct scsi_extended_sense sense_buf; 21210 union scsi_cdb cdb; 21211 struct uscsi_cmd ucmd_buf; 21212 int status; 21213 int headlen; 21214 struct sd_lun *un; 21215 21216 ASSERT(ssc != NULL); 21217 un = ssc->ssc_un; 21218 ASSERT(un != NULL); 21219 ASSERT(!mutex_owned(SD_MUTEX(un))); 21220 ASSERT(bufaddr != NULL); 21221 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 21222 (cdbsize == CDB_GROUP2)); 21223 21224 SD_TRACE(SD_LOG_IO, un, 21225 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 21226 21227 bzero(&cdb, sizeof (cdb)); 21228 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21229 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21230 bzero(bufaddr, buflen); 21231 21232 if (cdbsize == CDB_GROUP0) { 21233 cdb.scc_cmd = SCMD_MODE_SENSE; 21234 cdb.cdb_opaque[2] = page_code; 21235 FORMG0COUNT(&cdb, buflen); 21236 headlen = MODE_HEADER_LENGTH; 21237 } else { 21238 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 21239 cdb.cdb_opaque[2] = page_code; 21240 FORMG1COUNT(&cdb, buflen); 21241 headlen = MODE_HEADER_LENGTH_GRP2; 21242 } 21243 21244 ASSERT(headlen <= buflen); 21245 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21246 21247 ucmd_buf.uscsi_cdb = (char *)&cdb; 21248 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21249 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21250 ucmd_buf.uscsi_buflen = buflen; 21251 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21252 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21253 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 21254 ucmd_buf.uscsi_timeout = 60; 21255 21256 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21257 UIO_SYSSPACE, path_flag); 21258 21259 switch (status) { 21260 case 0: 21261 /* 21262 * sr_check_wp() uses
the 0x3f page code and checks the header of 21263 * the mode page to determine if the target device is write-protected. 21264 * But some USB devices return 0 bytes for the 0x3f page code. In 21265 * that case, make sure that at least the mode page header is 21266 * returned. 21267 */ 21268 if (buflen - ucmd_buf.uscsi_resid < headlen) { 21269 status = EIO; 21270 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 21271 "mode page header is not returned"); 21272 } 21273 break; /* Success! */ 21274 case EIO: 21275 switch (ucmd_buf.uscsi_status) { 21276 case STATUS_RESERVATION_CONFLICT: 21277 status = EACCES; 21278 break; 21279 default: 21280 break; 21281 } 21282 break; 21283 default: 21284 break; 21285 } 21286 21287 if (status == 0) { 21288 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data", 21289 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21290 } 21291 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n"); 21292 21293 return (status); 21294 } 21295 21296 21297 /* 21298 * Function: sd_send_scsi_MODE_SELECT 21299 * 21300 * Description: Utility function for issuing a scsi MODE SELECT command. 21301 * Note: This routine uses a consistent implementation for Group0, 21302 * Group1, and Group2 commands across all platforms. ATAPI devices 21303 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 21304 * 21305 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21306 * structure for this target. 21307 * cdbsize - size of the CDB to be used (CDB_GROUP0 (6 byte) or 21308 * CDB_GROUP[1|2] (10 byte)). 21309 * bufaddr - buffer holding the page data to be sent to the target. 21310 * buflen - size of the page data to be transferred. 21311 * save_page - boolean to determine if SP bit should be set. 21312 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 21313 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 21314 * to use the USCSI "direct" chain and bypass the normal 21315 * command waitq. 21316 * 21317 * Return Code: 0 - Success 21318 * errno return code from sd_ssc_send() 21319 * 21320 * Context: Can sleep. Does not return until command is completed.
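 * * (Illustrative round trip, assuming an initialized sd_ssc_t and a page buffer "buf" of length "len": a caller typically fetches a page with sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, buf, len, page_code, SD_PATH_DIRECT), modifies the page bytes in place, and writes the result back with sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, buf, len, SD_SAVE_PAGE, SD_PATH_DIRECT).)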
21321 */ 21322 21323 static int 21324 sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 21325 size_t buflen, uchar_t save_page, int path_flag) 21326 { 21327 struct scsi_extended_sense sense_buf; 21328 union scsi_cdb cdb; 21329 struct uscsi_cmd ucmd_buf; 21330 int status; 21331 struct sd_lun *un; 21332 21333 ASSERT(ssc != NULL); 21334 un = ssc->ssc_un; 21335 ASSERT(un != NULL); 21336 ASSERT(!mutex_owned(SD_MUTEX(un))); 21337 ASSERT(bufaddr != NULL); 21338 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 21339 (cdbsize == CDB_GROUP2)); 21340 21341 SD_TRACE(SD_LOG_IO, un, 21342 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un); 21343 21344 bzero(&cdb, sizeof (cdb)); 21345 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21346 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21347 21348 /* Set the PF bit for many third party drives */ 21349 cdb.cdb_opaque[1] = 0x10; 21350 21351 /* Set the savepage(SP) bit if given */ 21352 if (save_page == SD_SAVE_PAGE) { 21353 cdb.cdb_opaque[1] |= 0x01; 21354 } 21355 21356 if (cdbsize == CDB_GROUP0) { 21357 cdb.scc_cmd = SCMD_MODE_SELECT; 21358 FORMG0COUNT(&cdb, buflen); 21359 } else { 21360 cdb.scc_cmd = SCMD_MODE_SELECT_G1; 21361 FORMG1COUNT(&cdb, buflen); 21362 } 21363 21364 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21365 21366 ucmd_buf.uscsi_cdb = (char *)&cdb; 21367 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21368 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21369 ucmd_buf.uscsi_buflen = buflen; 21370 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21371 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21372 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 21373 ucmd_buf.uscsi_timeout = 60; 21374 21375 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21376 UIO_SYSSPACE, path_flag); 21377 21378 switch (status) { 21379 case 0: 21380 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21381 break; /* Success! */ 21382 case EIO: 21383 switch (ucmd_buf.uscsi_status) { 21384 case STATUS_RESERVATION_CONFLICT: 21385 status = EACCES; 21386 break; 21387 default: 21388 break; 21389 } 21390 break; 21391 default: 21392 break; 21393 } 21394 21395 if (status == 0) { 21396 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data", 21397 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21398 } 21399 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n"); 21400 21401 return (status); 21402 } 21403 21404 21405 /* 21406 * Function: sd_send_scsi_RDWR 21407 * 21408 * Description: Issue a scsi READ or WRITE command with the given parameters. 21409 * 21410 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21411 * structure for this target. 21412 * cmd: SCMD_READ or SCMD_WRITE 21413 * bufaddr: Address of caller's buffer to receive the RDWR data 21414 * buflen: Length of caller's buffer to receive the RDWR data. 21415 * start_block: Block number for the start of the RDWR operation. 21416 * (Assumes target-native block size.) 21417 * (Note: this routine takes no residp argument; any residual 21418 * from the transfer is not returned to the caller.) 21419 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 21420 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 21421 * to use the USCSI "direct" chain and bypass the normal 21422 * command waitq. 21423 * 21424 * Return Code: 0 - Success 21425 * errno return code from sd_ssc_send() 21426 * 21427 * Context: Can sleep. Does not return until command is completed.
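 * * (Illustrative usage, assuming a valid target block size and a buffer "blk" of un->un_tgt_blocksize bytes: reading the first block of the device would be issued as sd_send_scsi_RDWR(ssc, SCMD_READ, blk, un->un_tgt_blocksize, (daddr_t)0, SD_PATH_DIRECT).)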
21428 */ 21429 21430 static int 21431 sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 21432 size_t buflen, daddr_t start_block, int path_flag) 21433 { 21434 struct scsi_extended_sense sense_buf; 21435 union scsi_cdb cdb; 21436 struct uscsi_cmd ucmd_buf; 21437 uint32_t block_count; 21438 int status; 21439 int cdbsize; 21440 uchar_t flag; 21441 struct sd_lun *un; 21442 21443 ASSERT(ssc != NULL); 21444 un = ssc->ssc_un; 21445 ASSERT(un != NULL); 21446 ASSERT(!mutex_owned(SD_MUTEX(un))); 21447 ASSERT(bufaddr != NULL); 21448 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 21449 21450 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 21451 21452 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 21453 return (EINVAL); 21454 } 21455 21456 mutex_enter(SD_MUTEX(un)); 21457 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 21458 mutex_exit(SD_MUTEX(un)); 21459 21460 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 21461 21462 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 21463 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 21464 bufaddr, buflen, start_block, block_count); 21465 21466 bzero(&cdb, sizeof (cdb)); 21467 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21468 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21469 21470 /* Compute CDB size to use */ 21471 if (start_block > 0xffffffff) 21472 cdbsize = CDB_GROUP4; 21473 else if ((start_block & 0xFFE00000) || 21474 (un->un_f_cfg_is_atapi == TRUE)) 21475 cdbsize = CDB_GROUP1; 21476 else 21477 cdbsize = CDB_GROUP0; 21478 21479 switch (cdbsize) { 21480 case CDB_GROUP0: /* 6-byte CDBs */ 21481 cdb.scc_cmd = cmd; 21482 FORMG0ADDR(&cdb, start_block); 21483 FORMG0COUNT(&cdb, block_count); 21484 break; 21485 case CDB_GROUP1: /* 10-byte CDBs */ 21486 cdb.scc_cmd = cmd | SCMD_GROUP1; 21487 FORMG1ADDR(&cdb, start_block); 21488 FORMG1COUNT(&cdb, block_count); 21489 break; 21490 case CDB_GROUP4: /* 16-byte CDBs */ 21491 cdb.scc_cmd = cmd | SCMD_GROUP4; 21492 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 21493 FORMG4COUNT(&cdb, block_count); 21494 break; 21495 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 21496 default: 21497 /* All others reserved */ 21498 return (EINVAL); 21499 } 21500 21501 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 21502 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21503 21504 ucmd_buf.uscsi_cdb = (char *)&cdb; 21505 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21506 ucmd_buf.uscsi_bufaddr = bufaddr; 21507 ucmd_buf.uscsi_buflen = buflen; 21508 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21509 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21510 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 21511 ucmd_buf.uscsi_timeout = 60; 21512 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21513 UIO_SYSSPACE, path_flag); 21514 21515 switch (status) { 21516 case 0: 21517 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21518 break; /* Success! */ 21519 case EIO: 21520 switch (ucmd_buf.uscsi_status) { 21521 case STATUS_RESERVATION_CONFLICT: 21522 status = EACCES; 21523 break; 21524 default: 21525 break; 21526 } 21527 break; 21528 default: 21529 break; 21530 } 21531 21532 if (status == 0) { 21533 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 21534 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21535 } 21536 21537 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 21538 21539 return (status); 21540 } 21541 21542 21543 /* 21544 * Function: sd_send_scsi_LOG_SENSE 21545 * 21546 * Description: Issue a scsi LOG_SENSE command with the given parameters. 
21547 * 21548 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21549 * structure for this target. The remaining arguments map onto the LOG SENSE CDB: bufaddr/buflen - data buffer and allocation length; page_code and page_control - CDB byte 2; param_ptr - CDB bytes 5-6; path_flag - USCSI chain selection. 21550 * 21551 * Return Code: 0 - Success 21552 * errno return code from sd_ssc_send() 21553 * 21554 * Context: Can sleep. Does not return until command is completed. 21555 */ 21556 21557 static int 21558 sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen, 21559 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 21560 int path_flag) 21561 21562 { 21563 struct scsi_extended_sense sense_buf; 21564 union scsi_cdb cdb; 21565 struct uscsi_cmd ucmd_buf; 21566 int status; 21567 struct sd_lun *un; 21568 21569 ASSERT(ssc != NULL); 21570 un = ssc->ssc_un; 21571 ASSERT(un != NULL); 21572 ASSERT(!mutex_owned(SD_MUTEX(un))); 21573 21574 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 21575 21576 bzero(&cdb, sizeof (cdb)); 21577 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21578 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21579 21580 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 21581 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 21582 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 21583 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 21584 FORMG1COUNT(&cdb, buflen); 21585 21586 ucmd_buf.uscsi_cdb = (char *)&cdb; 21587 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 21588 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21589 ucmd_buf.uscsi_buflen = buflen; 21590 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21591 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21592 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 21593 ucmd_buf.uscsi_timeout = 60; 21594 21595 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21596 UIO_SYSSPACE, path_flag); 21597 21598 switch (status) { 21599 case 0: 21600 break; 21601 case EIO: 21602 switch (ucmd_buf.uscsi_status) { 21603 case STATUS_RESERVATION_CONFLICT: 21604 status = EACCES; 21605 break; 21606 case STATUS_CHECK: 21607 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 21608 (scsi_sense_key((uint8_t *)&sense_buf) == 21609 KEY_ILLEGAL_REQUEST) && 21610 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 21611 /* 21612 * ASC 0x24: INVALID FIELD IN CDB 21613 */ 21614 switch (page_code) { 21615 case START_STOP_CYCLE_PAGE: 21616 /* 21617 * The start stop cycle counter is 21618 * implemented as page 0x31 in earlier 21619 * generation disks. In new generation 21620 * disks the start stop cycle counter is 21621 * implemented as page 0xE. To properly 21622 * handle this case, if an attempt for 21623 * log page 0xE is made and fails, we 21624 * will try again using page 0x31. 21625 * 21626 * Network storage BU committed to 21627 * maintain the page 0x31 for this 21628 * purpose and will not have any other 21629 * page implemented with page code 0x31 21630 * until all disks transition to the 21631 * standard page.
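 * * (Illustrative restatement: the retry below only rewrites CDB byte 2 - the page control bits are preserved and the page code becomes the vendor-unique value 0x31 - before reissuing the same uscsi command.)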
21632 */ 21633 mutex_enter(SD_MUTEX(un)); 21634 un->un_start_stop_cycle_page = 21635 START_STOP_CYCLE_VU_PAGE; 21636 cdb.cdb_opaque[2] = 21637 (char)(page_control << 6) | 21638 un->un_start_stop_cycle_page; 21639 mutex_exit(SD_MUTEX(un)); 21640 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 21641 status = sd_ssc_send( 21642 ssc, &ucmd_buf, FKIOCTL, 21643 UIO_SYSSPACE, path_flag); 21644 21645 break; 21646 case TEMPERATURE_PAGE: 21647 status = ENOTTY; 21648 break; 21649 default: 21650 break; 21651 } 21652 } 21653 break; 21654 default: 21655 break; 21656 } 21657 break; 21658 default: 21659 break; 21660 } 21661 21662 if (status == 0) { 21663 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21664 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 21665 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21666 } 21667 21668 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 21669 21670 return (status); 21671 } 21672 21673 21674 /* 21675 * Function: sdioctl 21676 * 21677 * Description: Driver's ioctl(9e) entry point function. 21678 * 21679 * Arguments: dev - device number 21680 * cmd - ioctl operation to be performed 21681 * arg - user argument, contains data to be set or reference 21682 * parameter for get 21683 * flag - bit flag, indicating open settings, 32/64 bit type 21684 * cred_p - user credential pointer 21685 * rval_p - calling process return value (OPT) 21686 * 21687 * Return Code: EINVAL 21688 * ENOTTY 21689 * ENXIO 21690 * EIO 21691 * EFAULT 21692 * ENOTSUP 21693 * EPERM 21694 * 21695 * Context: Called from the device switch at normal priority. 21696 */ 21697 21698 static int 21699 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 21700 { 21701 struct sd_lun *un = NULL; 21702 int err = 0; 21703 int i = 0; 21704 cred_t *cr; 21705 int tmprval = EINVAL; 21706 boolean_t is_valid; 21707 sd_ssc_t *ssc; 21708 21709 /* 21710 * All device accesses go thru sdstrategy where we check on suspend 21711 * status 21712 */ 21713 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21714 return (ENXIO); 21715 } 21716 21717 ASSERT(!mutex_owned(SD_MUTEX(un))); 21718 21719 /* Initialize sd_ssc_t for internal uscsi commands */ 21720 ssc = sd_ssc_init(un); 21721 21722 is_valid = SD_IS_VALID_LABEL(un); 21723 21724 /* 21725 * Moved this wait from sd_uscsi_strategy to here for 21726 * reasons of deadlock prevention. Internal driver commands, 21727 * specifically those to change a device's power level, result 21728 * in a call to sd_uscsi_strategy. 21729 */ 21730 mutex_enter(SD_MUTEX(un)); 21731 while ((un->un_state == SD_STATE_SUSPENDED) || 21732 (un->un_state == SD_STATE_PM_CHANGING)) { 21733 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 21734 } 21735 /* 21736 * Twiddling the counter here protects commands from now 21737 * through to the top of sd_uscsi_strategy. Without the 21738 * counter inc., a power down, for example, could get in 21739 * after the above check for state is made and before 21740 * execution gets to the top of sd_uscsi_strategy. 21741 * That would cause problems.
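 * * (Illustrative sketch of the other side of this handshake, not code from this function: a hypothetical power-transition path would block until in-flight commands drain, e.g. mutex_enter(SD_MUTEX(un)); while (un->un_ncmds_in_driver != 0) cv_wait(&un->un_disk_busy_cv, SD_MUTEX(un)); mutex_exit(SD_MUTEX(un)); so the increment below makes this ioctl visible to such a check.)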
21742 */ 21743 un->un_ncmds_in_driver++; 21744 21745 if (!is_valid && 21746 (flag & (FNDELAY | FNONBLOCK))) { 21747 switch (cmd) { 21748 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 21749 case DKIOCGVTOC: 21750 case DKIOCGEXTVTOC: 21751 case DKIOCGAPART: 21752 case DKIOCPARTINFO: 21753 case DKIOCEXTPARTINFO: 21754 case DKIOCSGEOM: 21755 case DKIOCSAPART: 21756 case DKIOCGETEFI: 21757 case DKIOCPARTITION: 21758 case DKIOCSVTOC: 21759 case DKIOCSEXTVTOC: 21760 case DKIOCSETEFI: 21761 case DKIOCGMBOOT: 21762 case DKIOCSMBOOT: 21763 case DKIOCG_PHYGEOM: 21764 case DKIOCG_VIRTGEOM: 21765 #if defined(__i386) || defined(__amd64) 21766 case DKIOCSETEXTPART: 21767 #endif 21768 /* let cmlb handle it */ 21769 goto skip_ready_valid; 21770 21771 case CDROMPAUSE: 21772 case CDROMRESUME: 21773 case CDROMPLAYMSF: 21774 case CDROMPLAYTRKIND: 21775 case CDROMREADTOCHDR: 21776 case CDROMREADTOCENTRY: 21777 case CDROMSTOP: 21778 case CDROMSTART: 21779 case CDROMVOLCTRL: 21780 case CDROMSUBCHNL: 21781 case CDROMREADMODE2: 21782 case CDROMREADMODE1: 21783 case CDROMREADOFFSET: 21784 case CDROMSBLKMODE: 21785 case CDROMGBLKMODE: 21786 case CDROMGDRVSPEED: 21787 case CDROMSDRVSPEED: 21788 case CDROMCDDA: 21789 case CDROMCDXA: 21790 case CDROMSUBCODE: 21791 if (!ISCD(un)) { 21792 un->un_ncmds_in_driver--; 21793 ASSERT(un->un_ncmds_in_driver >= 0); 21794 mutex_exit(SD_MUTEX(un)); 21795 err = ENOTTY; 21796 goto done_without_assess; 21797 } 21798 break; 21799 case FDEJECT: 21800 case DKIOCEJECT: 21801 case CDROMEJECT: 21802 if (!un->un_f_eject_media_supported) { 21803 un->un_ncmds_in_driver--; 21804 ASSERT(un->un_ncmds_in_driver >= 0); 21805 mutex_exit(SD_MUTEX(un)); 21806 err = ENOTTY; 21807 goto done_without_assess; 21808 } 21809 break; 21810 case DKIOCFLUSHWRITECACHE: 21811 mutex_exit(SD_MUTEX(un)); 21812 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 21813 if (err != 0) { 21814 mutex_enter(SD_MUTEX(un)); 21815 un->un_ncmds_in_driver--; 21816 ASSERT(un->un_ncmds_in_driver >= 0); 21817 mutex_exit(SD_MUTEX(un)); 21818 err = EIO; 21819 goto done_quick_assess; 21820 } 21821 mutex_enter(SD_MUTEX(un)); 21822 /* FALLTHROUGH */ 21823 case DKIOCREMOVABLE: 21824 case DKIOCHOTPLUGGABLE: 21825 case DKIOCINFO: 21826 case DKIOCGMEDIAINFO: 21827 case DKIOCGMEDIAINFOEXT: 21828 case MHIOCENFAILFAST: 21829 case MHIOCSTATUS: 21830 case MHIOCTKOWN: 21831 case MHIOCRELEASE: 21832 case MHIOCGRP_INKEYS: 21833 case MHIOCGRP_INRESV: 21834 case MHIOCGRP_REGISTER: 21835 case MHIOCGRP_RESERVE: 21836 case MHIOCGRP_PREEMPTANDABORT: 21837 case MHIOCGRP_REGISTERANDIGNOREKEY: 21838 case CDROMCLOSETRAY: 21839 case USCSICMD: 21840 goto skip_ready_valid; 21841 default: 21842 break; 21843 } 21844 21845 mutex_exit(SD_MUTEX(un)); 21846 err = sd_ready_and_valid(ssc, SDPART(dev)); 21847 mutex_enter(SD_MUTEX(un)); 21848 21849 if (err != SD_READY_VALID) { 21850 switch (cmd) { 21851 case DKIOCSTATE: 21852 case CDROMGDRVSPEED: 21853 case CDROMSDRVSPEED: 21854 case FDEJECT: /* for eject command */ 21855 case DKIOCEJECT: 21856 case CDROMEJECT: 21857 case DKIOCREMOVABLE: 21858 case DKIOCHOTPLUGGABLE: 21859 break; 21860 default: 21861 if (un->un_f_has_removable_media) { 21862 err = ENXIO; 21863 } else { 21864 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 21865 if (err == SD_RESERVED_BY_OTHERS) { 21866 err = EACCES; 21867 } else { 21868 err = EIO; 21869 } 21870 } 21871 un->un_ncmds_in_driver--; 21872 ASSERT(un->un_ncmds_in_driver >= 0); 21873 mutex_exit(SD_MUTEX(un)); 21874 21875 goto done_without_assess; 21876 } 21877 } 21878 } 21879 21880 skip_ready_valid: 21881 
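
	/*
	 * Main dispatch: each case below runs with SD_MUTEX dropped and
	 * is responsible for setting err. As a minimal illustrative
	 * sketch (not part of the driver), a userland caller might
	 * drive one of the DKIOC codes handled here as follows; the
	 * device path is an assumption for the example only, and
	 * opening with O_NDELAY matches the FNDELAY handling above:
	 *
	 *	int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY | O_NDELAY);
	 *	struct dk_minfo mi;
	 *	if (ioctl(fd, DKIOCGMEDIAINFO, &mi) == 0)
	 *		(void) printf("type=%u lbsize=%u cap=%llu\n",
	 *		    mi.dki_media_type, mi.dki_lbsize,
	 *		    (u_longlong_t)mi.dki_capacity);
	 */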
mutex_exit(SD_MUTEX(un)); 21882 21883 switch (cmd) { 21884 case DKIOCINFO: 21885 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 21886 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 21887 break; 21888 21889 case DKIOCGMEDIAINFO: 21890 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 21891 err = sd_get_media_info(dev, (caddr_t)arg, flag); 21892 break; 21893 21894 case DKIOCGMEDIAINFOEXT: 21895 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFOEXT\n"); 21896 err = sd_get_media_info_ext(dev, (caddr_t)arg, flag); 21897 break; 21898 21899 case DKIOCGGEOM: 21900 case DKIOCGVTOC: 21901 case DKIOCGEXTVTOC: 21902 case DKIOCGAPART: 21903 case DKIOCPARTINFO: 21904 case DKIOCEXTPARTINFO: 21905 case DKIOCSGEOM: 21906 case DKIOCSAPART: 21907 case DKIOCGETEFI: 21908 case DKIOCPARTITION: 21909 case DKIOCSVTOC: 21910 case DKIOCSEXTVTOC: 21911 case DKIOCSETEFI: 21912 case DKIOCGMBOOT: 21913 case DKIOCSMBOOT: 21914 case DKIOCG_PHYGEOM: 21915 case DKIOCG_VIRTGEOM: 21916 #if defined(__i386) || defined(__amd64) 21917 case DKIOCSETEXTPART: 21918 #endif 21919 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 21920 21921 /* TUR should spin up */ 21922 21923 if (un->un_f_has_removable_media) 21924 err = sd_send_scsi_TEST_UNIT_READY(ssc, 21925 SD_CHECK_FOR_MEDIA); 21926 21927 else 21928 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 21929 21930 if (err != 0) 21931 goto done_with_assess; 21932 21933 err = cmlb_ioctl(un->un_cmlbhandle, dev, 21934 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 21935 21936 if ((err == 0) && 21937 ((cmd == DKIOCSETEFI) || 21938 (un->un_f_pkstats_enabled) && 21939 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC || 21940 cmd == DKIOCSEXTVTOC))) { 21941 21942 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 21943 (void *)SD_PATH_DIRECT); 21944 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 21945 sd_set_pstats(un); 21946 SD_TRACE(SD_LOG_IO_PARTITION, un, 21947 "sd_ioctl: un:0x%p pstats created and " 21948 "set\n", un); 21949 } 21950 } 21951 21952 if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) || 21953 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 21954 21955 mutex_enter(SD_MUTEX(un)); 21956 if (un->un_f_devid_supported && 21957 (un->un_f_opt_fab_devid == TRUE)) { 21958 if (un->un_devid == NULL) { 21959 sd_register_devid(ssc, SD_DEVINFO(un), 21960 SD_TARGET_IS_UNRESERVED); 21961 } else { 21962 /* 21963 * The device id for this disk 21964 * has been fabricated. The 21965 * device id must be preserved 21966 * by writing it back out to 21967 * disk. 
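 *
 * (Illustrative detail: a fabricated devid lives in a
 * reserved area on the disk itself rather than in the
 * device firmware, which is why a label-changing ioctl
 * must rewrite it; if the rewrite below fails, the
 * in-core copy is dropped so the devid can be
 * re-fabricated later.)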
21968 */ 21969 if (sd_write_deviceid(ssc) != 0) { 21970 ddi_devid_free(un->un_devid); 21971 un->un_devid = NULL; 21972 } 21973 } 21974 } 21975 mutex_exit(SD_MUTEX(un)); 21976 } 21977 21978 break; 21979 21980 case DKIOCLOCK: 21981 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 21982 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 21983 SD_PATH_STANDARD); 21984 goto done_with_assess; 21985 21986 case DKIOCUNLOCK: 21987 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 21988 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 21989 SD_PATH_STANDARD); 21990 goto done_with_assess; 21991 21992 case DKIOCSTATE: { 21993 enum dkio_state state; 21994 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 21995 21996 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 21997 err = EFAULT; 21998 } else { 21999 err = sd_check_media(dev, state); 22000 if (err == 0) { 22001 if (ddi_copyout(&un->un_mediastate, (void *)arg, 22002 sizeof (int), flag) != 0) 22003 err = EFAULT; 22004 } 22005 } 22006 break; 22007 } 22008 22009 case DKIOCREMOVABLE: 22010 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 22011 i = un->un_f_has_removable_media ? 1 : 0; 22012 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22013 err = EFAULT; 22014 } else { 22015 err = 0; 22016 } 22017 break; 22018 22019 case DKIOCHOTPLUGGABLE: 22020 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 22021 i = un->un_f_is_hotpluggable ? 1 : 0; 22022 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22023 err = EFAULT; 22024 } else { 22025 err = 0; 22026 } 22027 break; 22028 22029 case DKIOCGTEMPERATURE: 22030 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 22031 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 22032 break; 22033 22034 case MHIOCENFAILFAST: 22035 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 22036 if ((err = drv_priv(cred_p)) == 0) { 22037 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 22038 } 22039 break; 22040 22041 case MHIOCTKOWN: 22042 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 22043 if ((err = drv_priv(cred_p)) == 0) { 22044 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 22045 } 22046 break; 22047 22048 case MHIOCRELEASE: 22049 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 22050 if ((err = drv_priv(cred_p)) == 0) { 22051 err = sd_mhdioc_release(dev); 22052 } 22053 break; 22054 22055 case MHIOCSTATUS: 22056 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 22057 if ((err = drv_priv(cred_p)) == 0) { 22058 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) { 22059 case 0: 22060 err = 0; 22061 break; 22062 case EACCES: 22063 *rval_p = 1; 22064 err = 0; 22065 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22066 break; 22067 default: 22068 err = EIO; 22069 goto done_with_assess; 22070 } 22071 } 22072 break; 22073 22074 case MHIOCQRESERVE: 22075 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 22076 if ((err = drv_priv(cred_p)) == 0) { 22077 err = sd_reserve_release(dev, SD_RESERVE); 22078 } 22079 break; 22080 22081 case MHIOCREREGISTERDEVID: 22082 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 22083 if (drv_priv(cred_p) == EPERM) { 22084 err = EPERM; 22085 } else if (!un->un_f_devid_supported) { 22086 err = ENOTTY; 22087 } else { 22088 err = sd_mhdioc_register_devid(dev); 22089 } 22090 break; 22091 22092 case MHIOCGRP_INKEYS: 22093 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 22094 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 22095 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22096 err = ENOTSUP; 22097 } else { 22098 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 22099 flag); 22100 } 22101 } 
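
		/*
		 * Note: the MHIOCGRP_* cases that follow share one shape:
		 * check privilege, fail SCSI-2 reservations with ENOTSUP,
		 * copy in the caller's descriptor, then issue PERSISTENT
		 * RESERVE OUT with the matching service action.
		 */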
22102 break; 22103 22104 case MHIOCGRP_INRESV: 22105 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 22106 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 22107 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22108 err = ENOTSUP; 22109 } else { 22110 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 22111 } 22112 } 22113 break; 22114 22115 case MHIOCGRP_REGISTER: 22116 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 22117 if ((err = drv_priv(cred_p)) != EPERM) { 22118 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22119 err = ENOTSUP; 22120 } else if (arg != NULL) { 22121 mhioc_register_t reg; 22122 if (ddi_copyin((void *)arg, ®, 22123 sizeof (mhioc_register_t), flag) != 0) { 22124 err = EFAULT; 22125 } else { 22126 err = 22127 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22128 ssc, SD_SCSI3_REGISTER, 22129 (uchar_t *)®); 22130 if (err != 0) 22131 goto done_with_assess; 22132 } 22133 } 22134 } 22135 break; 22136 22137 case MHIOCGRP_RESERVE: 22138 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 22139 if ((err = drv_priv(cred_p)) != EPERM) { 22140 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22141 err = ENOTSUP; 22142 } else if (arg != NULL) { 22143 mhioc_resv_desc_t resv_desc; 22144 if (ddi_copyin((void *)arg, &resv_desc, 22145 sizeof (mhioc_resv_desc_t), flag) != 0) { 22146 err = EFAULT; 22147 } else { 22148 err = 22149 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22150 ssc, SD_SCSI3_RESERVE, 22151 (uchar_t *)&resv_desc); 22152 if (err != 0) 22153 goto done_with_assess; 22154 } 22155 } 22156 } 22157 break; 22158 22159 case MHIOCGRP_PREEMPTANDABORT: 22160 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 22161 if ((err = drv_priv(cred_p)) != EPERM) { 22162 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22163 err = ENOTSUP; 22164 } else if (arg != NULL) { 22165 mhioc_preemptandabort_t preempt_abort; 22166 if (ddi_copyin((void *)arg, &preempt_abort, 22167 sizeof (mhioc_preemptandabort_t), 22168 flag) != 0) { 22169 err = EFAULT; 22170 } else { 22171 err = 22172 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22173 ssc, SD_SCSI3_PREEMPTANDABORT, 22174 (uchar_t *)&preempt_abort); 22175 if (err != 0) 22176 goto done_with_assess; 22177 } 22178 } 22179 } 22180 break; 22181 22182 case MHIOCGRP_REGISTERANDIGNOREKEY: 22183 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 22184 if ((err = drv_priv(cred_p)) != EPERM) { 22185 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22186 err = ENOTSUP; 22187 } else if (arg != NULL) { 22188 mhioc_registerandignorekey_t r_and_i; 22189 if (ddi_copyin((void *)arg, (void *)&r_and_i, 22190 sizeof (mhioc_registerandignorekey_t), 22191 flag) != 0) { 22192 err = EFAULT; 22193 } else { 22194 err = 22195 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22196 ssc, SD_SCSI3_REGISTERANDIGNOREKEY, 22197 (uchar_t *)&r_and_i); 22198 if (err != 0) 22199 goto done_with_assess; 22200 } 22201 } 22202 } 22203 break; 22204 22205 case USCSICMD: 22206 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 22207 cr = ddi_get_cred(); 22208 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 22209 err = EPERM; 22210 } else { 22211 enum uio_seg uioseg; 22212 22213 uioseg = (flag & FKIOCTL) ? 
UIO_SYSSPACE : 22214 UIO_USERSPACE; 22215 if (un->un_f_format_in_progress == TRUE) { 22216 err = EAGAIN; 22217 break; 22218 } 22219 22220 err = sd_ssc_send(ssc, 22221 (struct uscsi_cmd *)arg, 22222 flag, uioseg, SD_PATH_STANDARD); 22223 if (err != 0) 22224 goto done_with_assess; 22225 else 22226 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 22227 } 22228 break; 22229 22230 case CDROMPAUSE: 22231 case CDROMRESUME: 22232 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 22233 if (!ISCD(un)) { 22234 err = ENOTTY; 22235 } else { 22236 err = sr_pause_resume(dev, cmd); 22237 } 22238 break; 22239 22240 case CDROMPLAYMSF: 22241 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 22242 if (!ISCD(un)) { 22243 err = ENOTTY; 22244 } else { 22245 err = sr_play_msf(dev, (caddr_t)arg, flag); 22246 } 22247 break; 22248 22249 case CDROMPLAYTRKIND: 22250 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 22251 #if defined(__i386) || defined(__amd64) 22252 /* 22253 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 22254 */ 22255 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 22256 #else 22257 if (!ISCD(un)) { 22258 #endif 22259 err = ENOTTY; 22260 } else { 22261 err = sr_play_trkind(dev, (caddr_t)arg, flag); 22262 } 22263 break; 22264 22265 case CDROMREADTOCHDR: 22266 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 22267 if (!ISCD(un)) { 22268 err = ENOTTY; 22269 } else { 22270 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 22271 } 22272 break; 22273 22274 case CDROMREADTOCENTRY: 22275 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 22276 if (!ISCD(un)) { 22277 err = ENOTTY; 22278 } else { 22279 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 22280 } 22281 break; 22282 22283 case CDROMSTOP: 22284 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 22285 if (!ISCD(un)) { 22286 err = ENOTTY; 22287 } else { 22288 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_STOP, 22289 SD_PATH_STANDARD); 22290 goto done_with_assess; 22291 } 22292 break; 22293 22294 case CDROMSTART: 22295 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 22296 if (!ISCD(un)) { 22297 err = ENOTTY; 22298 } else { 22299 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 22300 SD_PATH_STANDARD); 22301 goto done_with_assess; 22302 } 22303 break; 22304 22305 case CDROMCLOSETRAY: 22306 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 22307 if (!ISCD(un)) { 22308 err = ENOTTY; 22309 } else { 22310 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_CLOSE, 22311 SD_PATH_STANDARD); 22312 goto done_with_assess; 22313 } 22314 break; 22315 22316 case FDEJECT: /* for eject command */ 22317 case DKIOCEJECT: 22318 case CDROMEJECT: 22319 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 22320 if (!un->un_f_eject_media_supported) { 22321 err = ENOTTY; 22322 } else { 22323 err = sr_eject(dev); 22324 } 22325 break; 22326 22327 case CDROMVOLCTRL: 22328 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 22329 if (!ISCD(un)) { 22330 err = ENOTTY; 22331 } else { 22332 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 22333 } 22334 break; 22335 22336 case CDROMSUBCHNL: 22337 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 22338 if (!ISCD(un)) { 22339 err = ENOTTY; 22340 } else { 22341 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 22342 } 22343 break; 22344 22345 case CDROMREADMODE2: 22346 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 22347 if (!ISCD(un)) { 22348 err = ENOTTY; 22349 } else if (un->un_f_cfg_is_atapi == TRUE) { 22350 /* 22351 * If the drive supports READ CD, use that instead of 22352 * switching the LBA size via a MODE SELECT 22353 * Block Descriptor 22354 */ 22355 err = 
sr_read_cd_mode2(dev, (caddr_t)arg, flag); 22356 } else { 22357 err = sr_read_mode2(dev, (caddr_t)arg, flag); 22358 } 22359 break; 22360 22361 case CDROMREADMODE1: 22362 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 22363 if (!ISCD(un)) { 22364 err = ENOTTY; 22365 } else { 22366 err = sr_read_mode1(dev, (caddr_t)arg, flag); 22367 } 22368 break; 22369 22370 case CDROMREADOFFSET: 22371 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 22372 if (!ISCD(un)) { 22373 err = ENOTTY; 22374 } else { 22375 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 22376 flag); 22377 } 22378 break; 22379 22380 case CDROMSBLKMODE: 22381 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 22382 /* 22383 * There is no means of changing the block size on ATAPI 22384 * drives, so return ENOTTY if the drive type is ATAPI 22385 */ 22386 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 22387 err = ENOTTY; 22388 } else if (un->un_f_mmc_cap == TRUE) { 22389 22390 /* 22391 * MMC Devices do not support changing the 22392 * logical block size 22393 * 22394 * Note: EINVAL is being returned instead of ENOTTY to 22395 * maintain consistency with the original mmc 22396 * driver update. 22397 */ 22398 err = EINVAL; 22399 } else { 22400 mutex_enter(SD_MUTEX(un)); 22401 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 22402 (un->un_ncmds_in_transport > 0)) { 22403 mutex_exit(SD_MUTEX(un)); 22404 err = EINVAL; 22405 } else { 22406 mutex_exit(SD_MUTEX(un)); 22407 err = sr_change_blkmode(dev, cmd, arg, flag); 22408 } 22409 } 22410 break; 22411 22412 case CDROMGBLKMODE: 22413 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 22414 if (!ISCD(un)) { 22415 err = ENOTTY; 22416 } else if ((un->un_f_cfg_is_atapi != FALSE) && 22417 (un->un_f_blockcount_is_valid != FALSE)) { 22418 /* 22419 * Drive is an ATAPI drive so return target block 22420 * size for ATAPI drives since we cannot change the 22421 * blocksize on ATAPI drives. Used primarily to detect 22422 * if an ATAPI cdrom is present. 22423 */ 22424 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 22425 sizeof (int), flag) != 0) { 22426 err = EFAULT; 22427 } else { 22428 err = 0; 22429 } 22430 22431 } else { 22432 /* 22433 * Drive supports changing block sizes via a Mode 22434 * Select. 22435 */ 22436 err = sr_change_blkmode(dev, cmd, arg, flag); 22437 } 22438 break; 22439 22440 case CDROMGDRVSPEED: 22441 case CDROMSDRVSPEED: 22442 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 22443 if (!ISCD(un)) { 22444 err = ENOTTY; 22445 } else if (un->un_f_mmc_cap == TRUE) { 22446 /* 22447 * Note: In the future the driver implementation 22448 * for getting and 22449 * setting cd speed should entail: 22450 * 1) If non-mmc try the Toshiba mode page 22451 * (sr_change_speed) 22452 * 2) If mmc but no support for Real Time Streaming try 22453 * the SET CD SPEED (0xBB) command 22454 * (sr_atapi_change_speed) 22455 * 3) If mmc and support for Real Time Streaming 22456 * try the GET PERFORMANCE and SET STREAMING 22457 * commands (not yet implemented, 4380808) 22458 */ 22459 /* 22460 * As per recent MMC spec, CD-ROM speed is variable 22461 * and changes with LBA. Since there is no such 22462 * thing as drive speed now, fail this ioctl. 22463 * 22464 * Note: EINVAL is returned for consistency with the 22465 * original implementation, which included support for getting 22466 * the drive speed of mmc devices but not setting 22467 * the drive speed. Thus EINVAL would be returned 22468 * if a set request was made for an mmc device.
22469 * We no longer support get or set speed for 22470 * mmc but need to remain consistent with regard 22471 * to the error code returned. 22472 */ 22473 err = EINVAL; 22474 } else if (un->un_f_cfg_is_atapi == TRUE) { 22475 err = sr_atapi_change_speed(dev, cmd, arg, flag); 22476 } else { 22477 err = sr_change_speed(dev, cmd, arg, flag); 22478 } 22479 break; 22480 22481 case CDROMCDDA: 22482 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 22483 if (!ISCD(un)) { 22484 err = ENOTTY; 22485 } else { 22486 err = sr_read_cdda(dev, (void *)arg, flag); 22487 } 22488 break; 22489 22490 case CDROMCDXA: 22491 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 22492 if (!ISCD(un)) { 22493 err = ENOTTY; 22494 } else { 22495 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 22496 } 22497 break; 22498 22499 case CDROMSUBCODE: 22500 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 22501 if (!ISCD(un)) { 22502 err = ENOTTY; 22503 } else { 22504 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 22505 } 22506 break; 22507 22508 22509 #ifdef SDDEBUG 22510 /* RESET/ABORTS testing ioctls */ 22511 case DKIOCRESET: { 22512 int reset_level; 22513 22514 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 22515 err = EFAULT; 22516 } else { 22517 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 22518 "reset_level = 0x%x\n", reset_level); 22519 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 22520 err = 0; 22521 } else { 22522 err = EIO; 22523 } 22524 } 22525 break; 22526 } 22527 22528 case DKIOCABORT: 22529 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 22530 if (scsi_abort(SD_ADDRESS(un), NULL)) { 22531 err = 0; 22532 } else { 22533 err = EIO; 22534 } 22535 break; 22536 #endif 22537 22538 #ifdef SD_FAULT_INJECTION 22539 /* SDIOC FaultInjection testing ioctls */ 22540 case SDIOCSTART: 22541 case SDIOCSTOP: 22542 case SDIOCINSERTPKT: 22543 case SDIOCINSERTXB: 22544 case SDIOCINSERTUN: 22545 case SDIOCINSERTARQ: 22546 case SDIOCPUSH: 22547 case SDIOCRETRIEVE: 22548 case SDIOCRUN: 22549 SD_INFO(SD_LOG_SDTEST, un, "sdioctl: " 22550 "SDIOC detected cmd:0x%X:\n", cmd); 22551 /* call error generator */ 22552 sd_faultinjection_ioctl(cmd, arg, un); 22553 err = 0; 22554 break; 22555 22556 #endif /* SD_FAULT_INJECTION */ 22557 22558 case DKIOCFLUSHWRITECACHE: 22559 { 22560 struct dk_callback *dkc = (struct dk_callback *)arg; 22561 22562 mutex_enter(SD_MUTEX(un)); 22563 if (!un->un_f_sync_cache_supported || 22564 !un->un_f_write_cache_enabled) { 22565 err = un->un_f_sync_cache_supported ? 22566 0 : ENOTSUP; 22567 mutex_exit(SD_MUTEX(un)); 22568 if ((flag & FKIOCTL) && dkc != NULL && 22569 dkc->dkc_callback != NULL) { 22570 (*dkc->dkc_callback)(dkc->dkc_cookie, 22571 err); 22572 /* 22573 * Did callback and reported error. 22574 * Since we did a callback, ioctl 22575 * should return 0.
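 *
 * (Illustrative contract for the dk_callback form: a
 * kernel caller sets dkc_callback and dkc_cookie, and
 * treats the second argument of the eventual callback,
 * as invoked here, as the errno result of the flush.)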
22576 */ 22577 err = 0; 22578 } 22579 break; 22580 } 22581 mutex_exit(SD_MUTEX(un)); 22582 22583 if ((flag & FKIOCTL) && dkc != NULL && 22584 dkc->dkc_callback != NULL) { 22585 /* async SYNC CACHE request */ 22586 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 22587 } else { 22588 /* synchronous SYNC CACHE request */ 22589 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 22590 } 22591 } 22592 break; 22593 22594 case DKIOCGETWCE: { 22595 22596 int wce; 22597 22598 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) { 22599 break; 22600 } 22601 22602 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 22603 err = EFAULT; 22604 } 22605 break; 22606 } 22607 22608 case DKIOCSETWCE: { 22609 22610 int wce, sync_supported; 22611 22612 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 22613 err = EFAULT; 22614 break; 22615 } 22616 22617 /* 22618 * Synchronize multiple threads trying to enable 22619 * or disable the cache via the un_f_wcc_cv 22620 * condition variable. 22621 */ 22622 mutex_enter(SD_MUTEX(un)); 22623 22624 /* 22625 * Don't allow the cache to be enabled if the 22626 * config file has it disabled. 22627 */ 22628 if (un->un_f_opt_disable_cache && wce) { 22629 mutex_exit(SD_MUTEX(un)); 22630 err = EINVAL; 22631 break; 22632 } 22633 22634 /* 22635 * Wait for write cache change in progress 22636 * bit to be clear before proceeding. 22637 */ 22638 while (un->un_f_wcc_inprog) 22639 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 22640 22641 un->un_f_wcc_inprog = 1; 22642 22643 if (un->un_f_write_cache_enabled && wce == 0) { 22644 /* 22645 * Disable the write cache. Don't clear 22646 * un_f_write_cache_enabled until after 22647 * the mode select and flush are complete. 22648 */ 22649 sync_supported = un->un_f_sync_cache_supported; 22650 22651 /* 22652 * If cache flush is suppressed, we assume that the 22653 * controller firmware will take care of managing the 22654 * write cache for us: no need to explicitly 22655 * disable it. 22656 */ 22657 if (!un->un_f_suppress_cache_flush) { 22658 mutex_exit(SD_MUTEX(un)); 22659 if ((err = sd_cache_control(ssc, 22660 SD_CACHE_NOCHANGE, 22661 SD_CACHE_DISABLE)) == 0 && 22662 sync_supported) { 22663 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 22664 NULL); 22665 } 22666 } else { 22667 mutex_exit(SD_MUTEX(un)); 22668 } 22669 22670 mutex_enter(SD_MUTEX(un)); 22671 if (err == 0) { 22672 un->un_f_write_cache_enabled = 0; 22673 } 22674 22675 } else if (!un->un_f_write_cache_enabled && wce != 0) { 22676 /* 22677 * Set un_f_write_cache_enabled first, so there is 22678 * no window where the cache is enabled, but the 22679 * bit says it isn't. 22680 */ 22681 un->un_f_write_cache_enabled = 1; 22682 22683 /* 22684 * If cache flush is suppressed, we assume that the 22685 * controller firmware will take care of managing the 22686 * write cache for us: no need to explicitly 22687 * enable it. 
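 *
 * (For example, with un_f_suppress_cache_flush set, the
 * code below skips the MODE SELECT entirely and only
 * flips un_f_write_cache_enabled under SD_MUTEX.)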
22688 */ 22689 if (!un->un_f_suppress_cache_flush) { 22690 mutex_exit(SD_MUTEX(un)); 22691 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE, 22692 SD_CACHE_ENABLE); 22693 } else { 22694 mutex_exit(SD_MUTEX(un)); 22695 } 22696 22697 mutex_enter(SD_MUTEX(un)); 22698 22699 if (err) { 22700 un->un_f_write_cache_enabled = 0; 22701 } 22702 } 22703 22704 un->un_f_wcc_inprog = 0; 22705 cv_broadcast(&un->un_wcc_cv); 22706 mutex_exit(SD_MUTEX(un)); 22707 break; 22708 } 22709 22710 default: 22711 err = ENOTTY; 22712 break; 22713 } 22714 mutex_enter(SD_MUTEX(un)); 22715 un->un_ncmds_in_driver--; 22716 ASSERT(un->un_ncmds_in_driver >= 0); 22717 mutex_exit(SD_MUTEX(un)); 22718 22719 22720 done_without_assess: 22721 sd_ssc_fini(ssc); 22722 22723 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22724 return (err); 22725 22726 done_with_assess: 22727 mutex_enter(SD_MUTEX(un)); 22728 un->un_ncmds_in_driver--; 22729 ASSERT(un->un_ncmds_in_driver >= 0); 22730 mutex_exit(SD_MUTEX(un)); 22731 22732 done_quick_assess: 22733 if (err != 0) 22734 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22735 /* Uninitialize sd_ssc_t pointer */ 22736 sd_ssc_fini(ssc); 22737 22738 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22739 return (err); 22740 } 22741 22742 22743 /* 22744 * Function: sd_dkio_ctrl_info 22745 * 22746 * Description: This routine is the driver entry point for handling controller 22747 * information ioctl requests (DKIOCINFO). 22748 * 22749 * Arguments: dev - the device number 22750 * arg - pointer to user provided dk_cinfo structure 22751 * specifying the controller type and attributes. 22752 * flag - this argument is a pass through to ddi_copyxxx() 22753 * directly from the mode argument of ioctl(). 22754 * 22755 * Return Code: 0 22756 * EFAULT 22757 * ENXIO 22758 */ 22759 22760 static int 22761 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 22762 { 22763 struct sd_lun *un = NULL; 22764 struct dk_cinfo *info; 22765 dev_info_t *pdip; 22766 int lun, tgt; 22767 22768 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22769 return (ENXIO); 22770 } 22771 22772 info = (struct dk_cinfo *) 22773 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 22774 22775 switch (un->un_ctype) { 22776 case CTYPE_CDROM: 22777 info->dki_ctype = DKC_CDROM; 22778 break; 22779 default: 22780 info->dki_ctype = DKC_SCSI_CCS; 22781 break; 22782 } 22783 pdip = ddi_get_parent(SD_DEVINFO(un)); 22784 info->dki_cnum = ddi_get_instance(pdip); 22785 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 22786 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 22787 } else { 22788 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 22789 DK_DEVLEN - 1); 22790 } 22791 22792 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22793 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 22794 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22795 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 22796 22797 /* Unit Information */ 22798 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 22799 info->dki_slave = ((tgt << 3) | lun); 22800 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 22801 DK_DEVLEN - 1); 22802 info->dki_flags = DKI_FMTVOL; 22803 info->dki_partition = SDPART(dev); 22804 22805 /* Max Transfer size of this device in blocks */ 22806 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 22807 info->dki_addr = 0; 22808 info->dki_space = 0; 22809 info->dki_prio = 0; 22810 info->dki_vec = 0; 22811 22812 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 22813 kmem_free(info, sizeof 
(struct dk_cinfo)); 22814 return (EFAULT); 22815 } else { 22816 kmem_free(info, sizeof (struct dk_cinfo)); 22817 return (0); 22818 } 22819 } 22820 22821 22822 /* 22823 * Function: sd_get_media_info 22824 * 22825 * Description: This routine is the driver entry point for handling ioctl 22826 * requests for the media type or command set profile used by the 22827 * drive to operate on the media (DKIOCGMEDIAINFO). 22828 * 22829 * Arguments: dev - the device number 22830 * arg - pointer to user provided dk_minfo structure 22831 * specifying the media type, logical block size and 22832 * drive capacity. 22833 * flag - this argument is a pass through to ddi_copyxxx() 22834 * directly from the mode argument of ioctl(). 22835 * 22836 * Return Code: 0 22837 * EACCES 22838 * EFAULT 22839 * ENXIO 22840 * EIO 22841 */ 22842 22843 static int 22844 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 22845 { 22846 struct sd_lun *un = NULL; 22847 struct uscsi_cmd com; 22848 struct scsi_inquiry *sinq; 22849 struct dk_minfo media_info; 22850 u_longlong_t media_capacity; 22851 uint64_t capacity; 22852 uint_t lbasize; 22853 uchar_t *out_data; 22854 uchar_t *rqbuf; 22855 int rval = 0; 22856 int rtn; 22857 sd_ssc_t *ssc; 22858 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 22859 (un->un_state == SD_STATE_OFFLINE)) { 22860 return (ENXIO); 22861 } 22862 22863 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 22864 22865 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 22866 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 22867 22868 /* Issue a TUR to determine if the drive is ready with media present */ 22869 ssc = sd_ssc_init(un); 22870 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA); 22871 if (rval == ENXIO) { 22872 goto done; 22873 } else if (rval != 0) { 22874 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22875 } 22876 22877 /* Now get configuration data */ 22878 if (ISCD(un)) { 22879 media_info.dki_media_type = DK_CDROM; 22880 22881 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 22882 if (un->un_f_mmc_cap == TRUE) { 22883 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, 22884 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 22885 SD_PATH_STANDARD); 22886 22887 if (rtn) { 22888 /* 22889 * We ignore all failures for CD and need to 22890 * put the assessment before the processing code 22891 * to avoid missing the assessment for FMA.
22892 */ 22893 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22894 /* 22895 * Failed for other than an illegal request 22896 * or command not supported 22897 */ 22898 if ((com.uscsi_status == STATUS_CHECK) && 22899 (com.uscsi_rqstatus == STATUS_GOOD)) { 22900 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 22901 (rqbuf[12] != 0x20)) { 22902 rval = EIO; 22903 goto no_assessment; 22904 } 22905 } 22906 } else { 22907 /* 22908 * The GET CONFIGURATION command succeeded 22909 * so set the media type according to the 22910 * returned data 22911 */ 22912 media_info.dki_media_type = out_data[6]; 22913 media_info.dki_media_type <<= 8; 22914 media_info.dki_media_type |= out_data[7]; 22915 } 22916 } 22917 } else { 22918 /* 22919 * The profile list is not available, so we attempt to identify 22920 * the media type based on the inquiry data 22921 */ 22922 sinq = un->un_sd->sd_inq; 22923 if ((sinq->inq_dtype == DTYPE_DIRECT) || 22924 (sinq->inq_dtype == DTYPE_OPTICAL)) { 22925 /* This is a direct access device or optical disk */ 22926 media_info.dki_media_type = DK_FIXED_DISK; 22927 22928 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 22929 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 22930 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 22931 media_info.dki_media_type = DK_ZIP; 22932 } else if ( 22933 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 22934 media_info.dki_media_type = DK_JAZ; 22935 } 22936 } 22937 } else { 22938 /* 22939 * Not a CD, direct access or optical disk so return 22940 * unknown media 22941 */ 22942 media_info.dki_media_type = DK_UNKNOWN; 22943 } 22944 } 22945 22946 /* Now read the capacity so we can provide the lbasize and capacity */ 22947 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 22948 SD_PATH_DIRECT); 22949 switch (rval) { 22950 case 0: 22951 break; 22952 case EACCES: 22953 rval = EACCES; 22954 goto done; 22955 default: 22956 rval = EIO; 22957 goto done; 22958 } 22959 22960 /* 22961 * If lun is expanded dynamically, update the un structure. 22962 */ 22963 mutex_enter(SD_MUTEX(un)); 22964 if ((un->un_f_blockcount_is_valid == TRUE) && 22965 (un->un_f_tgt_blocksize_is_valid == TRUE) && 22966 (capacity > un->un_blockcount)) { 22967 sd_update_block_info(un, lbasize, capacity); 22968 } 22969 mutex_exit(SD_MUTEX(un)); 22970 22971 media_info.dki_lbsize = lbasize; 22972 media_capacity = capacity; 22973 22974 /* 22975 * sd_send_scsi_READ_CAPACITY() reports capacity in 22976 * un->un_sys_blocksize chunks. So we need to convert it into 22977 * dki_lbsize chunks. 22978 */ 22979 media_capacity *= un->un_sys_blocksize; 22980 media_capacity /= lbasize; 22981 media_info.dki_capacity = media_capacity; 22982 22983 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 22984 rval = EFAULT; 22985 /* Keep the goto; code may be added below this point in the future */ 22986 goto no_assessment; 22987 } 22988 done: 22989 if (rval != 0) { 22990 if (rval == EIO) 22991 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 22992 else 22993 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22994 } 22995 no_assessment: 22996 sd_ssc_fini(ssc); 22997 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 22998 kmem_free(rqbuf, SENSE_LENGTH); 22999 return (rval); 23000 } 23001 23002 /* 23003 * Function: sd_get_media_info_ext 23004 * 23005 * Description: This routine is the driver entry point for handling ioctl 23006 * requests for the media type or command set profile used by the 23007 * drive to operate on the media (DKIOCGMEDIAINFOEXT).
The 23008 * difference between this ioctl and DKIOCGMEDIAINFO is that the return 23009 * value of this ioctl contains both the logical block size and the 23010 * physical block size. 23011 * 23012 * 23013 * Arguments: dev - the device number 23014 * arg - pointer to user provided dk_minfo_ext structure 23015 * specifying the media type, logical block size, 23016 * physical block size and disk capacity. 23017 * flag - this argument is a pass through to ddi_copyxxx() 23018 * directly from the mode argument of ioctl(). 23019 * 23020 * Return Code: 0 23021 * EACCES 23022 * EFAULT 23023 * ENXIO 23024 * EIO 23025 */ 23026 23027 static int 23028 sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag) 23029 { 23030 struct sd_lun *un = NULL; 23031 struct uscsi_cmd com; 23032 struct scsi_inquiry *sinq; 23033 struct dk_minfo_ext media_info_ext; 23034 u_longlong_t media_capacity; 23035 uint64_t capacity; 23036 uint_t lbasize; 23037 uint_t pbsize; 23038 uchar_t *out_data; 23039 uchar_t *rqbuf; 23040 int rval = 0; 23041 int rtn; 23042 sd_ssc_t *ssc; 23043 23044 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 23045 (un->un_state == SD_STATE_OFFLINE)) { 23046 return (ENXIO); 23047 } 23048 23049 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info_ext: entry\n"); 23050 23051 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 23052 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 23053 ssc = sd_ssc_init(un); 23054 23055 /* Issue a TUR to determine if the drive is ready with media present */ 23056 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA); 23057 if (rval == ENXIO) { 23058 goto done; 23059 } else if (rval != 0) { 23060 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23061 } 23062 23063 /* Now get configuration data */ 23064 if (ISCD(un)) { 23065 media_info_ext.dki_media_type = DK_CDROM; 23066 23067 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 23068 if (un->un_f_mmc_cap == TRUE) { 23069 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, 23070 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 23071 SD_PATH_STANDARD); 23072 23073 if (rtn) { 23074 /* 23075 * We ignore all failures for CD and need to 23076 * put the assessment before the processing code 23077 * to avoid missing the assessment for FMA.
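 *
 * (Worked example for the success path below: the current
 * profile lives in bytes 6-7 of the GET CONFIGURATION
 * header, so out_data[6] = 0x00 and out_data[7] = 0x08
 * would yield dki_media_type 0x0008, the MMC CD-ROM
 * profile; the profile value here is illustrative.)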
23078 */ 23079 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23080 /* 23081 * Failed for other than an illegal request 23082 * or command not supported 23083 */ 23084 if ((com.uscsi_status == STATUS_CHECK) && 23085 (com.uscsi_rqstatus == STATUS_GOOD)) { 23086 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 23087 (rqbuf[12] != 0x20)) { 23088 rval = EIO; 23089 goto no_assessment; 23090 } 23091 } 23092 } else { 23093 /* 23094 * The GET CONFIGURATION command succeeded 23095 * so set the media type according to the 23096 * returned data 23097 */ 23098 media_info_ext.dki_media_type = out_data[6]; 23099 media_info_ext.dki_media_type <<= 8; 23100 media_info_ext.dki_media_type |= out_data[7]; 23101 } 23102 } 23103 } else { 23104 /* 23105 * The profile list is not available, so we attempt to identify 23106 * the media type based on the inquiry data 23107 */ 23108 sinq = un->un_sd->sd_inq; 23109 if ((sinq->inq_dtype == DTYPE_DIRECT) || 23110 (sinq->inq_dtype == DTYPE_OPTICAL)) { 23111 /* This is a direct access device or optical disk */ 23112 media_info_ext.dki_media_type = DK_FIXED_DISK; 23113 23114 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 23115 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 23116 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 23117 media_info_ext.dki_media_type = DK_ZIP; 23118 } else if ( 23119 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 23120 media_info_ext.dki_media_type = DK_JAZ; 23121 } 23122 } 23123 } else { 23124 /* 23125 * Not a CD, direct access or optical disk so return 23126 * unknown media 23127 */ 23128 media_info_ext.dki_media_type = DK_UNKNOWN; 23129 } 23130 } 23131 23132 /* 23133 * Now read the capacity so we can provide the lbasize, 23134 * pbsize and capacity. 23135 */ 23136 rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize, &pbsize, 23137 SD_PATH_DIRECT); 23138 23139 if (rval != 0) { 23140 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 23141 SD_PATH_DIRECT); 23142 23143 switch (rval) { 23144 case 0: 23145 pbsize = lbasize; 23146 media_capacity = capacity; 23147 /* 23148 * sd_send_scsi_READ_CAPACITY() reports capacity in 23149 * un->un_sys_blocksize chunks. So we need to convert 23150 * it into cap.lbsize chunks. 23151 */ 23152 if (un->un_f_has_removable_media) { 23153 media_capacity *= un->un_sys_blocksize; 23154 media_capacity /= lbasize; 23155 } 23156 break; 23157 case EACCES: 23158 rval = EACCES; 23159 goto done; 23160 default: 23161 rval = EIO; 23162 goto done; 23163 } 23164 } else { 23165 media_capacity = capacity; 23166 } 23167 23168 /* 23169 * If lun is expanded dynamically, update the un structure. 
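 *
 * (For example, if the LUN was grown on the storage array
 * after attach, the capacity just read will exceed the
 * cached un_blockcount and sd_update_block_info() below
 * refreshes the cached geometry.)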
23170 */ 23171 mutex_enter(SD_MUTEX(un)); 23172 if ((un->un_f_blockcount_is_valid == TRUE) && 23173 (un->un_f_tgt_blocksize_is_valid == TRUE) && 23174 (capacity > un->un_blockcount)) { 23175 sd_update_block_info(un, lbasize, capacity); 23176 } 23177 mutex_exit(SD_MUTEX(un)); 23178 23179 media_info_ext.dki_lbsize = lbasize; 23180 media_info_ext.dki_capacity = media_capacity; 23181 media_info_ext.dki_pbsize = pbsize; 23182 23183 if (ddi_copyout(&media_info_ext, arg, sizeof (struct dk_minfo_ext), 23184 flag)) { 23185 rval = EFAULT; 23186 goto no_assessment; 23187 } 23188 done: 23189 if (rval != 0) { 23190 if (rval == EIO) 23191 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23192 else 23193 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23194 } 23195 no_assessment: 23196 sd_ssc_fini(ssc); 23197 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 23198 kmem_free(rqbuf, SENSE_LENGTH); 23199 return (rval); 23200 } 23201 23202 /* 23203 * Function: sd_check_media 23204 * 23205 * Description: This utility routine implements the functionality for the 23206 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 23207 * driver state changes from that specified by the user 23208 * (inserted or ejected). For example, if the user specifies 23209 * DKIO_EJECTED and the current media state is inserted this 23210 * routine will immediately return DKIO_INSERTED. However, if the 23211 * current media state is not inserted the user thread will be 23212 * blocked until the drive state changes. If DKIO_NONE is specified 23213 * the user thread will block until a drive state change occurs. 23214 * 23215 * Arguments: dev - the device number 23216 * state - user pointer to a dkio_state, updated with the current 23217 * drive state at return. 23218 * 23219 * Return Code: ENXIO 23220 * EIO 23221 * EAGAIN 23222 * EINTR 23223 */ 23224 23225 static int 23226 sd_check_media(dev_t dev, enum dkio_state state) 23227 { 23228 struct sd_lun *un = NULL; 23229 enum dkio_state prev_state; 23230 opaque_t token = NULL; 23231 int rval = 0; 23232 sd_ssc_t *ssc; 23233 dev_t sub_dev; 23234 23235 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23236 return (ENXIO); 23237 } 23238 23239 /* 23240 * sub_dev is used when submitting request to scsi watch. 23241 * All submissions are unified to use same device number. 23242 */ 23243 sub_dev = sd_make_device(SD_DEVINFO(un)); 23244 23245 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 23246 23247 ssc = sd_ssc_init(un); 23248 23249 mutex_enter(SD_MUTEX(un)); 23250 23251 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 23252 "state=%x, mediastate=%x\n", state, un->un_mediastate); 23253 23254 prev_state = un->un_mediastate; 23255 23256 /* is there anything to do? */ 23257 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 23258 /* 23259 * submit the request to the scsi_watch service; 23260 * scsi_media_watch_cb() does the real work 23261 */ 23262 mutex_exit(SD_MUTEX(un)); 23263 23264 /* 23265 * This change handles the case where a scsi watch request is 23266 * added to a device that is powered down. To accomplish this 23267 * we power up the device before adding the scsi watch request, 23268 * since the scsi watch sends a TUR directly to the device 23269 * which the device cannot handle if it is powered down. 
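 *
 * (Hence the sd_pm_entry()/sd_pm_exit() bracket around
 * scsi_watch_request_submit() below.)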
23270 */ 23271 if (sd_pm_entry(un) != DDI_SUCCESS) { 23272 mutex_enter(SD_MUTEX(un)); 23273 goto done; 23274 } 23275 23276 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 23277 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 23278 (caddr_t)sub_dev); 23279 23280 sd_pm_exit(un); 23281 23282 mutex_enter(SD_MUTEX(un)); 23283 if (token == NULL) { 23284 rval = EAGAIN; 23285 goto done; 23286 } 23287 23288 /* 23289 * This is a special case IOCTL that doesn't return 23290 * until the media state changes. Routine sdpower 23291 * knows about and handles this so don't count it 23292 * as an active cmd in the driver, which would 23293 * keep the device busy to the pm framework. 23294 * If the count isn't decremented the device can't 23295 * be powered down. 23296 */ 23297 un->un_ncmds_in_driver--; 23298 ASSERT(un->un_ncmds_in_driver >= 0); 23299 23300 /* 23301 * if a prior request had been made, this will be the same 23302 * token, as scsi_watch was designed that way. 23303 */ 23304 un->un_swr_token = token; 23305 un->un_specified_mediastate = state; 23306 23307 /* 23308 * now wait for media change 23309 * we will not be signalled unless mediastate == state but it is 23310 * still better to test for this condition, since there is a 23311 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 23312 */ 23313 SD_TRACE(SD_LOG_COMMON, un, 23314 "sd_check_media: waiting for media state change\n"); 23315 while (un->un_mediastate == state) { 23316 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 23317 SD_TRACE(SD_LOG_COMMON, un, 23318 "sd_check_media: waiting for media state " 23319 "was interrupted\n"); 23320 un->un_ncmds_in_driver++; 23321 rval = EINTR; 23322 goto done; 23323 } 23324 SD_TRACE(SD_LOG_COMMON, un, 23325 "sd_check_media: received signal, state=%x\n", 23326 un->un_mediastate); 23327 } 23328 /* 23329 * Inc the counter to indicate the device once again 23330 * has an active outstanding cmd. 23331 */ 23332 un->un_ncmds_in_driver++; 23333 } 23334 23335 /* invalidate geometry */ 23336 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 23337 sr_ejected(un); 23338 } 23339 23340 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 23341 uint64_t capacity; 23342 uint_t lbasize; 23343 23344 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 23345 mutex_exit(SD_MUTEX(un)); 23346 /* 23347 * Since the following routines use SD_PATH_DIRECT, we must 23348 * call PM directly before the upcoming disk accesses. This 23349 * may cause the disk to be power/spin up. 
23350 */ 23351 23352 if (sd_pm_entry(un) == DDI_SUCCESS) { 23353 rval = sd_send_scsi_READ_CAPACITY(ssc, 23354 &capacity, &lbasize, SD_PATH_DIRECT); 23355 if (rval != 0) { 23356 sd_pm_exit(un); 23357 if (rval == EIO) 23358 sd_ssc_assessment(ssc, 23359 SD_FMT_STATUS_CHECK); 23360 else 23361 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23362 mutex_enter(SD_MUTEX(un)); 23363 goto done; 23364 } 23365 } else { 23366 rval = EIO; 23367 mutex_enter(SD_MUTEX(un)); 23368 goto done; 23369 } 23370 mutex_enter(SD_MUTEX(un)); 23371 23372 sd_update_block_info(un, lbasize, capacity); 23373 23374 /* 23375 * Check if the media in the device is writable or not 23376 */ 23377 if (ISCD(un)) { 23378 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 23379 } 23380 23381 mutex_exit(SD_MUTEX(un)); 23382 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 23383 if ((cmlb_validate(un->un_cmlbhandle, 0, 23384 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 23385 sd_set_pstats(un); 23386 SD_TRACE(SD_LOG_IO_PARTITION, un, 23387 "sd_check_media: un:0x%p pstats created and " 23388 "set\n", un); 23389 } 23390 23391 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 23392 SD_PATH_DIRECT); 23393 23394 sd_pm_exit(un); 23395 23396 if (rval != 0) { 23397 if (rval == EIO) 23398 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23399 else 23400 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23401 } 23402 23403 mutex_enter(SD_MUTEX(un)); 23404 } 23405 done: 23406 sd_ssc_fini(ssc); 23407 un->un_f_watcht_stopped = FALSE; 23408 if (token != NULL && un->un_swr_token != NULL) { 23409 /* 23410 * Use of this local token and the mutex ensures that we avoid 23411 * some race conditions associated with terminating the 23412 * scsi watch. 23413 */ 23414 token = un->un_swr_token; 23415 mutex_exit(SD_MUTEX(un)); 23416 (void) scsi_watch_request_terminate(token, 23417 SCSI_WATCH_TERMINATE_WAIT); 23418 if (scsi_watch_get_ref_count(token) == 0) { 23419 mutex_enter(SD_MUTEX(un)); 23420 un->un_swr_token = (opaque_t)NULL; 23421 } else { 23422 mutex_enter(SD_MUTEX(un)); 23423 } 23424 } 23425 23426 /* 23427 * Update the capacity kstat value, if no media previously 23428 * (capacity kstat is 0) and a media has been inserted 23429 * (un_f_blockcount_is_valid == TRUE) 23430 */ 23431 if (un->un_errstats) { 23432 struct sd_errstats *stp = NULL; 23433 23434 stp = (struct sd_errstats *)un->un_errstats->ks_data; 23435 if ((stp->sd_capacity.value.ui64 == 0) && 23436 (un->un_f_blockcount_is_valid == TRUE)) { 23437 stp->sd_capacity.value.ui64 = 23438 (uint64_t)((uint64_t)un->un_blockcount * 23439 un->un_sys_blocksize); 23440 } 23441 } 23442 mutex_exit(SD_MUTEX(un)); 23443 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 23444 return (rval); 23445 } 23446 23447 23448 /* 23449 * Function: sd_delayed_cv_broadcast 23450 * 23451 * Description: Delayed cv_broadcast to allow for target to recover from media 23452 * insertion. 23453 * 23454 * Arguments: arg - driver soft state (unit) structure 23455 */ 23456 23457 static void 23458 sd_delayed_cv_broadcast(void *arg) 23459 { 23460 struct sd_lun *un = arg; 23461 23462 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 23463 23464 mutex_enter(SD_MUTEX(un)); 23465 un->un_dcvb_timeid = NULL; 23466 cv_broadcast(&un->un_state_cv); 23467 mutex_exit(SD_MUTEX(un)); 23468 } 23469 23470 23471 /* 23472 * Function: sd_media_watch_cb 23473 * 23474 * Description: Callback routine used for support of the DKIOCSTATE ioctl. 
This 23475 * routine processes the TUR sense data and updates the driver 23476 * state if a transition has occurred. The user thread 23477 * (sd_check_media) is then signalled. 23478 * 23479 * Arguments: arg - the device 'dev_t' is used for context to discriminate 23480 * among multiple watches that share this callback function 23481 * resultp - scsi watch facility result packet containing scsi 23482 * packet, status byte and sense data 23483 * 23484 * Return Code: 0 for success, -1 for failure 23485 */ 23486 23487 static int 23488 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 23489 { 23490 struct sd_lun *un; 23491 struct scsi_status *statusp = resultp->statusp; 23492 uint8_t *sensep = (uint8_t *)resultp->sensep; 23493 enum dkio_state state = DKIO_NONE; 23494 dev_t dev = (dev_t)arg; 23495 uchar_t actual_sense_length; 23496 uint8_t skey, asc, ascq; 23497 23498 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23499 return (-1); 23500 } 23501 actual_sense_length = resultp->actual_sense_length; 23502 23503 mutex_enter(SD_MUTEX(un)); 23504 SD_TRACE(SD_LOG_COMMON, un, 23505 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 23506 *((char *)statusp), (void *)sensep, actual_sense_length); 23507 23508 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 23509 un->un_mediastate = DKIO_DEV_GONE; 23510 cv_broadcast(&un->un_state_cv); 23511 mutex_exit(SD_MUTEX(un)); 23512 23513 return (0); 23514 } 23515 23516 /* 23517 * If there was a check condition, then sensep points to valid sense data. 23518 * If the status was not a check condition but a reservation or busy status, 23519 * then the new state is DKIO_NONE. 23520 */ 23521 if (sensep != NULL) { 23522 skey = scsi_sense_key(sensep); 23523 asc = scsi_sense_asc(sensep); 23524 ascq = scsi_sense_ascq(sensep); 23525 23526 SD_INFO(SD_LOG_COMMON, un, 23527 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 23528 skey, asc, ascq); 23529 /* This routine only uses up to 13 bytes of sense data. */ 23530 if (actual_sense_length >= 13) { 23531 if (skey == KEY_UNIT_ATTENTION) { 23532 if (asc == 0x28) { 23533 state = DKIO_INSERTED; 23534 } 23535 } else if (skey == KEY_NOT_READY) { 23536 /* 23537 * Sense data of 02/06/00 means that the 23538 * drive could not read the media (No 23539 * reference position found). In this case 23540 * to prevent a hang on the DKIOCSTATE IOCTL 23541 * we set the media state to DKIO_INSERTED. 23542 */ 23543 if (asc == 0x06 && ascq == 0x00) 23544 state = DKIO_INSERTED; 23545 23546 /* 23547 * Sense data of 02/04/02 means that the host 23548 * should send a start command; explicitly 23549 * leave the media state as is (inserted), 23550 * since the media is present and the host has 23551 * stopped the device for PM reasons. The next 23552 * true read/write to this media will bring the 23553 * device to the right state for media access. 23554 * Sense data of 02/3A/xx (medium not 23555 * present) is reported below as ejected. 23556 */ 23557 if (asc == 0x3a) { 23558 state = DKIO_EJECTED; 23559 } else { 23560 /* 23561 * If the drive is busy with an 23562 * operation or long write, keep the 23563 * media in an inserted state. 23564 */ 23565 23566 if ((asc == 0x04) && 23567 ((ascq == 0x02) || 23568 (ascq == 0x07) || 23569 (ascq == 0x08))) { 23570 state = DKIO_INSERTED; 23571 } 23572 } 23573 } else if (skey == KEY_NO_SENSE) { 23574 if ((asc == 0x00) && (ascq == 0x00)) { 23575 /* 23576 * Sense Data 00/00/00 does not provide 23577 * any information about the state of 23578 * the media. Ignore it.
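 *
 * (Quick reference for the mapping implemented above:
 * 06/28/xx -> inserted; 02/06/00 -> inserted;
 * 02/3A/xx -> ejected; 02/04/02, 02/04/07, 02/04/08 ->
 * inserted; 00/00/00 -> no state change.)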
23579 */ 23580 mutex_exit(SD_MUTEX(un)); 23581 return (0); 23582 } 23583 } 23584 } 23585 } else if ((*((char *)statusp) == STATUS_GOOD) && 23586 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 23587 state = DKIO_INSERTED; 23588 } 23589 23590 SD_TRACE(SD_LOG_COMMON, un, 23591 "sd_media_watch_cb: state=%x, specified=%x\n", 23592 state, un->un_specified_mediastate); 23593 23594 /* 23595 * now signal the waiting thread if this is *not* the specified state; 23596 * delay the signal if the state is DKIO_INSERTED to allow the target 23597 * to recover 23598 */ 23599 if (state != un->un_specified_mediastate) { 23600 un->un_mediastate = state; 23601 if (state == DKIO_INSERTED) { 23602 /* 23603 * delay the signal to give the drive a chance 23604 * to do what it apparently needs to do 23605 */ 23606 SD_TRACE(SD_LOG_COMMON, un, 23607 "sd_media_watch_cb: delayed cv_broadcast\n"); 23608 if (un->un_dcvb_timeid == NULL) { 23609 un->un_dcvb_timeid = 23610 timeout(sd_delayed_cv_broadcast, un, 23611 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 23612 } 23613 } else { 23614 SD_TRACE(SD_LOG_COMMON, un, 23615 "sd_media_watch_cb: immediate cv_broadcast\n"); 23616 cv_broadcast(&un->un_state_cv); 23617 } 23618 } 23619 mutex_exit(SD_MUTEX(un)); 23620 return (0); 23621 } 23622 23623 23624 /* 23625 * Function: sd_dkio_get_temp 23626 * 23627 * Description: This routine is the driver entry point for handling ioctl 23628 * requests to get the disk temperature. 23629 * 23630 * Arguments: dev - the device number 23631 * arg - pointer to user provided dk_temperature structure. 23632 * flag - this argument is a pass through to ddi_copyxxx() 23633 * directly from the mode argument of ioctl(). 23634 * 23635 * Return Code: 0 23636 * EFAULT 23637 * ENXIO 23638 * EAGAIN 23639 */ 23640 23641 static int 23642 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 23643 { 23644 struct sd_lun *un = NULL; 23645 struct dk_temperature *dktemp = NULL; 23646 uchar_t *temperature_page; 23647 int rval = 0; 23648 int path_flag = SD_PATH_STANDARD; 23649 sd_ssc_t *ssc; 23650 23651 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23652 return (ENXIO); 23653 } 23654 23655 ssc = sd_ssc_init(un); 23656 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 23657 23658 /* copyin the disk temp argument to get the user flags */ 23659 if (ddi_copyin((void *)arg, dktemp, 23660 sizeof (struct dk_temperature), flag) != 0) { 23661 rval = EFAULT; 23662 goto done; 23663 } 23664 23665 /* Initialize the temperature to invalid. */ 23666 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 23667 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 23668 23669 /* 23670 * Note: Investigate removing the "bypass pm" semantic. 23671 * Can we just bypass PM always? 23672 */ 23673 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 23674 path_flag = SD_PATH_DIRECT; 23675 ASSERT(!mutex_owned(&un->un_pm_mutex)); 23676 mutex_enter(&un->un_pm_mutex); 23677 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 23678 /* 23679 * If DKT_BYPASS_PM is set, and the drive happens to be 23680 * in low power mode, we can not wake it up, Need to 23681 * return EAGAIN. 23682 */ 23683 mutex_exit(&un->un_pm_mutex); 23684 rval = EAGAIN; 23685 goto done; 23686 } else { 23687 /* 23688 * Indicate to PM the device is busy. This is required 23689 * to avoid a race - i.e. the ioctl is issuing a 23690 * command and the pm framework brings down the device 23691 * to low power mode (possible power cut-off on some 23692 * platforms). 
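 *
 * (The matching sd_pm_exit() for this sd_pm_entry() is
 * issued at the done1 label, keyed off path_flag ==
 * SD_PATH_DIRECT.)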
23693 */ 23694 mutex_exit(&un->un_pm_mutex); 23695 if (sd_pm_entry(un) != DDI_SUCCESS) { 23696 rval = EAGAIN; 23697 goto done; 23698 } 23699 } 23700 } 23701 23702 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 23703 23704 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page, 23705 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag); 23706 if (rval != 0) 23707 goto done2; 23708 23709 /* 23710 * For the current temperature verify that the parameter length is 0x02 23711 * and the parameter code is 0x00 23712 */ 23713 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 23714 (temperature_page[5] == 0x00)) { 23715 if (temperature_page[9] == 0xFF) { 23716 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 23717 } else { 23718 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 23719 } 23720 } 23721 23722 /* 23723 * For the reference temperature verify that the parameter 23724 * length is 0x02 and the parameter code is 0x01 23725 */ 23726 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 23727 (temperature_page[11] == 0x01)) { 23728 if (temperature_page[15] == 0xFF) { 23729 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 23730 } else { 23731 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 23732 } 23733 } 23734 23735 /* Do the copyout regardless of the temperature command's status. */ 23736 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 23737 flag) != 0) { 23738 rval = EFAULT; 23739 goto done1; 23740 } 23741 23742 done2: 23743 if (rval != 0) { 23744 if (rval == EIO) 23745 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23746 else 23747 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23748 } 23749 done1: 23750 if (path_flag == SD_PATH_DIRECT) { 23751 sd_pm_exit(un); 23752 } 23753 23754 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 23755 done: 23756 sd_ssc_fini(ssc); 23757 if (dktemp != NULL) { 23758 kmem_free(dktemp, sizeof (struct dk_temperature)); 23759 } 23760 23761 return (rval); 23762 } 23763 23764 23765 /* 23766 * Function: sd_log_page_supported 23767 * 23768 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 23769 * supported log pages. 23770 * 23771 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 23772 * structure for this target. 23773 * log_page - the log page code to search for. 23774 * 23775 * Return Code: -1 - on error (log sense is optional and may not be supported). 23776 * 0 - log page not found. 23777 * 1 - log page found. 23778 */ 23779 23780 static int 23781 sd_log_page_supported(sd_ssc_t *ssc, int log_page) 23782 { 23783 uchar_t *log_page_data; 23784 int i; 23785 int match = 0; 23786 int log_size; 23787 int status = 0; 23788 struct sd_lun *un; 23789 23790 ASSERT(ssc != NULL); 23791 un = ssc->ssc_un; 23792 ASSERT(un != NULL); 23793 23794 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 23795 23796 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0, 23797 SD_PATH_DIRECT); 23798 23799 if (status != 0) { 23800 if (status == EIO) { 23801 /* 23802 * Some disks do not support log sense, so we 23803 * should ignore this kind of error (sense key 23804 * 0x5, illegal request).
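 *
 * (Worked example of the check below: the valid sense
 * length is uscsi_rqlen - uscsi_rqresid, so an 8-byte
 * residual against a 20-byte request buffer leaves 12
 * valid bytes, more than enough to read the sense key;
 * the byte counts are illustrative.)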
23805 */ 23806 uint8_t *sensep; 23807 int senlen; 23808 23809 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 23810 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 23811 ssc->ssc_uscsi_cmd->uscsi_rqresid); 23812 23813 if (senlen > 0 && 23814 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 23815 sd_ssc_assessment(ssc, 23816 SD_FMT_IGNORE_COMPROMISE); 23817 } else { 23818 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23819 } 23820 } else { 23821 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23822 } 23823 23824 SD_ERROR(SD_LOG_COMMON, un, 23825 "sd_log_page_supported: failed log page retrieval\n"); 23826 kmem_free(log_page_data, 0xFF); 23827 return (-1); 23828 } 23829 23830 log_size = log_page_data[3]; 23831 23832 /* 23833 * The list of supported log pages starts at the fourth byte. Check 23834 * until we run out of log pages or a match is found. 23835 */ 23836 for (i = 4; (i < (log_size + 4)) && !match; i++) { 23837 if (log_page_data[i] == log_page) { 23838 match++; 23839 } 23840 } 23841 kmem_free(log_page_data, 0xFF); 23842 return (match); 23843 } 23844 23845 23846 /* 23847 * Function: sd_mhdioc_failfast 23848 * 23849 * Description: This routine is the driver entry point for handling ioctl 23850 * requests to enable/disable the multihost failfast option. 23851 * (MHIOCENFAILFAST) 23852 * 23853 * Arguments: dev - the device number 23854 * arg - user specified probing interval. 23855 * flag - this argument is a pass through to ddi_copyxxx() 23856 * directly from the mode argument of ioctl(). 23857 * 23858 * Return Code: 0 23859 * EFAULT 23860 * ENXIO 23861 */ 23862 23863 static int 23864 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 23865 { 23866 struct sd_lun *un = NULL; 23867 int mh_time; 23868 int rval = 0; 23869 23870 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23871 return (ENXIO); 23872 } 23873 23874 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 23875 return (EFAULT); 23876 23877 if (mh_time) { 23878 mutex_enter(SD_MUTEX(un)); 23879 un->un_resvd_status |= SD_FAILFAST; 23880 mutex_exit(SD_MUTEX(un)); 23881 /* 23882 * If mh_time is INT_MAX, then this ioctl is being used for 23883 * SCSI-3 PGR purposes, and we don't need to spawn the watch 23884 * thread. 23885 */ 23885 if (mh_time != INT_MAX) { 23886 rval = sd_check_mhd(dev, mh_time); 23887 } 23888 } else { 23889 (void) sd_check_mhd(dev, 0); 23890 mutex_enter(SD_MUTEX(un)); 23891 un->un_resvd_status &= ~SD_FAILFAST; 23892 mutex_exit(SD_MUTEX(un)); 23893 } 23894 return (rval); 23895 } 23896 23897 23898 /* 23899 * Function: sd_mhdioc_takeown 23900 * 23901 * Description: This routine is the driver entry point for handling ioctl 23902 * requests to forcefully acquire exclusive access rights to the 23903 * multihost disk (MHIOCTKOWN). 23904 * 23905 * Arguments: dev - the device number 23906 * arg - user provided structure specifying the delay 23907 * parameters in milliseconds 23908 * flag - this argument is a pass through to ddi_copyxxx() 23909 * directly from the mode argument of ioctl().
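 *
 *		Illustrative (hypothetical) userland invocation; the ioctl
 *		and structure come from <sys/mhd.h>, and the delay values
 *		shown are examples only (milliseconds, per above):
 *
 *			struct mhioctkown t;
 *			bzero(&t, sizeof (t));
 *			t.min_ownership_delay = 6000;
 *			t.max_ownership_delay = 30000;
 *			if (ioctl(fd, MHIOCTKOWN, &t) != 0)
 *				perror("MHIOCTKOWN");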
23910 * 23911 * Return Code: 0 23912 * EFAULT 23913 * ENXIO 23914 */ 23915 23916 static int 23917 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 23918 { 23919 struct sd_lun *un = NULL; 23920 struct mhioctkown *tkown = NULL; 23921 int rval = 0; 23922 23923 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23924 return (ENXIO); 23925 } 23926 23927 if (arg != NULL) { 23928 tkown = (struct mhioctkown *) 23929 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 23930 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 23931 if (rval != 0) { 23932 rval = EFAULT; 23933 goto error; 23934 } 23935 } 23936 23937 rval = sd_take_ownership(dev, tkown); 23938 mutex_enter(SD_MUTEX(un)); 23939 if (rval == 0) { 23940 un->un_resvd_status |= SD_RESERVE; 23941 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 23942 sd_reinstate_resv_delay = 23943 tkown->reinstate_resv_delay * 1000; 23944 } else { 23945 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 23946 } 23947 /* 23948 * Give the scsi_watch routine interval set by 23949 * the MHIOCENFAILFAST ioctl precedence here. 23950 */ 23951 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 23952 mutex_exit(SD_MUTEX(un)); 23953 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 23954 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23955 "sd_mhdioc_takeown : %d\n", 23956 sd_reinstate_resv_delay); 23957 } else { 23958 mutex_exit(SD_MUTEX(un)); 23959 } 23960 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 23961 sd_mhd_reset_notify_cb, (caddr_t)un); 23962 } else { 23963 un->un_resvd_status &= ~SD_RESERVE; 23964 mutex_exit(SD_MUTEX(un)); 23965 } 23966 23967 error: 23968 if (tkown != NULL) { 23969 kmem_free(tkown, sizeof (struct mhioctkown)); 23970 } 23971 return (rval); 23972 } 23973 23974 23975 /* 23976 * Function: sd_mhdioc_release 23977 * 23978 * Description: This routine is the driver entry point for handling ioctl 23979 * requests to release exclusive access rights to the multihost 23980 * disk (MHIOCRELEASE). 23981 * 23982 * Arguments: dev - the device number 23983 * 23984 * Return Code: 0 23985 * ENXIO 23986 */ 23987 23988 static int 23989 sd_mhdioc_release(dev_t dev) 23990 { 23991 struct sd_lun *un = NULL; 23992 timeout_id_t resvd_timeid_save; 23993 int resvd_status_save; 23994 int rval = 0; 23995 23996 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23997 return (ENXIO); 23998 } 23999 24000 mutex_enter(SD_MUTEX(un)); 24001 resvd_status_save = un->un_resvd_status; 24002 un->un_resvd_status &= 24003 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 24004 if (un->un_resvd_timeid) { 24005 resvd_timeid_save = un->un_resvd_timeid; 24006 un->un_resvd_timeid = NULL; 24007 mutex_exit(SD_MUTEX(un)); 24008 (void) untimeout(resvd_timeid_save); 24009 } else { 24010 mutex_exit(SD_MUTEX(un)); 24011 } 24012 24013 /* 24014 * destroy any pending timeout thread that may be attempting to 24015 * reinstate reservation on this device. 
24016 */ 24017 sd_rmv_resv_reclaim_req(dev); 24018 24019 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 24020 mutex_enter(SD_MUTEX(un)); 24021 if ((un->un_mhd_token) && 24022 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 24023 mutex_exit(SD_MUTEX(un)); 24024 (void) sd_check_mhd(dev, 0); 24025 } else { 24026 mutex_exit(SD_MUTEX(un)); 24027 } 24028 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 24029 sd_mhd_reset_notify_cb, (caddr_t)un); 24030 } else { 24031 /* 24032 * sd_mhd_watch_cb will restart the resvd recover timeout thread 24033 */ 24034 mutex_enter(SD_MUTEX(un)); 24035 un->un_resvd_status = resvd_status_save; 24036 mutex_exit(SD_MUTEX(un)); 24037 } 24038 return (rval); 24039 } 24040 24041 24042 /* 24043 * Function: sd_mhdioc_register_devid 24044 * 24045 * Description: This routine is the driver entry point for handling ioctl 24046 * requests to register the device id (MHIOCREREGISTERDEVID). 24047 * 24048 * Note: The implementation for this ioctl has been updated to 24049 * be consistent with the original PSARC case (1999/357) 24050 * (4375899, 4241671, 4220005) 24051 * 24052 * Arguments: dev - the device number 24053 * 24054 * Return Code: 0 24055 * ENXIO 24056 */ 24057 24058 static int 24059 sd_mhdioc_register_devid(dev_t dev) 24060 { 24061 struct sd_lun *un = NULL; 24062 int rval = 0; 24063 sd_ssc_t *ssc; 24064 24065 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24066 return (ENXIO); 24067 } 24068 24069 ASSERT(!mutex_owned(SD_MUTEX(un))); 24070 24071 mutex_enter(SD_MUTEX(un)); 24072 24073 /* If a devid already exists, de-register it */ 24074 if (un->un_devid != NULL) { 24075 ddi_devid_unregister(SD_DEVINFO(un)); 24076 /* 24077 * After unregistering the devid, we need to free the devid 24078 * memory. 24078 */ 24079 ddi_devid_free(un->un_devid); 24080 un->un_devid = NULL; 24081 } 24082 24083 /* Check for reservation conflict */ 24084 mutex_exit(SD_MUTEX(un)); 24085 ssc = sd_ssc_init(un); 24086 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 24087 mutex_enter(SD_MUTEX(un)); 24088 24089 switch (rval) { 24090 case 0: 24091 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 24092 break; 24093 case EACCES: 24094 break; 24095 default: 24096 rval = EIO; 24097 } 24098 24099 mutex_exit(SD_MUTEX(un)); 24100 if (rval != 0) { 24101 if (rval == EIO) 24102 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 24103 else 24104 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24105 } 24106 sd_ssc_fini(ssc); 24107 return (rval); 24108 } 24109 24110 24111 /* 24112 * Function: sd_mhdioc_inkeys 24113 * 24114 * Description: This routine is the driver entry point for handling ioctl 24115 * requests to issue the SCSI-3 Persistent In Read Keys command 24116 * to the device (MHIOCGRP_INKEYS). 24117 * 24118 * Arguments: dev - the device number 24119 * arg - user provided in_keys structure 24120 * flag - this argument is a pass through to ddi_copyxxx() 24121 * directly from the mode argument of ioctl().
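 *
 *		Illustrative (hypothetical) userland invocation; the types
 *		and field names are assumed from <sys/mhd.h>, and the list
 *		size is an arbitrary example:
 *
 *			mhioc_resv_key_t keys[4];
 *			mhioc_key_list_t kl;
 *			mhioc_inkeys_t ik;
 *
 *			kl.listsize = 4;
 *			kl.list = keys;
 *			ik.li = &kl;
 *			if (ioctl(fd, MHIOCGRP_INKEYS, &ik) == 0)
 *				(void) printf("generation %u, %u keys\n",
 *				    ik.generation, kl.listlen);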
24122 * 24123 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 24124 * ENXIO 24125 * EFAULT 24126 */ 24127 24128 static int 24129 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 24130 { 24131 struct sd_lun *un; 24132 mhioc_inkeys_t inkeys; 24133 int rval = 0; 24134 24135 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24136 return (ENXIO); 24137 } 24138 24139 #ifdef _MULTI_DATAMODEL 24140 switch (ddi_model_convert_from(flag & FMODELS)) { 24141 case DDI_MODEL_ILP32: { 24142 struct mhioc_inkeys32 inkeys32; 24143 24144 if (ddi_copyin(arg, &inkeys32, 24145 sizeof (struct mhioc_inkeys32), flag) != 0) { 24146 return (EFAULT); 24147 } 24148 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 24149 if ((rval = sd_persistent_reservation_in_read_keys(un, 24150 &inkeys, flag)) != 0) { 24151 return (rval); 24152 } 24153 inkeys32.generation = inkeys.generation; 24154 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 24155 flag) != 0) { 24156 return (EFAULT); 24157 } 24158 break; 24159 } 24160 case DDI_MODEL_NONE: 24161 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 24162 flag) != 0) { 24163 return (EFAULT); 24164 } 24165 if ((rval = sd_persistent_reservation_in_read_keys(un, 24166 &inkeys, flag)) != 0) { 24167 return (rval); 24168 } 24169 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 24170 flag) != 0) { 24171 return (EFAULT); 24172 } 24173 break; 24174 } 24175 24176 #else /* ! _MULTI_DATAMODEL */ 24177 24178 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 24179 return (EFAULT); 24180 } 24181 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 24182 if (rval != 0) { 24183 return (rval); 24184 } 24185 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 24186 return (EFAULT); 24187 } 24188 24189 #endif /* _MULTI_DATAMODEL */ 24190 24191 return (rval); 24192 } 24193 24194 24195 /* 24196 * Function: sd_mhdioc_inresv 24197 * 24198 * Description: This routine is the driver entry point for handling ioctl 24199 * requests to issue the SCSI-3 Persistent In Read Reservations 24200 * command to the device (MHIOCGRP_INRESV). 24201 * 24202 * Arguments: dev - the device number 24203 * arg - user provided in_resv structure 24204 * flag - this argument is a pass through to ddi_copyxxx() 24205 * directly from the mode argument of ioctl().
24206 * 24207 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 24208 * ENXIO 24209 * EFAULT 24210 */ 24211 24212 static int 24213 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 24214 { 24215 struct sd_lun *un; 24216 mhioc_inresvs_t inresvs; 24217 int rval = 0; 24218 24219 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24220 return (ENXIO); 24221 } 24222 24223 #ifdef _MULTI_DATAMODEL 24224 24225 switch (ddi_model_convert_from(flag & FMODELS)) { 24226 case DDI_MODEL_ILP32: { 24227 struct mhioc_inresvs32 inresvs32; 24228 24229 if (ddi_copyin(arg, &inresvs32, 24230 sizeof (struct mhioc_inresvs32), flag) != 0) { 24231 return (EFAULT); 24232 } 24233 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 24234 if ((rval = sd_persistent_reservation_in_read_resv(un, 24235 &inresvs, flag)) != 0) { 24236 return (rval); 24237 } 24238 inresvs32.generation = inresvs.generation; 24239 if (ddi_copyout(&inresvs32, arg, 24240 sizeof (struct mhioc_inresvs32), flag) != 0) { 24241 return (EFAULT); 24242 } 24243 break; 24244 } 24245 case DDI_MODEL_NONE: 24246 if (ddi_copyin(arg, &inresvs, 24247 sizeof (mhioc_inresvs_t), flag) != 0) { 24248 return (EFAULT); 24249 } 24250 if ((rval = sd_persistent_reservation_in_read_resv(un, 24251 &inresvs, flag)) != 0) { 24252 return (rval); 24253 } 24254 if (ddi_copyout(&inresvs, arg, 24255 sizeof (mhioc_inresvs_t), flag) != 0) { 24256 return (EFAULT); 24257 } 24258 break; 24259 } 24260 24261 #else /* ! _MULTI_DATAMODEL */ 24262 24263 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 24264 return (EFAULT); 24265 } 24266 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 24267 if (rval != 0) { 24268 return (rval); 24269 } 24270 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 24271 return (EFAULT); 24272 } 24273 24274 #endif /* ! _MULTI_DATAMODEL */ 24275 24276 return (rval); 24277 } 24278 24279 24280 /* 24281 * The following routines support the clustering functionality described below 24282 * and implement lost reservation reclaim functionality. 24283 * 24284 * Clustering 24285 * ---------- 24286 * The clustering code uses two different, independent forms of SCSI 24287 * reservation: traditional SCSI-2 Reserve/Release and the newer SCSI-3 24288 * Persistent Group Reservations. For any particular disk, it will use either 24289 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 24290 * 24291 * SCSI-2 24292 * The cluster software takes ownership of a multi-hosted disk by issuing the 24293 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 24294 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl: just 24295 * after taking ownership of the disk with the MHIOCTKOWN ioctl, a cluster 24296 * issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 24297 * driver. The meaning of failfast is that if the driver (on this host) ever 24298 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 24299 * it should immediately panic the host. The motivation for this ioctl is that 24300 * if this host does encounter reservation conflict, the underlying cause is 24301 * that some other host of the cluster has decided that this host is no longer 24302 * in the cluster and has seized control of the disks for itself. Since this 24303 * host is no longer in the cluster, it ought to panic itself.
The 24304 * MHIOCENFAILFAST ioctl does two things: 24305 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 24306 * error to panic the host 24307 * (b) it sets up a periodic timer to test whether this host still has 24308 * "access" (in that no other host has reserved the device): if the 24309 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 24310 * purpose of that periodic timer is to handle scenarios where the host is 24311 * otherwise temporarily quiescent, temporarily doing no real i/o. 24312 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 24313 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 24314 * the device itself. 24315 * 24316 * SCSI-3 PGR 24317 * A direct semantic implementation of the SCSI-3 Persistent Reservation 24318 * facility is supported through the shared multihost disk ioctls 24319 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 24320 * MHIOCGRP_PREEMPTANDABORT). 24321 * 24322 * Reservation Reclaim: 24323 * -------------------- 24324 * To support the lost reservation reclaim operations, this driver creates a 24325 * single thread to handle reinstating reservations on all devices that have 24326 * lost reservations. sd_resv_reclaim_requests are logged for all devices that 24327 * have LOST RESERVATIONS when the scsi watch facility calls back 24328 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the 24329 * requests to regain the lost reservations. 24330 */ 24331 24332 /* 24333 * Function: sd_check_mhd() 24334 * 24335 * Description: This function sets up and submits a scsi watch request or 24336 * terminates an existing watch request. This routine is used in 24337 * support of reservation reclaim. 24338 * 24339 * Arguments: dev - the device 'dev_t' is used for context to discriminate 24340 * among multiple watches that share the callback function 24341 * interval - the number of milliseconds specifying the watch 24342 * interval for issuing TEST UNIT READY commands. If 24343 * set to 0 the watch should be terminated. If the 24344 * interval is set to 0 and if the device is required 24345 * to hold reservation while disabling failfast, the 24346 * watch is restarted with an interval of 24347 * reinstate_resv_delay. 24348 * 24349 * Return Code: 0 - Successful submit/terminate of scsi watch request 24350 * ENXIO - Indicates an invalid device was specified 24351 * EAGAIN - Unable to submit the scsi watch request 24352 */ 24353 24354 static int 24355 sd_check_mhd(dev_t dev, int interval) 24356 { 24357 struct sd_lun *un; 24358 opaque_t token; 24359 24360 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24361 return (ENXIO); 24362 } 24363 24364 /* is this a watch termination request? */ 24365 if (interval == 0) { 24366 mutex_enter(SD_MUTEX(un)); 24367 /* if there is an existing watch task then terminate it */ 24368 if (un->un_mhd_token) { 24369 token = un->un_mhd_token; 24370 un->un_mhd_token = NULL; 24371 mutex_exit(SD_MUTEX(un)); 24372 (void) scsi_watch_request_terminate(token, 24373 SCSI_WATCH_TERMINATE_ALL_WAIT); 24374 mutex_enter(SD_MUTEX(un)); 24375 } else { 24376 mutex_exit(SD_MUTEX(un)); 24377 /* 24378 * Note: If we return here we don't check for the 24379 * failfast case. This is the original legacy 24380 * implementation but perhaps we should be checking 24381 * the failfast case.
24382 */ 24383 return (0); 24384 } 24385 /* 24386 * If the device is required to hold reservation while 24387 * disabling failfast, we need to restart the scsi_watch 24388 * routine with an interval of reinstate_resv_delay. 24389 */ 24390 if (un->un_resvd_status & SD_RESERVE) { 24391 interval = sd_reinstate_resv_delay/1000; 24392 } else { 24393 /* no failfast so bail */ 24394 mutex_exit(SD_MUTEX(un)); 24395 return (0); 24396 } 24397 mutex_exit(SD_MUTEX(un)); 24398 } 24399 24400 /* 24401 * adjust minimum time interval to 1 second, 24402 * and convert from msecs to usecs 24403 */ 24404 if (interval > 0 && interval < 1000) { 24405 interval = 1000; 24406 } 24407 interval *= 1000; 24408 24409 /* 24410 * submit the request to the scsi_watch service 24411 */ 24412 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 24413 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 24414 if (token == NULL) { 24415 return (EAGAIN); 24416 } 24417 24418 /* 24419 * save token for termination later on 24420 */ 24421 mutex_enter(SD_MUTEX(un)); 24422 un->un_mhd_token = token; 24423 mutex_exit(SD_MUTEX(un)); 24424 return (0); 24425 } 24426 24427 24428 /* 24429 * Function: sd_mhd_watch_cb() 24430 * 24431 * Description: This function is the call back function used by the scsi watch 24432 * facility. The scsi watch facility sends the "Test Unit Ready" 24433 * and processes the status. If applicable (i.e. a "Unit Attention" 24434 * status and automatic "Request Sense" not used) the scsi watch 24435 * facility will send a "Request Sense" and retrieve the sense data 24436 * to be passed to this callback function. In either case (the 24437 * automatic "Request Sense" or the facility submitting one), this 24438 * callback is passed the status and sense data. 24439 * 24440 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24441 * among multiple watches that share this callback function 24442 * resultp - scsi watch facility result packet containing scsi 24443 * packet, status byte and sense data 24444 * 24445 * Return Code: 0 - continue the watch task 24446 * non-zero - terminate the watch task 24447 */ 24448 24449 static int 24450 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 24451 { 24452 struct sd_lun *un; 24453 struct scsi_status *statusp; 24454 uint8_t *sensep; 24455 struct scsi_pkt *pkt; 24456 uchar_t actual_sense_length; 24457 dev_t dev = (dev_t)arg; 24458 24459 ASSERT(resultp != NULL); 24460 statusp = resultp->statusp; 24461 sensep = (uint8_t *)resultp->sensep; 24462 pkt = resultp->pkt; 24463 actual_sense_length = resultp->actual_sense_length; 24464 24465 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24466 return (ENXIO); 24467 } 24468 24469 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24470 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 24471 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 24472 24473 /* Begin processing of the status and/or sense data */ 24474 if (pkt->pkt_reason != CMD_CMPLT) { 24475 /* Handle the incomplete packet */ 24476 sd_mhd_watch_incomplete(un, pkt); 24477 return (0); 24478 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 24479 if (*((unsigned char *)statusp) 24480 == STATUS_RESERVATION_CONFLICT) { 24481 /* 24482 * Handle a reservation conflict by panicking if 24483 * configured for failfast or by logging the conflict 24484 * and updating the reservation status 24485 */ 24486 mutex_enter(SD_MUTEX(un)); 24487 if ((un->un_resvd_status & SD_FAILFAST) && 24488 (sd_failfast_enable)) { 24489
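				/*
				 * Both the per-device SD_FAILFAST flag and the
				 * global sd_failfast_enable tunable are set:
				 * panic the host, per the MHIOCENFAILFAST
				 * semantics described above.
				 */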
sd_panic_for_res_conflict(un); 24490 /*NOTREACHED*/ 24491 } 24492 SD_INFO(SD_LOG_IOCTL_MHD, un, 24493 "sd_mhd_watch_cb: Reservation Conflict\n"); 24494 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 24495 mutex_exit(SD_MUTEX(un)); 24496 } 24497 } 24498 24499 if (sensep != NULL) { 24500 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 24501 mutex_enter(SD_MUTEX(un)); 24502 if ((scsi_sense_asc(sensep) == 24503 SD_SCSI_RESET_SENSE_CODE) && 24504 (un->un_resvd_status & SD_RESERVE)) { 24505 /* 24506 * The additional sense code indicates a power 24507 * on or bus device reset has occurred; update 24508 * the reservation status. 24509 */ 24510 un->un_resvd_status |= 24511 (SD_LOST_RESERVE | SD_WANT_RESERVE); 24512 SD_INFO(SD_LOG_IOCTL_MHD, un, 24513 "sd_mhd_watch_cb: Lost Reservation\n"); 24514 } 24515 } else { 24516 return (0); 24517 } 24518 } else { 24519 mutex_enter(SD_MUTEX(un)); 24520 } 24521 24522 if ((un->un_resvd_status & SD_RESERVE) && 24523 (un->un_resvd_status & SD_LOST_RESERVE)) { 24524 if (un->un_resvd_status & SD_WANT_RESERVE) { 24525 /* 24526 * A reset occurred in between the last probe and this 24527 * one so if a timeout is pending cancel it. 24528 */ 24529 if (un->un_resvd_timeid) { 24530 timeout_id_t temp_id = un->un_resvd_timeid; 24531 un->un_resvd_timeid = NULL; 24532 mutex_exit(SD_MUTEX(un)); 24533 (void) untimeout(temp_id); 24534 mutex_enter(SD_MUTEX(un)); 24535 } 24536 un->un_resvd_status &= ~SD_WANT_RESERVE; 24537 } 24538 if (un->un_resvd_timeid == 0) { 24539 /* Schedule a timeout to handle the lost reservation */ 24540 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 24541 (void *)dev, 24542 drv_usectohz(sd_reinstate_resv_delay)); 24543 } 24544 } 24545 mutex_exit(SD_MUTEX(un)); 24546 return (0); 24547 } 24548 24549 24550 /* 24551 * Function: sd_mhd_watch_incomplete() 24552 * 24553 * Description: This function is used to find out why a scsi pkt sent by the 24554 * scsi watch facility was not completed. Under some scenarios this 24555 * routine will return. Otherwise it will send a bus reset to see 24556 * if the drive is still online. 24557 * 24558 * Arguments: un - driver soft state (unit) structure 24559 * pkt - incomplete scsi pkt 24560 */ 24561 24562 static void 24563 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 24564 { 24565 int be_chatty; 24566 int perr; 24567 24568 ASSERT(pkt != NULL); 24569 ASSERT(un != NULL); 24570 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 24571 perr = (pkt->pkt_statistics & STAT_PERR); 24572 24573 mutex_enter(SD_MUTEX(un)); 24574 if (un->un_state == SD_STATE_DUMPING) { 24575 mutex_exit(SD_MUTEX(un)); 24576 return; 24577 } 24578 24579 switch (pkt->pkt_reason) { 24580 case CMD_UNX_BUS_FREE: 24581 /* 24582 * If we had a parity error that caused the target to drop BSY*, 24583 * don't be chatty about it. 24584 */ 24585 if (perr && be_chatty) { 24586 be_chatty = 0; 24587 } 24588 break; 24589 case CMD_TAG_REJECT: 24590 /* 24591 * The SCSI-2 spec states that a tag reject will be sent by the 24592 * target if tagged queuing is not supported. A tag reject may 24593 * also be sent during certain initialization periods or to 24594 * control internal resources. For the latter case the target 24595 * may also return Queue Full. 24596 * 24597 * If this driver receives a tag reject from a target that is 24598 * going through an init period or controlling internal 24599 * resources tagged queuing will be disabled. 
This is a less 24600 * than optimal behavior but the driver is unable to determine 24601 * the target state and assumes tagged queueing is not supported. 24602 */ 24603 pkt->pkt_flags = 0; 24604 un->un_tagflags = 0; 24605 24606 if (un->un_f_opt_queueing == TRUE) { 24607 un->un_throttle = min(un->un_throttle, 3); 24608 } else { 24609 un->un_throttle = 1; 24610 } 24611 mutex_exit(SD_MUTEX(un)); 24612 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 24613 mutex_enter(SD_MUTEX(un)); 24614 break; 24615 case CMD_INCOMPLETE: 24616 /* 24617 * The transport stopped with an abnormal state; fall through and 24618 * reset the target and/or bus unless selection did not complete 24619 * (indicated by STATE_GOT_BUS) in which case we don't want to 24620 * go through a target/bus reset 24621 */ 24622 if (pkt->pkt_state == STATE_GOT_BUS) { 24623 break; 24624 } 24625 /*FALLTHROUGH*/ 24626 24627 case CMD_TIMEOUT: 24628 default: 24629 /* 24630 * The lun may still be running the command, so a lun reset 24631 * should be attempted. If the lun reset fails or cannot be 24632 * issued, then try a target reset. Lastly try a bus reset. 24633 */ 24634 if ((pkt->pkt_statistics & 24635 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 24636 int reset_retval = 0; 24637 mutex_exit(SD_MUTEX(un)); 24638 if (un->un_f_allow_bus_device_reset == TRUE) { 24639 if (un->un_f_lun_reset_enabled == TRUE) { 24640 reset_retval = 24641 scsi_reset(SD_ADDRESS(un), 24642 RESET_LUN); 24643 } 24644 if (reset_retval == 0) { 24645 reset_retval = 24646 scsi_reset(SD_ADDRESS(un), 24647 RESET_TARGET); 24648 } 24649 } 24650 if (reset_retval == 0) { 24651 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 24652 } 24653 mutex_enter(SD_MUTEX(un)); 24654 } 24655 break; 24656 } 24657 24658 /* A device/bus reset has occurred; update the reservation status. */ 24659 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 24660 (STAT_BUS_RESET | STAT_DEV_RESET))) { 24661 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24662 un->un_resvd_status |= 24663 (SD_LOST_RESERVE | SD_WANT_RESERVE); 24664 SD_INFO(SD_LOG_IOCTL_MHD, un, 24665 "sd_mhd_watch_incomplete: Lost Reservation\n"); 24666 } 24667 } 24668 24669 /* 24670 * The disk has been turned off; update the device state. 24671 * 24672 * Note: Should we be offlining the disk here? 24673 */ 24674 if (pkt->pkt_state == STATE_GOT_BUS) { 24675 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 24676 "Disk not responding to selection\n"); 24677 if (un->un_state != SD_STATE_OFFLINE) { 24678 New_state(un, SD_STATE_OFFLINE); 24679 } 24680 } else if (be_chatty) { 24681 /* 24682 * suppress messages if they are all the same pkt reason; 24683 * with TQ, many (up to 256) are returned with the same 24684 * pkt_reason 24685 */ 24686 if (pkt->pkt_reason != un->un_last_pkt_reason) { 24687 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24688 "sd_mhd_watch_incomplete: " 24689 "SCSI transport failed: reason '%s'\n", 24690 scsi_rname(pkt->pkt_reason)); 24691 } 24692 } 24693 un->un_last_pkt_reason = pkt->pkt_reason; 24694 mutex_exit(SD_MUTEX(un)); 24695 } 24696 24697 24698 /* 24699 * Function: sd_sname() 24700 * 24701 * Description: This is a simple little routine to return a string containing 24702 * a printable description of the command status byte for use in 24703 * logging. 24704 * 24705 * Arguments: status - the status byte 24706 * 24707 * Return Code: char * - string containing status description.
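 *
 *		For example, sd_sname(STATUS_CHECK) returns
 *		"check condition".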
24708 */ 24709 24710 static char * 24711 sd_sname(uchar_t status) 24712 { 24713 switch (status & STATUS_MASK) { 24714 case STATUS_GOOD: 24715 return ("good status"); 24716 case STATUS_CHECK: 24717 return ("check condition"); 24718 case STATUS_MET: 24719 return ("condition met"); 24720 case STATUS_BUSY: 24721 return ("busy"); 24722 case STATUS_INTERMEDIATE: 24723 return ("intermediate"); 24724 case STATUS_INTERMEDIATE_MET: 24725 return ("intermediate - condition met"); 24726 case STATUS_RESERVATION_CONFLICT: 24727 return ("reservation_conflict"); 24728 case STATUS_TERMINATED: 24729 return ("command terminated"); 24730 case STATUS_QFULL: 24731 return ("queue full"); 24732 default: 24733 return ("<unknown status>"); 24734 } 24735 } 24736 24737 24738 /* 24739 * Function: sd_mhd_resvd_recover() 24740 * 24741 * Description: This function adds a reservation entry to the 24742 * sd_resv_reclaim_request list and signals the reservation 24743 * reclaim thread that there is work pending. If the reservation 24744 * reclaim thread has not been previously created this function 24745 * will kick it off. 24746 * 24747 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24748 * among multiple watches that share this callback function 24749 * 24750 * Context: This routine is called by timeout() and is run in interrupt 24751 * context. It must not sleep or call other functions which may 24752 * sleep. 24753 */ 24754 24755 static void 24756 sd_mhd_resvd_recover(void *arg) 24757 { 24758 dev_t dev = (dev_t)arg; 24759 struct sd_lun *un; 24760 struct sd_thr_request *sd_treq = NULL; 24761 struct sd_thr_request *sd_cur = NULL; 24762 struct sd_thr_request *sd_prev = NULL; 24763 int already_there = 0; 24764 24765 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24766 return; 24767 } 24768 24769 mutex_enter(SD_MUTEX(un)); 24770 un->un_resvd_timeid = NULL; 24771 if (un->un_resvd_status & SD_WANT_RESERVE) { 24772 /* 24773 * There was a reset so don't issue the reserve, allow the 24774 * sd_mhd_watch_cb callback function to notice this and 24775 * reschedule the timeout for reservation. 24776 */ 24777 mutex_exit(SD_MUTEX(un)); 24778 return; 24779 } 24780 mutex_exit(SD_MUTEX(un)); 24781 24782 /* 24783 * Add this device to the sd_resv_reclaim_request list and the 24784 * sd_resv_reclaim_thread should take care of the rest. 24785 * 24786 * Note: We can't sleep in this context so if the memory allocation 24787 * fails allow the sd_mhd_watch_cb callback function to notice this and 24788 * reschedule the timeout for reservation. 
(4378460) 24789 */ 24790 sd_treq = (struct sd_thr_request *) 24791 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 24792 if (sd_treq == NULL) { 24793 return; 24794 } 24795 24796 sd_treq->sd_thr_req_next = NULL; 24797 sd_treq->dev = dev; 24798 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24799 if (sd_tr.srq_thr_req_head == NULL) { 24800 sd_tr.srq_thr_req_head = sd_treq; 24801 } else { 24802 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 24803 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 24804 if (sd_cur->dev == dev) { 24805 /* 24806 * already in the queue so don't log 24807 * another request for the device 24808 */ 24809 already_there = 1; 24810 break; 24811 } 24812 sd_prev = sd_cur; 24813 } 24814 if (!already_there) { 24815 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 24816 "logging request for %lx\n", dev); 24817 sd_prev->sd_thr_req_next = sd_treq; 24818 } else { 24819 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 24820 } 24821 } 24822 24823 /* 24824 * Create a kernel thread to do the reservation reclaim, freeing this 24825 * thread up; we cannot block this (interrupt-context) thread while 24826 * the reservation reclaim is performed. 24827 */ 24828 if (sd_tr.srq_resv_reclaim_thread == NULL) 24829 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 24830 sd_resv_reclaim_thread, NULL, 24831 0, &p0, TS_RUN, v.v_maxsyspri - 2); 24832 24833 /* Tell the reservation reclaim thread that it has work to do */ 24834 cv_signal(&sd_tr.srq_resv_reclaim_cv); 24835 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24836 } 24837 24838 /* 24839 * Function: sd_resv_reclaim_thread() 24840 * 24841 * Description: This function implements the reservation reclaim operations. 24842 * 24843 * Arguments: none; pending requests are taken from the 24844 * sd_tr.srq_thr_req_head list. 24845 */ 24846 24847 static void 24848 sd_resv_reclaim_thread() 24849 { 24850 struct sd_lun *un; 24851 struct sd_thr_request *sd_mhreq; 24852 24853 /* Wait for work */ 24854 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24855 if (sd_tr.srq_thr_req_head == NULL) { 24856 cv_wait(&sd_tr.srq_resv_reclaim_cv, 24857 &sd_tr.srq_resv_reclaim_mutex); 24858 } 24859 24860 /* Loop while we have work */ 24861 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 24862 un = ddi_get_soft_state(sd_state, 24863 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 24864 if (un == NULL) { 24865 /* 24866 * softstate structure is NULL so just 24867 * dequeue the request and continue 24868 */ 24869 sd_tr.srq_thr_req_head = 24870 sd_tr.srq_thr_cur_req->sd_thr_req_next; 24871 kmem_free(sd_tr.srq_thr_cur_req, 24872 sizeof (struct sd_thr_request)); 24873 continue; 24874 } 24875 24876 /* dequeue the request */ 24877 sd_mhreq = sd_tr.srq_thr_cur_req; 24878 sd_tr.srq_thr_req_head = 24879 sd_tr.srq_thr_cur_req->sd_thr_req_next; 24880 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24881 24882 /* 24883 * Reclaim reservation only if SD_RESERVE is still set. There 24884 * may have been a call to MHIOCRELEASE before we got here. 24885 */ 24886 mutex_enter(SD_MUTEX(un)); 24887 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24888 /* 24889 * Note: The SD_LOST_RESERVE flag is cleared before 24890 * reclaiming the reservation.
If this is done after the 24891 * call to sd_reserve_release a reservation loss in the 24892 * window between pkt completion of reserve cmd and 24893 * mutex_enter below may not be recognized 24894 */ 24895 un->un_resvd_status &= ~SD_LOST_RESERVE; 24896 mutex_exit(SD_MUTEX(un)); 24897 24898 if (sd_reserve_release(sd_mhreq->dev, 24899 SD_RESERVE) == 0) { 24900 mutex_enter(SD_MUTEX(un)); 24901 un->un_resvd_status |= SD_RESERVE; 24902 mutex_exit(SD_MUTEX(un)); 24903 SD_INFO(SD_LOG_IOCTL_MHD, un, 24904 "sd_resv_reclaim_thread: " 24905 "Reservation Recovered\n"); 24906 } else { 24907 mutex_enter(SD_MUTEX(un)); 24908 un->un_resvd_status |= SD_LOST_RESERVE; 24909 mutex_exit(SD_MUTEX(un)); 24910 SD_INFO(SD_LOG_IOCTL_MHD, un, 24911 "sd_resv_reclaim_thread: Failed " 24912 "Reservation Recovery\n"); 24913 } 24914 } else { 24915 mutex_exit(SD_MUTEX(un)); 24916 } 24917 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24918 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 24919 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24920 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 24921 /* 24922 * wakeup the destroy thread if anyone is waiting on 24923 * us to complete. 24924 */ 24925 cv_signal(&sd_tr.srq_inprocess_cv); 24926 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24927 "sd_resv_reclaim_thread: cv_signalling current request \n"); 24928 } 24929 24930 /* 24931 * cleanup the sd_tr structure now that this thread will not exist 24932 */ 24933 ASSERT(sd_tr.srq_thr_req_head == NULL); 24934 ASSERT(sd_tr.srq_thr_cur_req == NULL); 24935 sd_tr.srq_resv_reclaim_thread = NULL; 24936 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24937 thread_exit(); 24938 } 24939 24940 24941 /* 24942 * Function: sd_rmv_resv_reclaim_req() 24943 * 24944 * Description: This function removes any pending reservation reclaim requests 24945 * for the specified device. 24946 * 24947 * Arguments: dev - the device 'dev_t' 24948 */ 24949 24950 static void 24951 sd_rmv_resv_reclaim_req(dev_t dev) 24952 { 24953 struct sd_thr_request *sd_mhreq; 24954 struct sd_thr_request *sd_prev; 24955 24956 /* Remove a reservation reclaim request from the list */ 24957 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24958 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 24959 /* 24960 * We are attempting to reinstate reservation for 24961 * this device. We wait for sd_reserve_release() 24962 * to return before we return. 24963 */ 24964 cv_wait(&sd_tr.srq_inprocess_cv, 24965 &sd_tr.srq_resv_reclaim_mutex); 24966 } else { 24967 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 24968 if (sd_mhreq && sd_mhreq->dev == dev) { 24969 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 24970 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24971 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24972 return; 24973 } 24974 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 24975 if (sd_mhreq && sd_mhreq->dev == dev) { 24976 break; 24977 } 24978 sd_prev = sd_mhreq; 24979 } 24980 if (sd_mhreq != NULL) { 24981 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 24982 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24983 } 24984 } 24985 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24986 } 24987 24988 24989 /* 24990 * Function: sd_mhd_reset_notify_cb() 24991 * 24992 * Description: This is a call back function for scsi_reset_notify. This 24993 * function updates the softstate reserved status and logs the 24994 * reset. The driver scsi watch facility callback function 24995 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 24996 * will reclaim the reservation. 
24997 * 24998 * Arguments: arg - driver soft state (unit) structure 24999 */ 25000 25001 static void 25002 sd_mhd_reset_notify_cb(caddr_t arg) 25003 { 25004 struct sd_lun *un = (struct sd_lun *)arg; 25005 25006 mutex_enter(SD_MUTEX(un)); 25007 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 25008 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 25009 SD_INFO(SD_LOG_IOCTL_MHD, un, 25010 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 25011 } 25012 mutex_exit(SD_MUTEX(un)); 25013 } 25014 25015 25016 /* 25017 * Function: sd_take_ownership() 25018 * 25019 * Description: This routine implements an algorithm to achieve a stable 25020 * reservation on disks which don't implement priority reserve, 25021 * and makes sure that other hosts' re-reservation attempts fail. 25022 * This algorithm consists of a loop that keeps issuing the RESERVE 25023 * for some period of time (min_ownership_delay, default 6 seconds). 25024 * During that loop, it looks to see if there has been a bus device 25025 * reset or bus reset (both of which cause an existing reservation 25026 * to be lost). If the reservation is lost, it reissues the RESERVE 25027 * until a period of min_ownership_delay with no resets has gone by, 25028 * or until max_ownership_delay has expired. This loop ensures that 25029 * the host really did manage to reserve the device, in spite of 25030 * resets. The looping for min_ownership_delay (default six 25031 * seconds) is important to early generation clustering products, 25032 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 25033 * MHIOCENFAILFAST periodic timer of two seconds. By having 25034 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 25035 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 25036 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 25037 * have already noticed, via the MHIOCENFAILFAST polling, that it 25038 * no longer "owns" the disk and will have panicked itself. Thus, 25039 * the host issuing the MHIOCTKOWN is assured (with timing 25040 * dependencies) that by the time it actually starts to use the 25041 * disk for real work, the old owner is no longer accessing it. 25042 * 25043 * min_ownership_delay is the minimum amount of time for which the 25044 * disk must be reserved continuously devoid of resets before the 25045 * MHIOCTKOWN ioctl will return success. 25046 * 25047 * max_ownership_delay indicates the amount of time by which the 25048 * take ownership should succeed or time out with an error. 25049 * 25050 * Arguments: dev - the device 'dev_t' 25051 * *p - struct containing timing info. 25052 * 25053 * Return Code: 0 for success or error code 25054 */ 25055 25056 static int 25057 sd_take_ownership(dev_t dev, struct mhioctkown *p) 25058 { 25059 struct sd_lun *un; 25060 int rval; 25061 int err; 25062 int reservation_count = 0; 25063 int min_ownership_delay = 6000000; /* in usec */ 25064 int max_ownership_delay = 30000000; /* in usec */ 25065 clock_t start_time; /* starting time of this algorithm */ 25066 clock_t end_time; /* time limit for giving up */ 25067 clock_t ownership_time; /* time limit for stable ownership */ 25068 clock_t current_time; 25069 clock_t previous_current_time; 25070 25071 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25072 return (ENXIO); 25073 } 25074 25075 /* 25076 * Attempt a device reservation. A priority reservation is requested.
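 *
 * (Illustrative timing, using the defaults above: the loop below polls
 * every 500 msec, so the ioctl succeeds only once min_ownership_delay
 * has elapsed without a reset and at least four consecutive polls have
 * seen the reservation intact.)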
25077 */ 25078 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 25079 != SD_SUCCESS) { 25080 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25081 "sd_take_ownership: return(1)=%d\n", rval); 25082 return (rval); 25083 } 25084 25085 /* Update the softstate reserved status to indicate the reservation */ 25086 mutex_enter(SD_MUTEX(un)); 25087 un->un_resvd_status |= SD_RESERVE; 25088 un->un_resvd_status &= 25089 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 25090 mutex_exit(SD_MUTEX(un)); 25091 25092 if (p != NULL) { 25093 if (p->min_ownership_delay != 0) { 25094 min_ownership_delay = p->min_ownership_delay * 1000; 25095 } 25096 if (p->max_ownership_delay != 0) { 25097 max_ownership_delay = p->max_ownership_delay * 1000; 25098 } 25099 } 25100 SD_INFO(SD_LOG_IOCTL_MHD, un, 25101 "sd_take_ownership: min, max delays: %d, %d\n", 25102 min_ownership_delay, max_ownership_delay); 25103 25104 start_time = ddi_get_lbolt(); 25105 current_time = start_time; 25106 ownership_time = current_time + drv_usectohz(min_ownership_delay); 25107 end_time = start_time + drv_usectohz(max_ownership_delay); 25108 25109 while (current_time - end_time < 0) { 25110 delay(drv_usectohz(500000)); 25111 25112 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 25113 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 25114 mutex_enter(SD_MUTEX(un)); 25115 rval = (un->un_resvd_status & 25116 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 25117 mutex_exit(SD_MUTEX(un)); 25118 break; 25119 } 25120 } 25121 previous_current_time = current_time; 25122 current_time = ddi_get_lbolt(); 25123 mutex_enter(SD_MUTEX(un)); 25124 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 25125 ownership_time = ddi_get_lbolt() + 25126 drv_usectohz(min_ownership_delay); 25127 reservation_count = 0; 25128 } else { 25129 reservation_count++; 25130 } 25131 un->un_resvd_status |= SD_RESERVE; 25132 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 25133 mutex_exit(SD_MUTEX(un)); 25134 25135 SD_INFO(SD_LOG_IOCTL_MHD, un, 25136 "sd_take_ownership: ticks for loop iteration=%ld, " 25137 "reservation=%s\n", (current_time - previous_current_time), 25138 reservation_count ? 
"ok" : "reclaimed"); 25139 25140 if (current_time - ownership_time >= 0 && 25141 reservation_count >= 4) { 25142 rval = 0; /* Achieved a stable ownership */ 25143 break; 25144 } 25145 if (current_time - end_time >= 0) { 25146 rval = EACCES; /* No ownership in max possible time */ 25147 break; 25148 } 25149 } 25150 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25151 "sd_take_ownership: return(2)=%d\n", rval); 25152 return (rval); 25153 } 25154 25155 25156 /* 25157 * Function: sd_reserve_release() 25158 * 25159 * Description: This function builds and sends scsi RESERVE, RELEASE, and 25160 * PRIORITY RESERVE commands based on a user specified command type 25161 * 25162 * Arguments: dev - the device 'dev_t' 25163 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 25164 * SD_RESERVE, SD_RELEASE 25165 * 25166 * Return Code: 0 or Error Code 25167 */ 25168 25169 static int 25170 sd_reserve_release(dev_t dev, int cmd) 25171 { 25172 struct uscsi_cmd *com = NULL; 25173 struct sd_lun *un = NULL; 25174 char cdb[CDB_GROUP0]; 25175 int rval; 25176 25177 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 25178 (cmd == SD_PRIORITY_RESERVE)); 25179 25180 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25181 return (ENXIO); 25182 } 25183 25184 /* instantiate and initialize the command and cdb */ 25185 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25186 bzero(cdb, CDB_GROUP0); 25187 com->uscsi_flags = USCSI_SILENT; 25188 com->uscsi_timeout = un->un_reserve_release_time; 25189 com->uscsi_cdblen = CDB_GROUP0; 25190 com->uscsi_cdb = cdb; 25191 if (cmd == SD_RELEASE) { 25192 cdb[0] = SCMD_RELEASE; 25193 } else { 25194 cdb[0] = SCMD_RESERVE; 25195 } 25196 25197 /* Send the command. */ 25198 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25199 SD_PATH_STANDARD); 25200 25201 /* 25202 * "break" a reservation that is held by another host, by issuing a 25203 * reset if priority reserve is desired, and we could not get the 25204 * device. 25205 */ 25206 if ((cmd == SD_PRIORITY_RESERVE) && 25207 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25208 /* 25209 * First try to reset the LUN. If we cannot, then try a target 25210 * reset, followed by a bus reset if the target reset fails. 25211 */ 25212 int reset_retval = 0; 25213 if (un->un_f_lun_reset_enabled == TRUE) { 25214 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 25215 } 25216 if (reset_retval == 0) { 25217 /* The LUN reset either failed or was not issued */ 25218 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25219 } 25220 if ((reset_retval == 0) && 25221 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 25222 rval = EIO; 25223 kmem_free(com, sizeof (*com)); 25224 return (rval); 25225 } 25226 25227 bzero(com, sizeof (struct uscsi_cmd)); 25228 com->uscsi_flags = USCSI_SILENT; 25229 com->uscsi_cdb = cdb; 25230 com->uscsi_cdblen = CDB_GROUP0; 25231 com->uscsi_timeout = 5; 25232 25233 /* 25234 * Reissue the last reserve command, this time without request 25235 * sense. Assume that it is just a regular reserve command. 25236 */ 25237 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25238 SD_PATH_STANDARD); 25239 } 25240 25241 /* Return an error if still getting a reservation conflict. 
*/ 25242 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25243 rval = EACCES; 25244 } 25245 25246 kmem_free(com, sizeof (*com)); 25247 return (rval); 25248 } 25249 25250 25251 #define SD_NDUMP_RETRIES 12 25252 /* 25253 * System Crash Dump routine 25254 */ 25255 25256 static int 25257 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 25258 { 25259 int instance; 25260 int partition; 25261 int i; 25262 int err; 25263 struct sd_lun *un; 25264 struct scsi_pkt *wr_pktp; 25265 struct buf *wr_bp; 25266 struct buf wr_buf; 25267 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 25268 daddr_t tgt_blkno; /* rmw - blkno for target */ 25269 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 25270 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 25271 size_t io_start_offset; 25272 int doing_rmw = FALSE; 25273 int rval; 25274 ssize_t dma_resid; 25275 daddr_t oblkno; 25276 diskaddr_t nblks = 0; 25277 diskaddr_t start_block; 25278 25279 instance = SDUNIT(dev); 25280 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 25281 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 25282 return (ENXIO); 25283 } 25284 25285 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 25286 25287 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 25288 25289 partition = SDPART(dev); 25290 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 25291 25292 if (!(NOT_DEVBSIZE(un))) { 25293 int secmask = 0; 25294 int blknomask = 0; 25295 25296 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1; 25297 secmask = un->un_tgt_blocksize - 1; 25298 25299 if (blkno & blknomask) { 25300 SD_TRACE(SD_LOG_DUMP, un, 25301 "sddump: dump start block not modulo %d\n", 25302 un->un_tgt_blocksize); 25303 return (EINVAL); 25304 } 25305 25306 if ((nblk * DEV_BSIZE) & secmask) { 25307 SD_TRACE(SD_LOG_DUMP, un, 25308 "sddump: dump length not modulo %d\n", 25309 un->un_tgt_blocksize); 25310 return (EINVAL); 25311 } 25312 25313 } 25314 25315 /* Validate the blocks to dump against the partition size. */ 25316 25317 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 25318 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 25319 25320 if (NOT_DEVBSIZE(un)) { 25321 if ((blkno + nblk) > nblks) { 25322 SD_TRACE(SD_LOG_DUMP, un, 25323 "sddump: dump range larger than partition: " 25324 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 25325 blkno, nblk, nblks); 25326 return (EINVAL); 25327 } 25328 } else { 25329 if (((blkno / (un->un_tgt_blocksize / DEV_BSIZE)) + 25330 (nblk / (un->un_tgt_blocksize / DEV_BSIZE))) > nblks) { 25331 SD_TRACE(SD_LOG_DUMP, un, 25332 "sddump: dump range larger than partition: " 25333 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 25334 blkno, nblk, nblks); 25335 return (EINVAL); 25336 } 25337 } 25338 25339 mutex_enter(&un->un_pm_mutex); 25340 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 25341 struct scsi_pkt *start_pktp; 25342 25343 mutex_exit(&un->un_pm_mutex); 25344 25345 /* 25346 * use pm framework to power on HBA 1st 25347 */ 25348 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 25349 25350 /* 25351 * Dump no longer uses sdpower to power on a device; it's 25352 * in-line here so it can be done in polled mode. 25353 */ 25354 25355 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 25356 25357 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 25358 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 25359 25360 if (start_pktp == NULL) { 25361 /* We were not given a SCSI packet, fail.
*/ 25362 return (EIO); 25363 } 25364 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 25365 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 25366 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 25367 start_pktp->pkt_flags = FLAG_NOINTR; 25368 25369 mutex_enter(SD_MUTEX(un)); 25370 SD_FILL_SCSI1_LUN(un, start_pktp); 25371 mutex_exit(SD_MUTEX(un)); 25372 /* 25373 * Scsi_poll returns 0 (success) if the command completes and 25374 * the status block is STATUS_GOOD. 25375 */ 25376 if (sd_scsi_poll(un, start_pktp) != 0) { 25377 scsi_destroy_pkt(start_pktp); 25378 return (EIO); 25379 } 25380 scsi_destroy_pkt(start_pktp); 25381 (void) sd_ddi_pm_resume(un); 25382 } else { 25383 mutex_exit(&un->un_pm_mutex); 25384 } 25385 25386 mutex_enter(SD_MUTEX(un)); 25387 un->un_throttle = 0; 25388 25389 /* 25390 * The first time through, reset the specific target device. 25391 * However, when cpr calls sddump we know that sd is in 25392 * a good state so no bus reset is required. 25393 * Clear sense data via Request Sense cmd. 25394 * In sddump we don't care about allow_bus_device_reset anymore. 25395 */ 25396 25397 if ((un->un_state != SD_STATE_SUSPENDED) && 25398 (un->un_state != SD_STATE_DUMPING)) { 25399 25400 New_state(un, SD_STATE_DUMPING); 25401 25402 if (un->un_f_is_fibre == FALSE) { 25403 mutex_exit(SD_MUTEX(un)); 25404 /* 25405 * Attempt a bus reset for parallel scsi. 25406 * 25407 * Note: A bus reset is required because on some host 25408 * systems (i.e. E420R) a bus device reset is 25409 * insufficient to reset the state of the target. 25410 * 25411 * Note: Don't issue the reset for fibre-channel, 25412 * because this tends to hang the bus (loop) for 25413 * too long while everyone is logging out and in 25414 * and the deadman timer for dumping will fire 25415 * before the dump is complete. 25416 */ 25417 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 25418 mutex_enter(SD_MUTEX(un)); 25419 Restore_state(un); 25420 mutex_exit(SD_MUTEX(un)); 25421 return (EIO); 25422 } 25423 25424 /* Delay to give the device some recovery time. */ 25425 drv_usecwait(10000); 25426 25427 if (sd_send_polled_RQS(un) == SD_FAILURE) { 25428 SD_INFO(SD_LOG_DUMP, un, 25429 "sddump: sd_send_polled_RQS failed\n"); 25430 } 25431 mutex_enter(SD_MUTEX(un)); 25432 } 25433 } 25434 25435 /* 25436 * Convert the partition-relative block number to a 25437 * disk physical block number. 25438 */ 25439 if (NOT_DEVBSIZE(un)) { 25440 blkno += start_block; 25441 } else { 25442 blkno = blkno / (un->un_tgt_blocksize / DEV_BSIZE); 25443 blkno += start_block; 25444 } 25445 25446 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 25447 25448 25449 /* 25450 * Check if the device has a non-512 block size. 25451 */ 25452 wr_bp = NULL; 25453 if (NOT_DEVBSIZE(un)) { 25454 tgt_byte_offset = blkno * un->un_sys_blocksize; 25455 tgt_byte_count = nblk * un->un_sys_blocksize; 25456 if ((tgt_byte_offset % un->un_tgt_blocksize) || 25457 (tgt_byte_count % un->un_tgt_blocksize)) { 25458 doing_rmw = TRUE; 25459 /* 25460 * Calculate the block number and number of blocks 25461 * in terms of the media block size. 25462 */ 25463 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 25464 tgt_nblk = 25465 ((tgt_byte_offset + tgt_byte_count + 25466 (un->un_tgt_blocksize - 1)) / 25467 un->un_tgt_blocksize) - tgt_blkno; 25468 25469 /* 25470 * Invoke the routine which is going to do the read part 25471 * of the read-modify-write. 25472 * Note that this routine returns a pointer to 25473 * a valid bp in wr_bp.
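 *
 * Worked example (illustrative numbers only): with un_sys_blocksize =
 * 512, un_tgt_blocksize = 4096, blkno = 9 and nblk = 2, the offset
 * (4608) and count (1024) are both misaligned, so tgt_blkno = 1,
 * tgt_nblk = 1 and io_start_offset = 4608 - 4096 = 512; the bcopy
 * below then overlays the 1024 dump bytes at offset 512 of the
 * 4096-byte block just read.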
25474 */ 25475 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 25476 &wr_bp); 25477 if (err) { 25478 mutex_exit(SD_MUTEX(un)); 25479 return (err); 25480 } 25481 /* 25482 * Offset is being calculated as - 25483 * (original block # * system block size) - 25484 * (new block # * target block size) 25485 */ 25486 io_start_offset = 25487 ((uint64_t)(blkno * un->un_sys_blocksize)) - 25488 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 25489 25490 ASSERT((io_start_offset >= 0) && 25491 (io_start_offset < un->un_tgt_blocksize)); 25492 /* 25493 * Do the modify portion of read modify write. 25494 */ 25495 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 25496 (size_t)nblk * un->un_sys_blocksize); 25497 } else { 25498 doing_rmw = FALSE; 25499 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 25500 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 25501 } 25502 25503 /* Convert blkno and nblk to target blocks */ 25504 blkno = tgt_blkno; 25505 nblk = tgt_nblk; 25506 } else { 25507 wr_bp = &wr_buf; 25508 bzero(wr_bp, sizeof (struct buf)); 25509 wr_bp->b_flags = B_BUSY; 25510 wr_bp->b_un.b_addr = addr; 25511 wr_bp->b_bcount = nblk << DEV_BSHIFT; 25512 wr_bp->b_resid = 0; 25513 } 25514 25515 mutex_exit(SD_MUTEX(un)); 25516 25517 /* 25518 * Obtain a SCSI packet for the write command. 25519 * It should be safe to call the allocator here without 25520 * worrying about being locked for DVMA mapping because 25521 * the address we're passed is already a DVMA mapping 25522 * 25523 * We are also not going to worry about semaphore ownership 25524 * in the dump buffer. Dumping is single threaded at present. 25525 */ 25526 25527 wr_pktp = NULL; 25528 25529 dma_resid = wr_bp->b_bcount; 25530 oblkno = blkno; 25531 25532 if (!(NOT_DEVBSIZE(un))) { 25533 nblk = nblk / (un->un_tgt_blocksize / DEV_BSIZE); 25534 } 25535 25536 while (dma_resid != 0) { 25537 25538 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 25539 wr_bp->b_flags &= ~B_ERROR; 25540 25541 if (un->un_partial_dma_supported == 1) { 25542 blkno = oblkno + 25543 ((wr_bp->b_bcount - dma_resid) / 25544 un->un_tgt_blocksize); 25545 nblk = dma_resid / un->un_tgt_blocksize; 25546 25547 if (wr_pktp) { 25548 /* 25549 * Partial DMA transfers after initial transfer 25550 */ 25551 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 25552 blkno, nblk); 25553 } else { 25554 /* Initial transfer */ 25555 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 25556 un->un_pkt_flags, NULL_FUNC, NULL, 25557 blkno, nblk); 25558 } 25559 } else { 25560 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 25561 0, NULL_FUNC, NULL, blkno, nblk); 25562 } 25563 25564 if (rval == 0) { 25565 /* We were given a SCSI packet, continue. 
*/ 25566 break; 25567 } 25568 25569 if (i == 0) { 25570 if (wr_bp->b_flags & B_ERROR) { 25571 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25572 "no resources for dumping; " 25573 "error code: 0x%x, retrying", 25574 geterror(wr_bp)); 25575 } else { 25576 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25577 "no resources for dumping; retrying"); 25578 } 25579 } else if (i != (SD_NDUMP_RETRIES - 1)) { 25580 if (wr_bp->b_flags & B_ERROR) { 25581 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25582 "no resources for dumping; error code: " 25583 "0x%x, retrying\n", geterror(wr_bp)); 25584 } 25585 } else { 25586 if (wr_bp->b_flags & B_ERROR) { 25587 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25588 "no resources for dumping; " 25589 "error code: 0x%x, retries failed, " 25590 "giving up.\n", geterror(wr_bp)); 25591 } else { 25592 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25593 "no resources for dumping; " 25594 "retries failed, giving up.\n"); 25595 } 25596 mutex_enter(SD_MUTEX(un)); 25597 Restore_state(un); 25598 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 25599 mutex_exit(SD_MUTEX(un)); 25600 scsi_free_consistent_buf(wr_bp); 25601 } else { 25602 mutex_exit(SD_MUTEX(un)); 25603 } 25604 return (EIO); 25605 } 25606 drv_usecwait(10000); 25607 } 25608 25609 if (un->un_partial_dma_supported == 1) { 25610 /* 25611 * save the resid from PARTIAL_DMA 25612 */ 25613 dma_resid = wr_pktp->pkt_resid; 25614 if (dma_resid != 0) 25615 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 25616 wr_pktp->pkt_resid = 0; 25617 } else { 25618 dma_resid = 0; 25619 } 25620 25621 /* SunBug 1222170 */ 25622 wr_pktp->pkt_flags = FLAG_NOINTR; 25623 25624 err = EIO; 25625 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 25626 25627 /* 25628 * Scsi_poll returns 0 (success) if the command completes and 25629 * the status block is STATUS_GOOD. We should only check 25630 * errors if this condition is not true. Even then we should 25631 * send our own request sense packet only if we have a check 25632 * condition and auto request sense has not been performed by 25633 * the hba. 25634 */ 25635 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 25636 25637 if ((sd_scsi_poll(un, wr_pktp) == 0) && 25638 (wr_pktp->pkt_resid == 0)) { 25639 err = SD_SUCCESS; 25640 break; 25641 } 25642 25643 /* 25644 * Check CMD_DEV_GONE 1st, give up if device is gone. 
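 * The recovery ladder in the remainder of this loop is: CHECK status
 * -> polled request sense; BUSY status -> lun reset, then target
 * reset; any other failure -> sd_reset_target(); and halfway through
 * the retries a full bus reset is attempted.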
25645 */ 25646 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 25647 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25648 "Error while dumping state...Device is gone\n"); 25649 break; 25650 } 25651 25652 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 25653 SD_INFO(SD_LOG_DUMP, un, 25654 "sddump: write failed with CHECK, try # %d\n", i); 25655 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 25656 (void) sd_send_polled_RQS(un); 25657 } 25658 25659 continue; 25660 } 25661 25662 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 25663 int reset_retval = 0; 25664 25665 SD_INFO(SD_LOG_DUMP, un, 25666 "sddump: write failed with BUSY, try # %d\n", i); 25667 25668 if (un->un_f_lun_reset_enabled == TRUE) { 25669 reset_retval = scsi_reset(SD_ADDRESS(un), 25670 RESET_LUN); 25671 } 25672 if (reset_retval == 0) { 25673 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25674 } 25675 (void) sd_send_polled_RQS(un); 25676 25677 } else { 25678 SD_INFO(SD_LOG_DUMP, un, 25679 "sddump: write failed with 0x%x, try # %d\n", 25680 SD_GET_PKT_STATUS(wr_pktp), i); 25681 mutex_enter(SD_MUTEX(un)); 25682 sd_reset_target(un, wr_pktp); 25683 mutex_exit(SD_MUTEX(un)); 25684 } 25685 25686 /* 25687 * If we are not getting anywhere with lun/target resets, 25688 * let's reset the bus. 25689 */ 25690 if (i == SD_NDUMP_RETRIES/2) { 25691 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 25692 (void) sd_send_polled_RQS(un); 25693 } 25694 } 25695 } 25696 25697 scsi_destroy_pkt(wr_pktp); 25698 mutex_enter(SD_MUTEX(un)); 25699 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 25700 mutex_exit(SD_MUTEX(un)); 25701 scsi_free_consistent_buf(wr_bp); 25702 } else { 25703 mutex_exit(SD_MUTEX(un)); 25704 } 25705 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 25706 return (err); 25707 } 25708 25709 /* 25710 * Function: sd_scsi_poll() 25711 * 25712 * Description: This is a wrapper for the scsi_poll call. 25713 * 25714 * Arguments: sd_lun - The unit structure 25715 * scsi_pkt - The scsi packet being sent to the device. 25716 * 25717 * Return Code: 0 - Command completed successfully with good status 25718 * -1 - Command failed. This could indicate a check condition 25719 * or other status value requiring recovery action. 25720 * 25721 * NOTE: This code is only called off sddump(). 25722 */ 25723 25724 static int 25725 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 25726 { 25727 int status; 25728 25729 ASSERT(un != NULL); 25730 ASSERT(!mutex_owned(SD_MUTEX(un))); 25731 ASSERT(pktp != NULL); 25732 25733 status = SD_SUCCESS; 25734 25735 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 25736 pktp->pkt_flags |= un->un_tagflags; 25737 pktp->pkt_flags &= ~FLAG_NODISCON; 25738 } 25739 25740 status = sd_ddi_scsi_poll(pktp); 25741 /* 25742 * Scsi_poll returns 0 (success) if the command completes and the 25743 * status block is STATUS_GOOD. We should only check errors if this 25744 * condition is not true. Even then we should send our own request 25745 * sense packet only if we have a check condition and auto 25746 * request sense has not been performed by the hba. 25747 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 25748 */ 25749 if ((status != SD_SUCCESS) && 25750 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 25751 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 25752 (pktp->pkt_reason != CMD_DEV_GONE)) 25753 (void) sd_send_polled_RQS(un); 25754 25755 return (status); 25756 } 25757 25758 /* 25759 * Function: sd_send_polled_RQS() 25760 * 25761 * Description: This sends the request sense command to a device. 
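 *		(Illustrative note, hedged) The packet and buffer used here
 *		are the preallocated un_rqs_pktp/un_rqs_bp pair guarded by
 *		un_sense_isbusy, so a typical polled caller simply does:
 *
 *			if (sd_send_polled_RQS(un) != SD_SUCCESS)
 *				(sense data is unavailable);
 *
 *		The send must go through sd_ddi_scsi_poll() directly;
 *		routing it through sd_scsi_poll() would recurse, since
 *		sd_scsi_poll() calls back into this function on a check
 *		condition.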
25762 * 25763 * Arguments: sd_lun - The unit structure 25764 * 25765 * Return Code: 0 - Command completed successfully with good status 25766 * -1 - Command failed. 25767 * 25768 */ 25769 25770 static int 25771 sd_send_polled_RQS(struct sd_lun *un) 25772 { 25773 int ret_val; 25774 struct scsi_pkt *rqs_pktp; 25775 struct buf *rqs_bp; 25776 25777 ASSERT(un != NULL); 25778 ASSERT(!mutex_owned(SD_MUTEX(un))); 25779 25780 ret_val = SD_SUCCESS; 25781 25782 rqs_pktp = un->un_rqs_pktp; 25783 rqs_bp = un->un_rqs_bp; 25784 25785 mutex_enter(SD_MUTEX(un)); 25786 25787 if (un->un_sense_isbusy) { 25788 ret_val = SD_FAILURE; 25789 mutex_exit(SD_MUTEX(un)); 25790 return (ret_val); 25791 } 25792 25793 /* 25794 * If the request sense buffer (and packet) is not in use, 25795 * let's set the un_sense_isbusy and send our packet 25796 */ 25797 un->un_sense_isbusy = 1; 25798 rqs_pktp->pkt_resid = 0; 25799 rqs_pktp->pkt_reason = 0; 25800 rqs_pktp->pkt_flags |= FLAG_NOINTR; 25801 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 25802 25803 mutex_exit(SD_MUTEX(un)); 25804 25805 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 25806 " 0x%p\n", rqs_bp->b_un.b_addr); 25807 25808 /* 25809 * Can't send this to sd_scsi_poll, we wrap ourselves around the 25810 * axle - it has a call into us! 25811 */ 25812 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 25813 SD_INFO(SD_LOG_COMMON, un, 25814 "sd_send_polled_RQS: RQS failed\n"); 25815 } 25816 25817 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 25818 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 25819 25820 mutex_enter(SD_MUTEX(un)); 25821 un->un_sense_isbusy = 0; 25822 mutex_exit(SD_MUTEX(un)); 25823 25824 return (ret_val); 25825 } 25826 25827 /* 25828 * Defines needed for localized version of the scsi_poll routine. 25829 */ 25830 #define CSEC 10000 /* usecs */ 25831 #define SEC_TO_CSEC (1000000/CSEC) 25832 25833 /* 25834 * Function: sd_ddi_scsi_poll() 25835 * 25836 * Description: Localized version of the scsi_poll routine. The purpose is to 25837 * send a scsi_pkt to a device as a polled command. This version 25838 * is to ensure more robust handling of transport errors. 25839 * Specifically this routine cures not ready, coming ready 25840 * transition for power up and reset of sonoma's. This can take 25841 * up to 45 seconds for power-on and 20 seconds for reset of a 25842 * sonoma lun. 25843 * 25844 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 25845 * 25846 * Return Code: 0 - Command completed successfully with good status 25847 * -1 - Command failed. 25848 * 25849 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can 25850 * be fixed (removing this code), we need to determine how to handle the 25851 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 25852 * 25853 * NOTE: This code is only called off sddump(). 25854 */ 25855 static int 25856 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 25857 { 25858 int rval = -1; 25859 int savef; 25860 long savet; 25861 void (*savec)(); 25862 int timeout; 25863 int busy_count; 25864 int poll_delay; 25865 int rc; 25866 uint8_t *sensep; 25867 struct scsi_arq_status *arqstat; 25868 extern int do_polled_io; 25869 25870 ASSERT(pkt->pkt_scbp); 25871 25872 /* 25873 * save old flags.. 
25874 */ 25875 savef = pkt->pkt_flags; 25876 savec = pkt->pkt_comp; 25877 savet = pkt->pkt_time; 25878 25879 pkt->pkt_flags |= FLAG_NOINTR; 25880 25881 /* 25882 * XXX there is nothing in the SCSA spec that states that we should not 25883 * do a callback for polled cmds; however, removing this will break sd 25884 * and probably other target drivers 25885 */ 25886 pkt->pkt_comp = NULL; 25887 25888 /* 25889 * we don't like a polled command without timeout. 25890 * 60 seconds seems long enough. 25891 */ 25892 if (pkt->pkt_time == 0) 25893 pkt->pkt_time = SCSI_POLL_TIMEOUT; 25894 25895 /* 25896 * Send polled cmd. 25897 * 25898 * We do some error recovery for various errors. Tran_busy, 25899 * queue full, and non-dispatched commands are retried every 10 msec. 25900 * as they are typically transient failures. Busy status and Not 25901 * Ready are retried every second as this status takes a while to 25902 * change. 25903 */ 25904 timeout = pkt->pkt_time * SEC_TO_CSEC; 25905 25906 for (busy_count = 0; busy_count < timeout; busy_count++) { 25907 /* 25908 * Initialize pkt status variables. 25909 */ 25910 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 25911 25912 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 25913 if (rc != TRAN_BUSY) { 25914 /* Transport failed - give up. */ 25915 break; 25916 } else { 25917 /* Transport busy - try again. */ 25918 poll_delay = 1 * CSEC; /* 10 msec. */ 25919 } 25920 } else { 25921 /* 25922 * Transport accepted - check pkt status. 25923 */ 25924 rc = (*pkt->pkt_scbp) & STATUS_MASK; 25925 if ((pkt->pkt_reason == CMD_CMPLT) && 25926 (rc == STATUS_CHECK) && 25927 (pkt->pkt_state & STATE_ARQ_DONE)) { 25928 arqstat = 25929 (struct scsi_arq_status *)(pkt->pkt_scbp); 25930 sensep = (uint8_t *)&arqstat->sts_sensedata; 25931 } else { 25932 sensep = NULL; 25933 } 25934 25935 if ((pkt->pkt_reason == CMD_CMPLT) && 25936 (rc == STATUS_GOOD)) { 25937 /* No error - we're done */ 25938 rval = 0; 25939 break; 25940 25941 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 25942 /* Lost connection - give up */ 25943 break; 25944 25945 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 25946 (pkt->pkt_state == 0)) { 25947 /* Pkt not dispatched - try again. */ 25948 poll_delay = 1 * CSEC; /* 10 msec. */ 25949 25950 } else if ((pkt->pkt_reason == CMD_CMPLT) && 25951 (rc == STATUS_QFULL)) { 25952 /* Queue full - try again. */ 25953 poll_delay = 1 * CSEC; /* 10 msec. */ 25954 25955 } else if ((pkt->pkt_reason == CMD_CMPLT) && 25956 (rc == STATUS_BUSY)) { 25957 /* Busy - try again. */ 25958 poll_delay = 100 * CSEC; /* 1 sec. */ 25959 busy_count += (SEC_TO_CSEC - 1); 25960 25961 } else if ((sensep != NULL) && 25962 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 25963 /* 25964 * Unit Attention - try again. 25965 * Pretend it took 1 sec. 25966 * NOTE: 'continue' avoids poll_delay 25967 */ 25968 busy_count += (SEC_TO_CSEC - 1); 25969 continue; 25970 25971 } else if ((sensep != NULL) && 25972 (scsi_sense_key(sensep) == KEY_NOT_READY) && 25973 (scsi_sense_asc(sensep) == 0x04) && 25974 (scsi_sense_ascq(sensep) == 0x01)) { 25975 /* 25976 * Not ready -> ready - try again. 25977 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 25978 * ...same as STATUS_BUSY 25979 */ 25980 poll_delay = 100 * CSEC; /* 1 sec. */ 25981 busy_count += (SEC_TO_CSEC - 1); 25982 25983 } else { 25984 /* BAD status - give up. 
*/ 25985 break; 25986 } 25987 } 25988 25989 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 25990 !do_polled_io) { 25991 delay(drv_usectohz(poll_delay)); 25992 } else { 25993 /* we busy wait during cpr_dump or interrupt threads */ 25994 drv_usecwait(poll_delay); 25995 } 25996 } 25997 25998 pkt->pkt_flags = savef; 25999 pkt->pkt_comp = savec; 26000 pkt->pkt_time = savet; 26001 26002 /* return on error */ 26003 if (rval) 26004 return (rval); 26005 26006 /* 26007 * This is not a performance critical code path. 26008 * 26009 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 26010 * issues associated with looking at DMA memory prior to 26011 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 26012 */ 26013 scsi_sync_pkt(pkt); 26014 return (0); 26015 } 26016 26017 26018 26019 /* 26020 * Function: sd_persistent_reservation_in_read_keys 26021 * 26022 * Description: This routine is the driver entry point for handling CD-ROM 26023 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 26024 * by sending the SCSI-3 PRIN commands to the device. 26025 * Processes the read keys command response by copying the 26026 * reservation key information into the user provided buffer. 26027 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 26028 * 26029 * Arguments: un - Pointer to soft state struct for the target. 26030 * usrp - user provided pointer to multihost Persistent In Read 26031 * Keys structure (mhioc_inkeys_t) 26032 * flag - this argument is a pass through to ddi_copyxxx() 26033 * directly from the mode argument of ioctl(). 26034 * 26035 * Return Code: 0 - Success 26036 * EACCES 26037 * ENOTSUP 26038 * errno return code from sd_send_scsi_cmd() 26039 * 26040 * Context: Can sleep. Does not return until command is completed. 26041 */ 26042 26043 static int 26044 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 26045 mhioc_inkeys_t *usrp, int flag) 26046 { 26047 #ifdef _MULTI_DATAMODEL 26048 struct mhioc_key_list32 li32; 26049 #endif 26050 sd_prin_readkeys_t *in; 26051 mhioc_inkeys_t *ptr; 26052 mhioc_key_list_t li; 26053 uchar_t *data_bufp; 26054 int data_len; 26055 int rval = 0; 26056 size_t copysz; 26057 sd_ssc_t *ssc; 26058 26059 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 26060 return (EINVAL); 26061 } 26062 bzero(&li, sizeof (mhioc_key_list_t)); 26063 26064 ssc = sd_ssc_init(un); 26065 26066 /* 26067 * Get the listsize from user 26068 */ 26069 #ifdef _MULTI_DATAMODEL 26070 26071 switch (ddi_model_convert_from(flag & FMODELS)) { 26072 case DDI_MODEL_ILP32: 26073 copysz = sizeof (struct mhioc_key_list32); 26074 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 26075 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26076 "sd_persistent_reservation_in_read_keys: " 26077 "failed ddi_copyin: mhioc_key_list32_t\n"); 26078 rval = EFAULT; 26079 goto done; 26080 } 26081 li.listsize = li32.listsize; 26082 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 26083 break; 26084 26085 case DDI_MODEL_NONE: 26086 copysz = sizeof (mhioc_key_list_t); 26087 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26088 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26089 "sd_persistent_reservation_in_read_keys: " 26090 "failed ddi_copyin: mhioc_key_list_t\n"); 26091 rval = EFAULT; 26092 goto done; 26093 } 26094 break; 26095 } 26096 26097 #else /* ! 
_MULTI_DATAMODEL */ 26098 copysz = sizeof (mhioc_key_list_t); 26099 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26100 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26101 "sd_persistent_reservation_in_read_keys: " 26102 "failed ddi_copyin: mhioc_key_list_t\n"); 26103 rval = EFAULT; 26104 goto done; 26105 } 26106 #endif 26107 26108 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 26109 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 26110 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26111 26112 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 26113 data_len, data_bufp); 26114 if (rval != 0) { 26115 if (rval == EIO) 26116 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 26117 else 26118 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 26119 goto done; 26120 } 26121 in = (sd_prin_readkeys_t *)data_bufp; 26122 ptr->generation = BE_32(in->generation); 26123 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 26124 26125 /* 26126 * Return the min(listsize, listlen) keys 26127 */ 26128 #ifdef _MULTI_DATAMODEL 26129 26130 switch (ddi_model_convert_from(flag & FMODELS)) { 26131 case DDI_MODEL_ILP32: 26132 li32.listlen = li.listlen; 26133 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 26134 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26135 "sd_persistent_reservation_in_read_keys: " 26136 "failed ddi_copyout: mhioc_key_list32_t\n"); 26137 rval = EFAULT; 26138 goto done; 26139 } 26140 break; 26141 26142 case DDI_MODEL_NONE: 26143 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26144 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26145 "sd_persistent_reservation_in_read_keys: " 26146 "failed ddi_copyout: mhioc_key_list_t\n"); 26147 rval = EFAULT; 26148 goto done; 26149 } 26150 break; 26151 } 26152 26153 #else /* ! _MULTI_DATAMODEL */ 26154 26155 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26156 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26157 "sd_persistent_reservation_in_read_keys: " 26158 "failed ddi_copyout: mhioc_key_list_t\n"); 26159 rval = EFAULT; 26160 goto done; 26161 } 26162 26163 #endif /* _MULTI_DATAMODEL */ 26164 26165 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 26166 li.listsize * MHIOC_RESV_KEY_SIZE); 26167 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 26168 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26169 "sd_persistent_reservation_in_read_keys: " 26170 "failed ddi_copyout: keylist\n"); 26171 rval = EFAULT; 26172 } 26173 done: 26174 sd_ssc_fini(ssc); 26175 kmem_free(data_bufp, data_len); 26176 return (rval); 26177 } 26178 26179 26180 /* 26181 * Function: sd_persistent_reservation_in_read_resv 26182 * 26183 * Description: This routine is the driver entry point for handling CD-ROM 26184 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 26185 * by sending the SCSI-3 PRIN commands to the device. 26186 * Process the read persistent reservations command response by 26187 * copying the reservation information into the user provided 26188 * buffer. Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 26189 * 26190 * Arguments: un - Pointer to soft state struct for the target. 26191 * usrp - user provided pointer to multihost Persistent In Read 26192 * Reservations structure (mhioc_inresvs_t) 26193 * flag - this argument is a pass through to ddi_copyxxx() 26194 * directly from the mode argument of ioctl(). 26195 * 26196 * Return Code: 0 - Success 26197 * EACCES 26198 * ENOTSUP 26199 * errno return code from sd_send_scsi_cmd() 26200 * 26201 * Context: Can sleep. Does not return until command is completed.
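 *
 * (Illustrative note) The PRIN READ RESERVATIONS payload is sized the
 * same way as the READ KEYS payload above: a fixed header (generation
 * and length, both big-endian) followed by room for listsize
 * descriptors of SCSI3_RESV_DESC_LEN bytes each, i.e.
 *
 *	data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN +
 *	    (sizeof (sd_prin_readresv_t) - sizeof (caddr_t));
 *
 * Only min(listsize, listlen) descriptors are copied back out, so a
 * caller whose list is too small gets a truncated copy plus the true
 * listlen with which to size a retry.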
26202 */ 26203 26204 static int 26205 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 26206 mhioc_inresvs_t *usrp, int flag) 26207 { 26208 #ifdef _MULTI_DATAMODEL 26209 struct mhioc_resv_desc_list32 resvlist32; 26210 #endif 26211 sd_prin_readresv_t *in; 26212 mhioc_inresvs_t *ptr; 26213 sd_readresv_desc_t *readresv_ptr; 26214 mhioc_resv_desc_list_t resvlist; 26215 mhioc_resv_desc_t resvdesc; 26216 uchar_t *data_bufp = NULL; 26217 int data_len; 26218 int rval = 0; 26219 int i; 26220 size_t copysz; 26221 mhioc_resv_desc_t *bufp; 26222 sd_ssc_t *ssc; 26223 26224 if ((ptr = usrp) == NULL) { 26225 return (EINVAL); 26226 } 26227 26228 ssc = sd_ssc_init(un); 26229 26230 /* 26231 * Get the listsize from user 26232 */ 26233 #ifdef _MULTI_DATAMODEL 26234 switch (ddi_model_convert_from(flag & FMODELS)) { 26235 case DDI_MODEL_ILP32: 26236 copysz = sizeof (struct mhioc_resv_desc_list32); 26237 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 26238 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26239 "sd_persistent_reservation_in_read_resv: " 26240 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26241 rval = EFAULT; 26242 goto done; 26243 } 26244 resvlist.listsize = resvlist32.listsize; 26245 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 26246 break; 26247 26248 case DDI_MODEL_NONE: 26249 copysz = sizeof (mhioc_resv_desc_list_t); 26250 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26251 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26252 "sd_persistent_reservation_in_read_resv: " 26253 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26254 rval = EFAULT; 26255 goto done; 26256 } 26257 break; 26258 } 26259 #else /* ! _MULTI_DATAMODEL */ 26260 copysz = sizeof (mhioc_resv_desc_list_t); 26261 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26262 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26263 "sd_persistent_reservation_in_read_resv: " 26264 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26265 rval = EFAULT; 26266 goto done; 26267 } 26268 #endif /* ! _MULTI_DATAMODEL */ 26269 26270 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 26271 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 26272 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26273 26274 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV, 26275 data_len, data_bufp); 26276 if (rval != 0) { 26277 if (rval == EIO) 26278 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 26279 else 26280 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 26281 goto done; 26282 } 26283 in = (sd_prin_readresv_t *)data_bufp; 26284 ptr->generation = BE_32(in->generation); 26285 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 26286 26287 /* 26288 * Return the min(listsize, listlen) reservation descriptors 26289 */ 26290 #ifdef _MULTI_DATAMODEL 26291 26292 switch (ddi_model_convert_from(flag & FMODELS)) { 26293 case DDI_MODEL_ILP32: 26294 resvlist32.listlen = resvlist.listlen; 26295 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 26296 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26297 "sd_persistent_reservation_in_read_resv: " 26298 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26299 rval = EFAULT; 26300 goto done; 26301 } 26302 break; 26303 26304 case DDI_MODEL_NONE: 26305 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26306 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26307 "sd_persistent_reservation_in_read_resv: " 26308 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26309 rval = EFAULT; 26310 goto done; 26311 } 26312 break; 26313 } 26314 26315 #else /* !
_MULTI_DATAMODEL */ 26316 26317 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26318 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26319 "sd_persistent_reservation_in_read_resv: " 26320 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26321 rval = EFAULT; 26322 goto done; 26323 } 26324 26325 #endif /* ! _MULTI_DATAMODEL */ 26326 26327 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 26328 bufp = resvlist.list; 26329 copysz = sizeof (mhioc_resv_desc_t); 26330 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 26331 i++, readresv_ptr++, bufp++) { 26332 26333 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 26334 MHIOC_RESV_KEY_SIZE); 26335 resvdesc.type = readresv_ptr->type; 26336 resvdesc.scope = readresv_ptr->scope; 26337 resvdesc.scope_specific_addr = 26338 BE_32(readresv_ptr->scope_specific_addr); 26339 26340 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 26341 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26342 "sd_persistent_reservation_in_read_resv: " 26343 "failed ddi_copyout: resvlist\n"); 26344 rval = EFAULT; 26345 goto done; 26346 } 26347 } 26348 done: 26349 sd_ssc_fini(ssc); 26350 /* only if data_bufp is allocated, we need to free it */ 26351 if (data_bufp) { 26352 kmem_free(data_bufp, data_len); 26353 } 26354 return (rval); 26355 } 26356 26357 26358 /* 26359 * Function: sr_change_blkmode() 26360 * 26361 * Description: This routine is the driver entry point for handling CD-ROM 26362 * block mode ioctl requests. Support for returning and changing 26363 * the current block size in use by the device is implemented. The 26364 * LBA size is changed via a MODE SELECT Block Descriptor. 26365 * 26366 * This routine issues a mode sense with an allocation length of 26367 * 12 bytes for the mode page header and a single block descriptor. 26368 * 26369 * Arguments: dev - the device 'dev_t' 26370 * cmd - the request type; one of CDROMGBLKMODE (get) or 26371 * CDROMSBLKMODE (set) 26372 * data - current block size or requested block size 26373 * flag - this argument is a pass through to ddi_copyxxx() directly 26374 * from the mode argument of ioctl(). 26375 * 26376 * Return Code: the code returned by sd_send_scsi_cmd() 26377 * EINVAL if invalid arguments are provided 26378 * EFAULT if ddi_copyxxx() fails 26379 * ENXIO if fail ddi_get_soft_state 26380 * EIO if invalid mode sense block descriptor length 26381 * 26382 */ 26383 26384 static int 26385 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 26386 { 26387 struct sd_lun *un = NULL; 26388 struct mode_header *sense_mhp, *select_mhp; 26389 struct block_descriptor *sense_desc, *select_desc; 26390 int current_bsize; 26391 int rval = EINVAL; 26392 uchar_t *sense = NULL; 26393 uchar_t *select = NULL; 26394 sd_ssc_t *ssc; 26395 26396 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 26397 26398 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26399 return (ENXIO); 26400 } 26401 26402 /* 26403 * The block length is changed via the Mode Select block descriptor, the 26404 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 26405 * required as part of this routine. Therefore the mode sense allocation 26406 * length is specified to be the length of a mode page header and a 26407 * block descriptor. 
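 *
 * (Illustrative sketch, assuming the usual SPC mode sense layout) The
 * 12-byte reply parsed below is a 4-byte mode header followed by one
 * 8-byte block descriptor; the logical block length sits in the
 * descriptor's last three bytes, so the current size is recovered as
 *
 *	current_bsize = (blksize_hi << 16) | (blksize_mid << 8) |
 *	    blksize_lo;
 *
 * and a new size is set by writing those same three bytes back via
 * MODE SELECT.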
26408 */ 26409 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26410 26411 ssc = sd_ssc_init(un); 26412 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 26413 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD); 26414 sd_ssc_fini(ssc); 26415 if (rval != 0) { 26416 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26417 "sr_change_blkmode: Mode Sense Failed\n"); 26418 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26419 return (rval); 26420 } 26421 26422 /* Check the block descriptor len to handle only 1 block descriptor */ 26423 sense_mhp = (struct mode_header *)sense; 26424 if ((sense_mhp->bdesc_length == 0) || 26425 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 26426 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26427 "sr_change_blkmode: Mode Sense returned invalid block" 26428 " descriptor length\n"); 26429 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26430 return (EIO); 26431 } 26432 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 26433 current_bsize = ((sense_desc->blksize_hi << 16) | 26434 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 26435 26436 /* Process command */ 26437 switch (cmd) { 26438 case CDROMGBLKMODE: 26439 /* Return the block size obtained during the mode sense */ 26440 if (ddi_copyout(&current_bsize, (void *)data, 26441 sizeof (int), flag) != 0) 26442 rval = EFAULT; 26443 break; 26444 case CDROMSBLKMODE: 26445 /* Validate the requested block size */ 26446 switch (data) { 26447 case CDROM_BLK_512: 26448 case CDROM_BLK_1024: 26449 case CDROM_BLK_2048: 26450 case CDROM_BLK_2056: 26451 case CDROM_BLK_2336: 26452 case CDROM_BLK_2340: 26453 case CDROM_BLK_2352: 26454 case CDROM_BLK_2368: 26455 case CDROM_BLK_2448: 26456 case CDROM_BLK_2646: 26457 case CDROM_BLK_2647: 26458 break; 26459 default: 26460 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26461 "sr_change_blkmode: " 26462 "Block Size '%ld' Not Supported\n", data); 26463 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26464 return (EINVAL); 26465 } 26466 26467 /* 26468 * The current block size matches the requested block size so 26469 * there is no need to send the mode select to change the size 26470 */ 26471 if (current_bsize == data) { 26472 break; 26473 } 26474 26475 /* Build the select data for the requested block size */ 26476 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26477 select_mhp = (struct mode_header *)select; 26478 select_desc = 26479 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 26480 /* 26481 * The LBA size is changed via the block descriptor, so the 26482 * descriptor is built according to the user data 26483 */ 26484 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 26485 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 26486 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 26487 select_desc->blksize_lo = (char)((data) & 0x000000ff); 26488 26489 /* Send the mode select for the requested block size */ 26490 ssc = sd_ssc_init(un); 26491 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 26492 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 26493 SD_PATH_STANDARD); 26494 sd_ssc_fini(ssc); 26495 if (rval != 0) { 26496 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26497 "sr_change_blkmode: Mode Select Failed\n"); 26498 /* 26499 * The mode select failed for the requested block size, 26500 * so reset the data for the original block size and 26501 * send it to the target. The error is indicated by the 26502 * return value for the failed mode select.
26503 */ 26504 select_desc->blksize_hi = sense_desc->blksize_hi; 26505 select_desc->blksize_mid = sense_desc->blksize_mid; 26506 select_desc->blksize_lo = sense_desc->blksize_lo; 26507 ssc = sd_ssc_init(un); 26508 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 26509 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 26510 SD_PATH_STANDARD); 26511 sd_ssc_fini(ssc); 26512 } else { 26513 ASSERT(!mutex_owned(SD_MUTEX(un))); 26514 mutex_enter(SD_MUTEX(un)); 26515 sd_update_block_info(un, (uint32_t)data, 0); 26516 mutex_exit(SD_MUTEX(un)); 26517 } 26518 break; 26519 default: 26520 /* should not reach here, but check anyway */ 26521 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26522 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 26523 rval = EINVAL; 26524 break; 26525 } 26526 26527 if (select) { 26528 kmem_free(select, BUFLEN_CHG_BLK_MODE); 26529 } 26530 if (sense) { 26531 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26532 } 26533 return (rval); 26534 } 26535 26536 26537 /* 26538 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 26539 * implement driver support for getting and setting the CD speed. The command 26540 * set used will be based on the device type. If the device has not been 26541 * identified as MMC the Toshiba vendor specific mode page will be used. If 26542 * the device is MMC but does not support the Real Time Streaming feature 26543 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 26544 * be used to read the speed. 26545 */ 26546 26547 /* 26548 * Function: sr_change_speed() 26549 * 26550 * Description: This routine is the driver entry point for handling CD-ROM 26551 * drive speed ioctl requests for devices supporting the Toshiba 26552 * vendor specific drive speed mode page. Support for returning 26553 * and changing the current drive speed in use by the device is 26554 * implemented. 26555 * 26556 * Arguments: dev - the device 'dev_t' 26557 * cmd - the request type; one of CDROMGDRVSPEED (get) or 26558 * CDROMSDRVSPEED (set) 26559 * data - current drive speed or requested drive speed 26560 * flag - this argument is a pass through to ddi_copyxxx() directly 26561 * from the mode argument of ioctl(). 26562 * 26563 * Return Code: the code returned by sd_send_scsi_cmd() 26564 * EINVAL if invalid arguments are provided 26565 * EFAULT if ddi_copyxxx() fails 26566 * ENXIO if fail ddi_get_soft_state 26567 * EIO if invalid mode sense block descriptor length 26568 */ 26569 26570 static int 26571 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26572 { 26573 struct sd_lun *un = NULL; 26574 struct mode_header *sense_mhp, *select_mhp; 26575 struct mode_speed *sense_page, *select_page; 26576 int current_speed; 26577 int rval = EINVAL; 26578 int bd_len; 26579 uchar_t *sense = NULL; 26580 uchar_t *select = NULL; 26581 sd_ssc_t *ssc; 26582 26583 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26584 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26585 return (ENXIO); 26586 } 26587 26588 /* 26589 * Note: The drive speed is being modified here according to a Toshiba 26590 * vendor specific mode page (0x31). 
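 * (Hedged note) The page's speed byte is an enumerated value rather
 * than a KB/s rate; for example, the code below maps the value 0x2 to
 * and from the CDROM_TWELVE_SPEED ioctl code, while the other
 * CDROM_*_SPEED codes are passed through to the page unchanged.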
26591 */ 26592 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26593 26594 ssc = sd_ssc_init(un); 26595 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 26596 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 26597 SD_PATH_STANDARD); 26598 sd_ssc_fini(ssc); 26599 if (rval != 0) { 26600 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26601 "sr_change_speed: Mode Sense Failed\n"); 26602 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26603 return (rval); 26604 } 26605 sense_mhp = (struct mode_header *)sense; 26606 26607 /* Check the block descriptor len to handle only 1 block descriptor */ 26608 bd_len = sense_mhp->bdesc_length; 26609 if (bd_len > MODE_BLK_DESC_LENGTH) { 26610 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26611 "sr_change_speed: Mode Sense returned invalid block " 26612 "descriptor length\n"); 26613 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26614 return (EIO); 26615 } 26616 26617 sense_page = (struct mode_speed *) 26618 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 26619 current_speed = sense_page->speed; 26620 26621 /* Process command */ 26622 switch (cmd) { 26623 case CDROMGDRVSPEED: 26624 /* Return the drive speed obtained during the mode sense */ 26625 if (current_speed == 0x2) { 26626 current_speed = CDROM_TWELVE_SPEED; 26627 } 26628 if (ddi_copyout(&current_speed, (void *)data, 26629 sizeof (int), flag) != 0) { 26630 rval = EFAULT; 26631 } 26632 break; 26633 case CDROMSDRVSPEED: 26634 /* Validate the requested drive speed */ 26635 switch ((uchar_t)data) { 26636 case CDROM_TWELVE_SPEED: 26637 data = 0x2; 26638 /*FALLTHROUGH*/ 26639 case CDROM_NORMAL_SPEED: 26640 case CDROM_DOUBLE_SPEED: 26641 case CDROM_QUAD_SPEED: 26642 case CDROM_MAXIMUM_SPEED: 26643 break; 26644 default: 26645 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26646 "sr_change_speed: " 26647 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 26648 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26649 return (EINVAL); 26650 } 26651 26652 /* 26653 * The current drive speed matches the requested drive speed so 26654 * there is no need to send the mode select to change the speed 26655 */ 26656 if (current_speed == data) { 26657 break; 26658 } 26659 26660 /* Build the select data for the requested drive speed */ 26661 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26662 select_mhp = (struct mode_header *)select; 26663 select_mhp->bdesc_length = 0; 26664 select_page = 26665 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 26668 select_page->mode_page.code = CDROM_MODE_SPEED; 26669 select_page->mode_page.length = 2; 26670 select_page->speed = (uchar_t)data; 26671 26672 /* Send the mode select for the requested drive speed */ 26673 ssc = sd_ssc_init(un); 26674 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 26675 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26676 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26677 sd_ssc_fini(ssc); 26678 if (rval != 0) { 26679 /* 26680 * The mode select failed for the requested drive speed, 26681 * so reset the data for the original drive speed and 26682 * send it to the target. The error is indicated by the 26683 * return value for the failed mode select.
26684 */ 26685 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26686 "sr_change_speed: Mode Select Failed\n"); 26687 select_page->speed = sense_page->speed; 26688 ssc = sd_ssc_init(un); 26689 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 26690 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26691 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26692 sd_ssc_fini(ssc); 26693 } 26694 break; 26695 default: 26696 /* should not reach here, but check anyway */ 26697 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26698 "sr_change_speed: Command '%x' Not Supported\n", cmd); 26699 rval = EINVAL; 26700 break; 26701 } 26702 26703 if (select) { 26704 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 26705 } 26706 if (sense) { 26707 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26708 } 26709 26710 return (rval); 26711 } 26712 26713 26714 /* 26715 * Function: sr_atapi_change_speed() 26716 * 26717 * Description: This routine is the driver entry point for handling CD-ROM 26718 * drive speed ioctl requests for MMC devices that do not support 26719 * the Real Time Streaming feature (0x107). 26720 * 26721 * Note: This routine will use the SET SPEED command which may not 26722 * be supported by all devices. 26723 * 26724 * Arguments: dev - the device 'dev_t' 26725 * cmd - the request type; one of CDROMGDRVSPEED (get) or 26726 * CDROMSDRVSPEED (set) 26727 * data - current drive speed or requested drive speed 26728 * flag - this argument is a pass through to ddi_copyxxx() directly 26729 * from the mode argument of ioctl(). 26730 * 26731 * Return Code: the code returned by sd_send_scsi_cmd() 26732 * EINVAL if invalid arguments are provided 26733 * EFAULT if ddi_copyxxx() fails 26734 * ENXIO if fail ddi_get_soft_state 26735 * EIO if invalid mode sense block descriptor length 26736 */ 26737 26738 static int 26739 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26740 { 26741 struct sd_lun *un; 26742 struct uscsi_cmd *com = NULL; 26743 struct mode_header_grp2 *sense_mhp; 26744 uchar_t *sense_page; 26745 uchar_t *sense = NULL; 26746 char cdb[CDB_GROUP5]; 26747 int bd_len; 26748 int current_speed = 0; 26749 int max_speed = 0; 26750 int rval; 26751 sd_ssc_t *ssc; 26752 26753 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26754 26755 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26756 return (ENXIO); 26757 } 26758 26759 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 26760 26761 ssc = sd_ssc_init(un); 26762 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 26763 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 26764 SD_PATH_STANDARD); 26765 sd_ssc_fini(ssc); 26766 if (rval != 0) { 26767 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26768 "sr_atapi_change_speed: Mode Sense Failed\n"); 26769 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26770 return (rval); 26771 } 26772 26773 /* Check the block descriptor len to handle only 1 block descriptor */ 26774 sense_mhp = (struct mode_header_grp2 *)sense; 26775 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 26776 if (bd_len > MODE_BLK_DESC_LENGTH) { 26777 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26778 "sr_atapi_change_speed: Mode Sense returned invalid " 26779 "block descriptor length\n"); 26780 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26781 return (EIO); 26782 } 26783 26784 /* Calculate the current and maximum drive speeds */ 26785 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26786 current_speed = (sense_page[14] << 8) | sense_page[15]; 26787 max_speed = (sense_page[8] << 8) | sense_page[9]; 26788 26789
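/*
 * (Editor's note, hedged) In the MMC capabilities page parsed above,
 * bytes 8-9 carry the maximum read speed and bytes 14-15 the current
 * read speed, both in KB/s. Assuming SD_SPEED_1X encodes the roughly
 * 176 KB/s single-speed CD rate used by the get path below, a
 * quad-speed drive would report about 4 * SD_SPEED_1X, i.e. around
 * 704 KB/s.
 */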
/* Process the command */ 26790 switch (cmd) { 26791 case CDROMGDRVSPEED: 26792 current_speed /= SD_SPEED_1X; 26793 if (ddi_copyout(&current_speed, (void *)data, 26794 sizeof (int), flag) != 0) 26795 rval = EFAULT; 26796 break; 26797 case CDROMSDRVSPEED: 26798 /* Convert the speed code to KB/sec */ 26799 switch ((uchar_t)data) { 26800 case CDROM_NORMAL_SPEED: 26801 current_speed = SD_SPEED_1X; 26802 break; 26803 case CDROM_DOUBLE_SPEED: 26804 current_speed = 2 * SD_SPEED_1X; 26805 break; 26806 case CDROM_QUAD_SPEED: 26807 current_speed = 4 * SD_SPEED_1X; 26808 break; 26809 case CDROM_TWELVE_SPEED: 26810 current_speed = 12 * SD_SPEED_1X; 26811 break; 26812 case CDROM_MAXIMUM_SPEED: 26813 current_speed = 0xffff; 26814 break; 26815 default: 26816 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26817 "sr_atapi_change_speed: invalid drive speed %d\n", 26818 (uchar_t)data); 26819 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26820 return (EINVAL); 26821 } 26822 26823 /* Check the request against the drive's max speed. */ 26824 if (current_speed != 0xffff) { 26825 if (current_speed > max_speed) { 26826 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26827 return (EINVAL); 26828 } 26829 } 26830 26831 /* 26832 * Build and send the SET SPEED command 26833 * 26834 * Note: The SET SPEED (0xBB) command used in this routine is 26835 * obsolete per the SCSI MMC spec but still supported in the 26836 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI; 26837 * therefore the command is still implemented in this routine. 26838 */ 26839 bzero(cdb, sizeof (cdb)); 26840 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 26841 cdb[2] = (uchar_t)(current_speed >> 8); 26842 cdb[3] = (uchar_t)current_speed; 26843 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26844 com->uscsi_cdb = (caddr_t)cdb; 26845 com->uscsi_cdblen = CDB_GROUP5; 26846 com->uscsi_bufaddr = NULL; 26847 com->uscsi_buflen = 0; 26848 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26849 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 26850 break; 26851 default: 26852 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26853 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 26854 rval = EINVAL; 26855 } 26856 26857 if (sense) { 26858 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26859 } 26860 if (com) { 26861 kmem_free(com, sizeof (*com)); 26862 } 26863 return (rval); 26864 } 26865 26866 26867 /* 26868 * Function: sr_pause_resume() 26869 * 26870 * Description: This routine is the driver entry point for handling CD-ROM 26871 * pause/resume ioctl requests. This only affects the audio play 26872 * operation. 26873 * 26874 * Arguments: dev - the device 'dev_t' 26875 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 26876 * for setting the resume bit of the cdb.
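 *		(Illustrative note) The whole request is carried in the
 *		Resume bit (byte 8, bit 0) of the PAUSE/RESUME (0x4B) cdb
 *		built below: cdb[8] = 1 resumes audio play and cdb[8] = 0
 *		pauses it.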
26877 * 26878 * Return Code: the code returned by sd_send_scsi_cmd() 26879 * EINVAL if invalid mode specified 26880 * 26881 */ 26882 26883 static int 26884 sr_pause_resume(dev_t dev, int cmd) 26885 { 26886 struct sd_lun *un; 26887 struct uscsi_cmd *com; 26888 char cdb[CDB_GROUP1]; 26889 int rval; 26890 26891 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26892 return (ENXIO); 26893 } 26894 26895 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26896 bzero(cdb, CDB_GROUP1); 26897 cdb[0] = SCMD_PAUSE_RESUME; 26898 switch (cmd) { 26899 case CDROMRESUME: 26900 cdb[8] = 1; 26901 break; 26902 case CDROMPAUSE: 26903 cdb[8] = 0; 26904 break; 26905 default: 26906 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 26907 " Command '%x' Not Supported\n", cmd); 26908 rval = EINVAL; 26909 goto done; 26910 } 26911 26912 com->uscsi_cdb = cdb; 26913 com->uscsi_cdblen = CDB_GROUP1; 26914 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26915 26916 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26917 SD_PATH_STANDARD); 26918 26919 done: 26920 kmem_free(com, sizeof (*com)); 26921 return (rval); 26922 } 26923 26924 26925 /* 26926 * Function: sr_play_msf() 26927 * 26928 * Description: This routine is the driver entry point for handling CD-ROM 26929 * ioctl requests to output the audio signals at the specified 26930 * starting address and continue the audio play until the specified 26931 * ending address (CDROMPLAYMSF) The address is in Minute Second 26932 * Frame (MSF) format. 26933 * 26934 * Arguments: dev - the device 'dev_t' 26935 * data - pointer to user provided audio msf structure, 26936 * specifying start/end addresses. 26937 * flag - this argument is a pass through to ddi_copyxxx() 26938 * directly from the mode argument of ioctl(). 
26939 * 26940 * Return Code: the code returned by sd_send_scsi_cmd() 26941 * EFAULT if ddi_copyxxx() fails 26942 * ENXIO if fail ddi_get_soft_state 26943 * EINVAL if data pointer is NULL 26944 */ 26945 26946 static int 26947 sr_play_msf(dev_t dev, caddr_t data, int flag) 26948 { 26949 struct sd_lun *un; 26950 struct uscsi_cmd *com; 26951 struct cdrom_msf msf_struct; 26952 struct cdrom_msf *msf = &msf_struct; 26953 char cdb[CDB_GROUP1]; 26954 int rval; 26955 26956 if (data == NULL) { 26957 return (EINVAL); 26958 } 26959 26960 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26961 return (ENXIO); 26962 } 26963 26964 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 26965 return (EFAULT); 26966 } 26967 26968 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26969 bzero(cdb, CDB_GROUP1); 26970 cdb[0] = SCMD_PLAYAUDIO_MSF; 26971 if (un->un_f_cfg_playmsf_bcd == TRUE) { 26972 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 26973 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 26974 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 26975 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 26976 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 26977 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 26978 } else { 26979 cdb[3] = msf->cdmsf_min0; 26980 cdb[4] = msf->cdmsf_sec0; 26981 cdb[5] = msf->cdmsf_frame0; 26982 cdb[6] = msf->cdmsf_min1; 26983 cdb[7] = msf->cdmsf_sec1; 26984 cdb[8] = msf->cdmsf_frame1; 26985 } 26986 com->uscsi_cdb = cdb; 26987 com->uscsi_cdblen = CDB_GROUP1; 26988 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26989 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26990 SD_PATH_STANDARD); 26991 kmem_free(com, sizeof (*com)); 26992 return (rval); 26993 } 26994 26995 26996 /* 26997 * Function: sr_play_trkind() 26998 * 26999 * Description: This routine is the driver entry point for handling CD-ROM 27000 * ioctl requests to output the audio signals at the specified 27001 * starting address and continue the audio play until the specified 27002 * ending address (CDROMPLAYTRKIND). The address is in Track Index 27003 * format. 27004 * 27005 * Arguments: dev - the device 'dev_t' 27006 * data - pointer to user provided audio track/index structure, 27007 * specifying start/end addresses. 27008 * flag - this argument is a pass through to ddi_copyxxx() 27009 * directly from the mode argument of ioctl(). 
27010 * 27011 * Return Code: the code returned by sd_send_scsi_cmd() 27012 * EFAULT if ddi_copyxxx() fails 27013 * ENXIO if fail ddi_get_soft_state 27014 * EINVAL if data pointer is NULL 27015 */ 27016 27017 static int 27018 sr_play_trkind(dev_t dev, caddr_t data, int flag) 27019 { 27020 struct cdrom_ti ti_struct; 27021 struct cdrom_ti *ti = &ti_struct; 27022 struct uscsi_cmd *com = NULL; 27023 char cdb[CDB_GROUP1]; 27024 int rval; 27025 27026 if (data == NULL) { 27027 return (EINVAL); 27028 } 27029 27030 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 27031 return (EFAULT); 27032 } 27033 27034 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27035 bzero(cdb, CDB_GROUP1); 27036 cdb[0] = SCMD_PLAYAUDIO_TI; 27037 cdb[4] = ti->cdti_trk0; 27038 cdb[5] = ti->cdti_ind0; 27039 cdb[7] = ti->cdti_trk1; 27040 cdb[8] = ti->cdti_ind1; 27041 com->uscsi_cdb = cdb; 27042 com->uscsi_cdblen = CDB_GROUP1; 27043 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27044 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27045 SD_PATH_STANDARD); 27046 kmem_free(com, sizeof (*com)); 27047 return (rval); 27048 } 27049 27050 27051 /* 27052 * Function: sr_read_all_subcodes() 27053 * 27054 * Description: This routine is the driver entry point for handling CD-ROM 27055 * ioctl requests to return raw subcode data while the target is 27056 * playing audio (CDROMSUBCODE). 27057 * 27058 * Arguments: dev - the device 'dev_t' 27059 * data - pointer to user provided cdrom subcode structure, 27060 * specifying the transfer length and address. 27061 * flag - this argument is a pass through to ddi_copyxxx() 27062 * directly from the mode argument of ioctl(). 27063 * 27064 * Return Code: the code returned by sd_send_scsi_cmd() 27065 * EFAULT if ddi_copyxxx() fails 27066 * ENXIO if fail ddi_get_soft_state 27067 * EINVAL if data pointer is NULL 27068 */ 27069 27070 static int 27071 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 27072 { 27073 struct sd_lun *un = NULL; 27074 struct uscsi_cmd *com = NULL; 27075 struct cdrom_subcode *subcode = NULL; 27076 int rval; 27077 size_t buflen; 27078 char cdb[CDB_GROUP5]; 27079 27080 #ifdef _MULTI_DATAMODEL 27081 /* To support ILP32 applications in an LP64 world */ 27082 struct cdrom_subcode32 cdrom_subcode32; 27083 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 27084 #endif 27085 if (data == NULL) { 27086 return (EINVAL); 27087 } 27088 27089 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27090 return (ENXIO); 27091 } 27092 27093 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 27094 27095 #ifdef _MULTI_DATAMODEL 27096 switch (ddi_model_convert_from(flag & FMODELS)) { 27097 case DDI_MODEL_ILP32: 27098 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 27099 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27100 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27101 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27102 return (EFAULT); 27103 } 27104 /* Convert the ILP32 uscsi data from the application to LP64 */ 27105 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 27106 break; 27107 case DDI_MODEL_NONE: 27108 if (ddi_copyin(data, subcode, 27109 sizeof (struct cdrom_subcode), flag)) { 27110 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27111 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27112 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27113 return (EFAULT); 27114 } 27115 break; 27116 } 27117 #else /* ! 
_MULTI_DATAMODEL */ 27118 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 27119 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27120 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27121 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27122 return (EFAULT); 27123 } 27124 #endif /* _MULTI_DATAMODEL */ 27125 27126 /* 27127 * Since MMC-2 expects max 3 bytes for length, check if the 27128 * length input is greater than 3 bytes 27129 */ 27130 if ((subcode->cdsc_length & 0xFF000000) != 0) { 27131 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27132 "sr_read_all_subcodes: " 27133 "cdrom transfer length too large: %d (limit %d)\n", 27134 subcode->cdsc_length, 0xFFFFFF); 27135 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27136 return (EINVAL); 27137 } 27138 27139 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 27140 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27141 bzero(cdb, CDB_GROUP5); 27142 27143 if (un->un_f_mmc_cap == TRUE) { 27144 cdb[0] = (char)SCMD_READ_CD; 27145 cdb[2] = (char)0xff; 27146 cdb[3] = (char)0xff; 27147 cdb[4] = (char)0xff; 27148 cdb[5] = (char)0xff; 27149 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27150 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27151 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 27152 cdb[10] = 1; 27153 } else { 27154 /* 27155 * Note: A vendor specific command (0xDF) is being used here to 27156 * request a read of all subcodes. 27157 */ 27158 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 27159 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 27160 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27161 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27162 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 27163 } 27164 com->uscsi_cdb = cdb; 27165 com->uscsi_cdblen = CDB_GROUP5; 27166 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 27167 com->uscsi_buflen = buflen; 27168 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27169 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27170 SD_PATH_STANDARD); 27171 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27172 kmem_free(com, sizeof (*com)); 27173 return (rval); 27174 } 27175 27176 27177 /* 27178 * Function: sr_read_subchannel() 27179 * 27180 * Description: This routine is the driver entry point for handling CD-ROM 27181 * ioctl requests to return the Q sub-channel data of the CD 27182 * current position block (CDROMSUBCHNL). The data includes the 27183 * track number, index number, absolute CD-ROM address (LBA or MSF 27184 * format per the user), track relative CD-ROM address (LBA or MSF 27185 * format per the user), control data and audio status. 27186 * 27187 * Arguments: dev - the device 'dev_t' 27188 * data - pointer to user provided cdrom sub-channel structure 27189 * flag - this argument is a pass through to ddi_copyxxx() 27190 * directly from the mode argument of ioctl().
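 *
 * (Illustrative sketch of the parse done below) In the 16-byte
 * current-position response, buffer[1] is the audio status, the high
 * and low nibbles of buffer[5] carry the ADR and CTRL fields, and
 * bytes 8-11 / 12-15 hold the absolute / track-relative addresses,
 * either as 4-byte LBAs or as MSF bytes (BCD-encoded when
 * un_f_cfg_readsub_bcd is set):
 *
 *	subchnl->cdsc_adr  = (buffer[5] & 0xF0);
 *	subchnl->cdsc_ctrl = (buffer[5] & 0x0F);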
27191 * 27192 * Return Code: the code returned by sd_send_scsi_cmd() 27193 * EFAULT if ddi_copyxxx() fails 27194 * ENXIO if fail ddi_get_soft_state 27195 * EINVAL if data pointer is NULL 27196 */ 27197 27198 static int 27199 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 27200 { 27201 struct sd_lun *un; 27202 struct uscsi_cmd *com; 27203 struct cdrom_subchnl subchanel; 27204 struct cdrom_subchnl *subchnl = &subchanel; 27205 char cdb[CDB_GROUP1]; 27206 caddr_t buffer; 27207 int rval; 27208 27209 if (data == NULL) { 27210 return (EINVAL); 27211 } 27212 27213 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27214 (un->un_state == SD_STATE_OFFLINE)) { 27215 return (ENXIO); 27216 } 27217 27218 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 27219 return (EFAULT); 27220 } 27221 27222 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 27223 bzero(cdb, CDB_GROUP1); 27224 cdb[0] = SCMD_READ_SUBCHANNEL; 27225 /* Set the MSF bit based on the user requested address format */ 27226 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 27227 /* 27228 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 27229 * returned 27230 */ 27231 cdb[2] = 0x40; 27232 /* 27233 * Set byte 3 to specify the return data format. A value of 0x01 27234 * indicates that the CD-ROM current position should be returned. 27235 */ 27236 cdb[3] = 0x01; 27237 cdb[8] = 0x10; 27238 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27239 com->uscsi_cdb = cdb; 27240 com->uscsi_cdblen = CDB_GROUP1; 27241 com->uscsi_bufaddr = buffer; 27242 com->uscsi_buflen = 16; 27243 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27244 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27245 SD_PATH_STANDARD); 27246 if (rval != 0) { 27247 kmem_free(buffer, 16); 27248 kmem_free(com, sizeof (*com)); 27249 return (rval); 27250 } 27251 27252 /* Process the returned Q sub-channel data */ 27253 subchnl->cdsc_audiostatus = buffer[1]; 27254 subchnl->cdsc_adr = (buffer[5] & 0xF0); 27255 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 27256 subchnl->cdsc_trk = buffer[6]; 27257 subchnl->cdsc_ind = buffer[7]; 27258 if (subchnl->cdsc_format & CDROM_LBA) { 27259 subchnl->cdsc_absaddr.lba = 27260 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27261 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27262 subchnl->cdsc_reladdr.lba = 27263 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 27264 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 27265 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 27266 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 27267 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 27268 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 27269 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 27270 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 27271 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 27272 } else { 27273 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 27274 subchnl->cdsc_absaddr.msf.second = buffer[10]; 27275 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 27276 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 27277 subchnl->cdsc_reladdr.msf.second = buffer[14]; 27278 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 27279 } 27280 kmem_free(buffer, 16); 27281 kmem_free(com, sizeof (*com)); 27282 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 27283 != 0) { 27284 return (EFAULT); 27285 } 27286 return (rval); 27287 } 27288 27289 27290 /* 27291 * Function: sr_read_tocentry() 27292 * 
27293 * Description: This routine is the driver entry point for handling CD-ROM 27294 * ioctl requests to read from the Table of Contents (TOC) 27295 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 27296 * fields, the starting address (LBA or MSF format per the user) 27297 * and the data mode if the user specified track is a data track. 27298 * 27299 * Note: The READ HEADER (0x44) command used in this routine is 27300 * obsolete per the SCSI MMC spec but still supported in the 27301 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI; 27302 * therefore the command is still implemented in this routine. 27303 * 27304 * Arguments: dev - the device 'dev_t' 27305 * data - pointer to user provided toc entry structure, 27306 * specifying the track # and the address format 27307 * (LBA or MSF). 27308 * flag - this argument is a pass through to ddi_copyxxx() 27309 * directly from the mode argument of ioctl(). 27310 * 27311 * Return Code: the code returned by sd_send_scsi_cmd() 27312 * EFAULT if ddi_copyxxx() fails 27313 * ENXIO if fail ddi_get_soft_state 27314 * EINVAL if data pointer is NULL 27315 */ 27316 27317 static int 27318 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 27319 { 27320 struct sd_lun *un = NULL; 27321 struct uscsi_cmd *com; 27322 struct cdrom_tocentry toc_entry; 27323 struct cdrom_tocentry *entry = &toc_entry; 27324 caddr_t buffer; 27325 int rval; 27326 char cdb[CDB_GROUP1]; 27327 27328 if (data == NULL) { 27329 return (EINVAL); 27330 } 27331 27332 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27333 (un->un_state == SD_STATE_OFFLINE)) { 27334 return (ENXIO); 27335 } 27336 27337 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 27338 return (EFAULT); 27339 } 27340 27341 /* Validate the requested track and address format */ 27342 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 27343 return (EINVAL); 27344 } 27345 27346 if (entry->cdte_track == 0) { 27347 return (EINVAL); 27348 } 27349 27350 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 27351 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27352 bzero(cdb, CDB_GROUP1); 27353 27354 cdb[0] = SCMD_READ_TOC; 27355 /* Set the MSF bit based on the user requested address format */ 27356 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 27357 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 27358 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 27359 } else { 27360 cdb[6] = entry->cdte_track; 27361 } 27362 27363 /* 27364 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
* (4 byte TOC response header + 8 byte track descriptor) */ 27367 cdb[8] = 12; 27368 com->uscsi_cdb = cdb; 27369 com->uscsi_cdblen = CDB_GROUP1; 27370 com->uscsi_bufaddr = buffer; 27371 com->uscsi_buflen = 0x0C; 27372 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 27373 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27374 SD_PATH_STANDARD); 27375 if (rval != 0) { 27376 kmem_free(buffer, 12); 27377 kmem_free(com, sizeof (*com)); 27378 return (rval); 27379 } 27380 27381 /* Process the toc entry */ 27382 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 27383 entry->cdte_ctrl = (buffer[5] & 0x0F); 27384 if (entry->cdte_format & CDROM_LBA) { 27385 entry->cdte_addr.lba = 27386 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27387 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27388 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 27389 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 27390 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 27391 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 27392 /* 27393 * Send a READ TOC command using the LBA address format to get 27394 * the LBA for the track requested so it can be used in the 27395 * READ HEADER request 27396 * 27397 * Note: The MSF bit of the READ HEADER command specifies the 27398 * output format. The block address specified in that command 27399 * must be in LBA format. 27400 */ 27401 cdb[1] = 0; 27402 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27403 SD_PATH_STANDARD); 27404 if (rval != 0) { 27405 kmem_free(buffer, 12); 27406 kmem_free(com, sizeof (*com)); 27407 return (rval); 27408 } 27409 } else { 27410 entry->cdte_addr.msf.minute = buffer[9]; 27411 entry->cdte_addr.msf.second = buffer[10]; 27412 entry->cdte_addr.msf.frame = buffer[11]; 27413 /* 27414 * Send a READ TOC command using the LBA address format to get 27415 * the LBA for the track requested so it can be used in the 27416 * READ HEADER request 27417 * 27418 * Note: The MSF bit of the READ HEADER command specifies the 27419 * output format. The block address specified in that command 27420 * must be in LBA format. 27421 */ 27422 cdb[1] = 0; 27423 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27424 SD_PATH_STANDARD); 27425 if (rval != 0) { 27426 kmem_free(buffer, 12); 27427 kmem_free(com, sizeof (*com)); 27428 return (rval); 27429 } 27430 } 27431 27432 /* 27433 * Build and send the READ HEADER command to determine the data mode of 27434 * the user specified track. 27435 */ 27436 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 27437 (entry->cdte_track != CDROM_LEADOUT)) { 27438 bzero(cdb, CDB_GROUP1); 27439 cdb[0] = SCMD_READ_HEADER; 27440 cdb[2] = buffer[8]; 27441 cdb[3] = buffer[9]; 27442 cdb[4] = buffer[10]; 27443 cdb[5] = buffer[11]; 27444 cdb[8] = 0x08; 27445 com->uscsi_buflen = 0x08; 27446 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27447 SD_PATH_STANDARD); 27448 if (rval == 0) { 27449 entry->cdte_datamode = buffer[0]; 27450 } else { 27451 /* 27452 * The READ HEADER command failed. Since it is 27453 * obsoleted in one spec, it's better to return 27454 * -1 for an invalid track so that we can still 27455 * receive the rest of the TOC data.
27456 			 */
27457 			entry->cdte_datamode = (uchar_t)-1;
27458 		}
27459 	} else {
27460 		entry->cdte_datamode = (uchar_t)-1;
27461 	}
27462 
27463 	kmem_free(buffer, 12);
27464 	kmem_free(com, sizeof (*com));
27465 	if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
27466 		return (EFAULT);
27467 
27468 	return (rval);
27469 }
27470 
27471 
27472 /*
27473  * Function: sr_read_tochdr()
27474  *
27475  * Description: This routine is the driver entry point for handling CD-ROM
27476  *		ioctl requests to read the Table of Contents (TOC) header
27477  *		(CDROMREADTOCHDR). The TOC header consists of the disk starting
27478  *		and ending track numbers.
27479  *
27480  * Arguments: dev	- the device 'dev_t'
27481  *		data	- pointer to user provided toc header structure,
27482  *			  specifying the starting and ending track numbers.
27483  *		flag	- this argument is a pass through to ddi_copyxxx()
27484  *			  directly from the mode argument of ioctl().
27485  *
27486  * Return Code: the code returned by sd_send_scsi_cmd()
27487  *		EFAULT if ddi_copyxxx() fails
27488  *		ENXIO if fail ddi_get_soft_state
27489  *		EINVAL if data pointer is NULL
27490  */
27491 
27492 static int
27493 sr_read_tochdr(dev_t dev, caddr_t data, int flag)
27494 {
27495 	struct sd_lun		*un;
27496 	struct uscsi_cmd	*com;
27497 	struct cdrom_tochdr	toc_header;
27498 	struct cdrom_tochdr	*hdr = &toc_header;
27499 	char			cdb[CDB_GROUP1];
27500 	int			rval;
27501 	caddr_t			buffer;
27502 
27503 	if (data == NULL) {
27504 		return (EINVAL);
27505 	}
27506 
27507 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27508 	    (un->un_state == SD_STATE_OFFLINE)) {
27509 		return (ENXIO);
27510 	}
27511 
27512 	buffer = kmem_zalloc(4, KM_SLEEP);
27513 	bzero(cdb, CDB_GROUP1);
27514 	cdb[0] = SCMD_READ_TOC;
27515 	/*
27516 	 * Specifying a track number of 0x00 in the READ TOC command indicates
27517 	 * that the TOC header should be returned
27518 	 */
27519 	cdb[6] = 0x00;
27520 	/*
27521 	 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
27522 	 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
27523 	 */
27524 	cdb[8] = 0x04;
27525 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27526 	com->uscsi_cdb = cdb;
27527 	com->uscsi_cdblen = CDB_GROUP1;
27528 	com->uscsi_bufaddr = buffer;
27529 	com->uscsi_buflen = 0x04;
27530 	com->uscsi_timeout = 300;
27531 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27532 
27533 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27534 	    SD_PATH_STANDARD);
27535 	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
27536 		hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
27537 		hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
27538 	} else {
27539 		hdr->cdth_trk0 = buffer[2];
27540 		hdr->cdth_trk1 = buffer[3];
27541 	}
27542 	kmem_free(buffer, 4);
27543 	kmem_free(com, sizeof (*com));
27544 	if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
27545 		return (EFAULT);
27546 	}
27547 	return (rval);
27548 }
27549 
27550 
27551 /*
27552  * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
27553  * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
27554  * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
27555  * digital audio and extended architecture digital audio. These modes are
27556  * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
27557  * MMC specs.
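 *
 * As a purely illustrative user-level sketch (not part of this driver),
 * the two TOC ioctls handled above could be exercised as follows; the
 * device path and user-space headers are assumptions made for the
 * example:
 *
 *	#include <sys/cdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/rdsk/c0t6d0s2", O_RDONLY | O_NDELAY);
 *	struct cdrom_tochdr hdr;
 *	struct cdrom_tocentry te;
 *	int t;
 *
 *	if (fd >= 0 && ioctl(fd, CDROMREADTOCHDR, &hdr) == 0) {
 *		for (t = hdr.cdth_trk0; t <= hdr.cdth_trk1; t++) {
 *			te.cdte_track = (unsigned char)t;
 *			te.cdte_format = CDROM_MSF;
 *			(void) ioctl(fd, CDROMREADTOCENTRY, &te);
 *		}
 *	}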
27558 * 27559 * In addition to support for the various data formats these routines also 27560 * include support for devices that implement only the direct access READ 27561 * commands (0x08, 0x28), devices that implement the READ_CD commands 27562 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 27563 * READ CDXA commands (0xD8, 0xDB) 27564 */ 27565 27566 /* 27567 * Function: sr_read_mode1() 27568 * 27569 * Description: This routine is the driver entry point for handling CD-ROM 27570 * ioctl read mode1 requests (CDROMREADMODE1). 27571 * 27572 * Arguments: dev - the device 'dev_t' 27573 * data - pointer to user provided cd read structure specifying 27574 * the lba buffer address and length. 27575 * flag - this argument is a pass through to ddi_copyxxx() 27576 * directly from the mode argument of ioctl(). 27577 * 27578 * Return Code: the code returned by sd_send_scsi_cmd() 27579 * EFAULT if ddi_copyxxx() fails 27580 * ENXIO if fail ddi_get_soft_state 27581 * EINVAL if data pointer is NULL 27582 */ 27583 27584 static int 27585 sr_read_mode1(dev_t dev, caddr_t data, int flag) 27586 { 27587 struct sd_lun *un; 27588 struct cdrom_read mode1_struct; 27589 struct cdrom_read *mode1 = &mode1_struct; 27590 int rval; 27591 sd_ssc_t *ssc; 27592 27593 #ifdef _MULTI_DATAMODEL 27594 /* To support ILP32 applications in an LP64 world */ 27595 struct cdrom_read32 cdrom_read32; 27596 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27597 #endif /* _MULTI_DATAMODEL */ 27598 27599 if (data == NULL) { 27600 return (EINVAL); 27601 } 27602 27603 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27604 (un->un_state == SD_STATE_OFFLINE)) { 27605 return (ENXIO); 27606 } 27607 27608 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27609 "sd_read_mode1: entry: un:0x%p\n", un); 27610 27611 #ifdef _MULTI_DATAMODEL 27612 switch (ddi_model_convert_from(flag & FMODELS)) { 27613 case DDI_MODEL_ILP32: 27614 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27615 return (EFAULT); 27616 } 27617 /* Convert the ILP32 uscsi data from the application to LP64 */ 27618 cdrom_read32tocdrom_read(cdrd32, mode1); 27619 break; 27620 case DDI_MODEL_NONE: 27621 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27622 return (EFAULT); 27623 } 27624 } 27625 #else /* ! _MULTI_DATAMODEL */ 27626 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27627 return (EFAULT); 27628 } 27629 #endif /* _MULTI_DATAMODEL */ 27630 27631 ssc = sd_ssc_init(un); 27632 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr, 27633 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 27634 sd_ssc_fini(ssc); 27635 27636 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27637 "sd_read_mode1: exit: un:0x%p\n", un); 27638 27639 return (rval); 27640 } 27641 27642 27643 /* 27644 * Function: sr_read_cd_mode2() 27645 * 27646 * Description: This routine is the driver entry point for handling CD-ROM 27647 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 27648 * support the READ CD (0xBE) command or the 1st generation 27649 * READ CD (0xD4) command. 27650 * 27651 * Arguments: dev - the device 'dev_t' 27652 * data - pointer to user provided cd read structure specifying 27653 * the lba buffer address and length. 27654 * flag - this argument is a pass through to ddi_copyxxx() 27655 * directly from the mode argument of ioctl(). 
27656  *
27657  * Return Code: the code returned by sd_send_scsi_cmd()
27658  *		EFAULT if ddi_copyxxx() fails
27659  *		ENXIO if fail ddi_get_soft_state
27660  *		EINVAL if data pointer is NULL
27661  */
27662 
27663 static int
27664 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
27665 {
27666 	struct sd_lun		*un;
27667 	struct uscsi_cmd	*com;
27668 	struct cdrom_read	mode2_struct;
27669 	struct cdrom_read	*mode2 = &mode2_struct;
27670 	uchar_t			cdb[CDB_GROUP5];
27671 	int			nblocks;
27672 	int			rval;
27673 #ifdef _MULTI_DATAMODEL
27674 	/* To support ILP32 applications in an LP64 world */
27675 	struct cdrom_read32	cdrom_read32;
27676 	struct cdrom_read32	*cdrd32 = &cdrom_read32;
27677 #endif /* _MULTI_DATAMODEL */
27678 
27679 	if (data == NULL) {
27680 		return (EINVAL);
27681 	}
27682 
27683 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27684 	    (un->un_state == SD_STATE_OFFLINE)) {
27685 		return (ENXIO);
27686 	}
27687 
27688 #ifdef _MULTI_DATAMODEL
27689 	switch (ddi_model_convert_from(flag & FMODELS)) {
27690 	case DDI_MODEL_ILP32:
27691 		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
27692 			return (EFAULT);
27693 		}
27694 		/* Convert the ILP32 uscsi data from the application to LP64 */
27695 		cdrom_read32tocdrom_read(cdrd32, mode2);
27696 		break;
27697 	case DDI_MODEL_NONE:
27698 		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
27699 			return (EFAULT);
27700 		}
27701 		break;
27702 	}
27703 
27704 #else /* ! _MULTI_DATAMODEL */
27705 	if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
27706 		return (EFAULT);
27707 	}
27708 #endif /* _MULTI_DATAMODEL */
27709 
27710 	bzero(cdb, sizeof (cdb));
27711 	if (un->un_f_cfg_read_cd_xd4 == TRUE) {
27712 		/* Read command supported by 1st generation atapi drives */
27713 		cdb[0] = SCMD_READ_CDD4;
27714 	} else {
27715 		/* Universal CD Access Command */
27716 		cdb[0] = SCMD_READ_CD;
27717 	}
27718 
27719 	/*
27720 	 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book
27721 	 */
27722 	cdb[1] = CDROM_SECTOR_TYPE_MODE2;
27723 
27724 	/* set the start address */
27725 	cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0xFF);
27726 	cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0xFF);
27727 	cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
27728 	cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);
27729 
27730 	/* set the transfer length */
27731 	nblocks = mode2->cdread_buflen / 2336;
27732 	cdb[6] = (uchar_t)(nblocks >> 16);
27733 	cdb[7] = (uchar_t)(nblocks >> 8);
27734 	cdb[8] = (uchar_t)nblocks;
27735 
27736 	/* set the filter bits */
27737 	cdb[9] = CDROM_READ_CD_USERDATA;
27738 
27739 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27740 	com->uscsi_cdb = (caddr_t)cdb;
27741 	com->uscsi_cdblen = sizeof (cdb);
27742 	com->uscsi_bufaddr = mode2->cdread_bufaddr;
27743 	com->uscsi_buflen = mode2->cdread_buflen;
27744 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27745 
27746 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
27747 	    SD_PATH_STANDARD);
27748 	kmem_free(com, sizeof (*com));
27749 	return (rval);
27750 }
27751 
27752 
27753 /*
27754  * Function: sr_read_mode2()
27755  *
27756  * Description: This routine is the driver entry point for handling CD-ROM
27757  *		ioctl read mode2 requests (CDROMREADMODE2) for devices that
27758  *		do not support the READ CD (0xBE) command.
27759  *
27760  * Arguments: dev	- the device 'dev_t'
27761  *		data	- pointer to user provided cd read structure specifying
27762  *			  the lba buffer address and length.
27763  *		flag	- this argument is a pass through to ddi_copyxxx()
27764  *			  directly from the mode argument of ioctl().
27765 * 27766 * Return Code: the code returned by sd_send_scsi_cmd() 27767 * EFAULT if ddi_copyxxx() fails 27768 * ENXIO if fail ddi_get_soft_state 27769 * EINVAL if data pointer is NULL 27770 * EIO if fail to reset block size 27771 * EAGAIN if commands are in progress in the driver 27772 */ 27773 27774 static int 27775 sr_read_mode2(dev_t dev, caddr_t data, int flag) 27776 { 27777 struct sd_lun *un; 27778 struct cdrom_read mode2_struct; 27779 struct cdrom_read *mode2 = &mode2_struct; 27780 int rval; 27781 uint32_t restore_blksize; 27782 struct uscsi_cmd *com; 27783 uchar_t cdb[CDB_GROUP0]; 27784 int nblocks; 27785 27786 #ifdef _MULTI_DATAMODEL 27787 /* To support ILP32 applications in an LP64 world */ 27788 struct cdrom_read32 cdrom_read32; 27789 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27790 #endif /* _MULTI_DATAMODEL */ 27791 27792 if (data == NULL) { 27793 return (EINVAL); 27794 } 27795 27796 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27797 (un->un_state == SD_STATE_OFFLINE)) { 27798 return (ENXIO); 27799 } 27800 27801 /* 27802 * Because this routine will update the device and driver block size 27803 * being used we want to make sure there are no commands in progress. 27804 * If commands are in progress the user will have to try again. 27805 * 27806 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 27807 * in sdioctl to protect commands from sdioctl through to the top of 27808 * sd_uscsi_strategy. See sdioctl for details. 27809 */ 27810 mutex_enter(SD_MUTEX(un)); 27811 if (un->un_ncmds_in_driver != 1) { 27812 mutex_exit(SD_MUTEX(un)); 27813 return (EAGAIN); 27814 } 27815 mutex_exit(SD_MUTEX(un)); 27816 27817 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27818 "sd_read_mode2: entry: un:0x%p\n", un); 27819 27820 #ifdef _MULTI_DATAMODEL 27821 switch (ddi_model_convert_from(flag & FMODELS)) { 27822 case DDI_MODEL_ILP32: 27823 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27824 return (EFAULT); 27825 } 27826 /* Convert the ILP32 uscsi data from the application to LP64 */ 27827 cdrom_read32tocdrom_read(cdrd32, mode2); 27828 break; 27829 case DDI_MODEL_NONE: 27830 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27831 return (EFAULT); 27832 } 27833 break; 27834 } 27835 #else /* ! 
_MULTI_DATAMODEL */
27836 	if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
27837 		return (EFAULT);
27838 	}
27839 #endif /* _MULTI_DATAMODEL */
27840 
27841 	/* Store the current target block size for restoration later */
27842 	restore_blksize = un->un_tgt_blocksize;
27843 
27844 	/* Change the device and soft state target block size to 2336 */
27845 	if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
27846 		rval = EIO;
27847 		goto done;
27848 	}
27849 
27850 
27851 	bzero(cdb, sizeof (cdb));
27852 
27853 	/* set READ operation */
27854 	cdb[0] = SCMD_READ;
27855 
27856 	/* adjust the lba from 512 byte blocks to 2 Kbyte blocks */
27857 	mode2->cdread_lba >>= 2;
27858 
27859 	/* set the start address */
27860 	cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0x1F);
27861 	cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
27862 	cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);
27863 
27864 	/* set the transfer length */
27865 	nblocks = mode2->cdread_buflen / 2336;
27866 	cdb[4] = (uchar_t)nblocks & 0xFF;
27867 
27868 	/* build command */
27869 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27870 	com->uscsi_cdb = (caddr_t)cdb;
27871 	com->uscsi_cdblen = sizeof (cdb);
27872 	com->uscsi_bufaddr = mode2->cdread_bufaddr;
27873 	com->uscsi_buflen = mode2->cdread_buflen;
27874 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27875 
27876 	/*
27877 	 * Issue SCSI command with user space address for read buffer.
27878 	 *
27879 	 * This sends the command through the main channel in the driver.
27880 	 *
27881 	 * Since this is accessed via an IOCTL call, we go through the
27882 	 * standard path, so that if the device was powered down, then
27883 	 * it would be 'awakened' to handle the command.
27884 	 */
27885 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
27886 	    SD_PATH_STANDARD);
27887 
27888 	kmem_free(com, sizeof (*com));
27889 
27890 	/* Restore the device and soft state target block size */
27891 	if (sr_sector_mode(dev, restore_blksize) != 0) {
27892 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27893 		    "can't switch back to mode 1\n");
27894 		/*
27895 		 * If sd_send_scsi_READ succeeded we still need to report
27896 		 * an error because we failed to reset the block size
27897 		 */
27898 		if (rval == 0) {
27899 			rval = EIO;
27900 		}
27901 	}
27902 
27903 done:
27904 	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
27905 	    "sd_read_mode2: exit: un:0x%p\n", un);
27906 
27907 	return (rval);
27908 }
27909 
27910 
27911 /*
27912  * Function: sr_sector_mode()
27913  *
27914  * Description: This utility function is used by sr_read_mode2() to set the
27915  *		target block size based on the user-specified size. This is a
27916  *		legacy implementation based upon a vendor specific mode page.
27917  *
27918  * Arguments: dev	- the device 'dev_t'
27919  *		blksize	- flag indicating if the block size is being set to
27920  *			  2336 or 512.
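 *
 *		A minimal sketch of the intended call pairing, mirroring the
 *		caller sr_read_mode2() above (illustrative only):
 *
 *			if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) == 0) {
 *				... issue the 2336 byte reads ...
 *				(void) sr_sector_mode(dev, restore_blksize);
 *			}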
27921 * 27922 * Return Code: the code returned by sd_send_scsi_cmd() 27923 * EFAULT if ddi_copyxxx() fails 27924 * ENXIO if fail ddi_get_soft_state 27925 * EINVAL if data pointer is NULL 27926 */ 27927 27928 static int 27929 sr_sector_mode(dev_t dev, uint32_t blksize) 27930 { 27931 struct sd_lun *un; 27932 uchar_t *sense; 27933 uchar_t *select; 27934 int rval; 27935 sd_ssc_t *ssc; 27936 27937 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27938 (un->un_state == SD_STATE_OFFLINE)) { 27939 return (ENXIO); 27940 } 27941 27942 sense = kmem_zalloc(20, KM_SLEEP); 27943 27944 /* Note: This is a vendor specific mode page (0x81) */ 27945 ssc = sd_ssc_init(un); 27946 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81, 27947 SD_PATH_STANDARD); 27948 sd_ssc_fini(ssc); 27949 if (rval != 0) { 27950 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27951 "sr_sector_mode: Mode Sense failed\n"); 27952 kmem_free(sense, 20); 27953 return (rval); 27954 } 27955 select = kmem_zalloc(20, KM_SLEEP); 27956 select[3] = 0x08; 27957 select[10] = ((blksize >> 8) & 0xff); 27958 select[11] = (blksize & 0xff); 27959 select[12] = 0x01; 27960 select[13] = 0x06; 27961 select[14] = sense[14]; 27962 select[15] = sense[15]; 27963 if (blksize == SD_MODE2_BLKSIZE) { 27964 select[14] |= 0x01; 27965 } 27966 27967 ssc = sd_ssc_init(un); 27968 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20, 27969 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 27970 sd_ssc_fini(ssc); 27971 if (rval != 0) { 27972 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27973 "sr_sector_mode: Mode Select failed\n"); 27974 } else { 27975 /* 27976 * Only update the softstate block size if we successfully 27977 * changed the device block mode. 27978 */ 27979 mutex_enter(SD_MUTEX(un)); 27980 sd_update_block_info(un, blksize, 0); 27981 mutex_exit(SD_MUTEX(un)); 27982 } 27983 kmem_free(sense, 20); 27984 kmem_free(select, 20); 27985 return (rval); 27986 } 27987 27988 27989 /* 27990 * Function: sr_read_cdda() 27991 * 27992 * Description: This routine is the driver entry point for handling CD-ROM 27993 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 27994 * the target supports CDDA these requests are handled via a vendor 27995 * specific command (0xD8) If the target does not support CDDA 27996 * these requests are handled via the READ CD command (0xBE). 27997 * 27998 * Arguments: dev - the device 'dev_t' 27999 * data - pointer to user provided CD-DA structure specifying 28000 * the track starting address, transfer length, and 28001 * subcode options. 28002 * flag - this argument is a pass through to ddi_copyxxx() 28003 * directly from the mode argument of ioctl(). 
28004 * 28005 * Return Code: the code returned by sd_send_scsi_cmd() 28006 * EFAULT if ddi_copyxxx() fails 28007 * ENXIO if fail ddi_get_soft_state 28008 * EINVAL if invalid arguments are provided 28009 * ENOTTY 28010 */ 28011 28012 static int 28013 sr_read_cdda(dev_t dev, caddr_t data, int flag) 28014 { 28015 struct sd_lun *un; 28016 struct uscsi_cmd *com; 28017 struct cdrom_cdda *cdda; 28018 int rval; 28019 size_t buflen; 28020 char cdb[CDB_GROUP5]; 28021 28022 #ifdef _MULTI_DATAMODEL 28023 /* To support ILP32 applications in an LP64 world */ 28024 struct cdrom_cdda32 cdrom_cdda32; 28025 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 28026 #endif /* _MULTI_DATAMODEL */ 28027 28028 if (data == NULL) { 28029 return (EINVAL); 28030 } 28031 28032 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28033 return (ENXIO); 28034 } 28035 28036 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 28037 28038 #ifdef _MULTI_DATAMODEL 28039 switch (ddi_model_convert_from(flag & FMODELS)) { 28040 case DDI_MODEL_ILP32: 28041 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 28042 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28043 "sr_read_cdda: ddi_copyin Failed\n"); 28044 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28045 return (EFAULT); 28046 } 28047 /* Convert the ILP32 uscsi data from the application to LP64 */ 28048 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 28049 break; 28050 case DDI_MODEL_NONE: 28051 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28052 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28053 "sr_read_cdda: ddi_copyin Failed\n"); 28054 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28055 return (EFAULT); 28056 } 28057 break; 28058 } 28059 #else /* ! _MULTI_DATAMODEL */ 28060 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28061 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28062 "sr_read_cdda: ddi_copyin Failed\n"); 28063 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28064 return (EFAULT); 28065 } 28066 #endif /* _MULTI_DATAMODEL */ 28067 28068 /* 28069 * Since MMC-2 expects max 3 bytes for length, check if the 28070 * length input is greater than 3 bytes 28071 */ 28072 if ((cdda->cdda_length & 0xFF000000) != 0) { 28073 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 28074 "cdrom transfer length too large: %d (limit %d)\n", 28075 cdda->cdda_length, 0xFFFFFF); 28076 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28077 return (EINVAL); 28078 } 28079 28080 switch (cdda->cdda_subcode) { 28081 case CDROM_DA_NO_SUBCODE: 28082 buflen = CDROM_BLK_2352 * cdda->cdda_length; 28083 break; 28084 case CDROM_DA_SUBQ: 28085 buflen = CDROM_BLK_2368 * cdda->cdda_length; 28086 break; 28087 case CDROM_DA_ALL_SUBCODE: 28088 buflen = CDROM_BLK_2448 * cdda->cdda_length; 28089 break; 28090 case CDROM_DA_SUBCODE_ONLY: 28091 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 28092 break; 28093 default: 28094 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28095 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 28096 cdda->cdda_subcode); 28097 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28098 return (EINVAL); 28099 } 28100 28101 /* Build and send the command */ 28102 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28103 bzero(cdb, CDB_GROUP5); 28104 28105 if (un->un_f_cfg_cdda == TRUE) { 28106 cdb[0] = (char)SCMD_READ_CD; 28107 cdb[1] = 0x04; 28108 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28109 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28110 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28111 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28112 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28113 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28114 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 28115 cdb[9] = 0x10; 28116 switch (cdda->cdda_subcode) { 28117 case CDROM_DA_NO_SUBCODE : 28118 cdb[10] = 0x0; 28119 break; 28120 case CDROM_DA_SUBQ : 28121 cdb[10] = 0x2; 28122 break; 28123 case CDROM_DA_ALL_SUBCODE : 28124 cdb[10] = 0x1; 28125 break; 28126 case CDROM_DA_SUBCODE_ONLY : 28127 /* FALLTHROUGH */ 28128 default : 28129 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28130 kmem_free(com, sizeof (*com)); 28131 return (ENOTTY); 28132 } 28133 } else { 28134 cdb[0] = (char)SCMD_READ_CDDA; 28135 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28136 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28137 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28138 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28139 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 28140 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28141 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28142 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 28143 cdb[10] = cdda->cdda_subcode; 28144 } 28145 28146 com->uscsi_cdb = cdb; 28147 com->uscsi_cdblen = CDB_GROUP5; 28148 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 28149 com->uscsi_buflen = buflen; 28150 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28151 28152 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28153 SD_PATH_STANDARD); 28154 28155 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28156 kmem_free(com, sizeof (*com)); 28157 return (rval); 28158 } 28159 28160 28161 /* 28162 * Function: sr_read_cdxa() 28163 * 28164 * Description: This routine is the driver entry point for handling CD-ROM 28165 * ioctl requests to return CD-XA (Extended Architecture) data. 28166 * (CDROMCDXA). 28167 * 28168 * Arguments: dev - the device 'dev_t' 28169 * data - pointer to user provided CD-XA structure specifying 28170 * the data starting address, transfer length, and format 28171 * flag - this argument is a pass through to ddi_copyxxx() 28172 * directly from the mode argument of ioctl(). 28173 * 28174 * Return Code: the code returned by sd_send_scsi_cmd() 28175 * EFAULT if ddi_copyxxx() fails 28176 * ENXIO if fail ddi_get_soft_state 28177 * EINVAL if data pointer is NULL 28178 */ 28179 28180 static int 28181 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 28182 { 28183 struct sd_lun *un; 28184 struct uscsi_cmd *com; 28185 struct cdrom_cdxa *cdxa; 28186 int rval; 28187 size_t buflen; 28188 char cdb[CDB_GROUP5]; 28189 uchar_t read_flags; 28190 28191 #ifdef _MULTI_DATAMODEL 28192 /* To support ILP32 applications in an LP64 world */ 28193 struct cdrom_cdxa32 cdrom_cdxa32; 28194 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 28195 #endif /* _MULTI_DATAMODEL */ 28196 28197 if (data == NULL) { 28198 return (EINVAL); 28199 } 28200 28201 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28202 return (ENXIO); 28203 } 28204 28205 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 28206 28207 #ifdef _MULTI_DATAMODEL 28208 switch (ddi_model_convert_from(flag & FMODELS)) { 28209 case DDI_MODEL_ILP32: 28210 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 28211 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28212 return (EFAULT); 28213 } 28214 /* 28215 * Convert the ILP32 uscsi data from the 28216 * application to LP64 for internal use. 
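		 * (ddi_model_convert_from(9F) distinguishes ILP32 callers on
		 * an LP64 kernel; every ioctl handler in this file repeats
		 * this same copyin/convert pattern.)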
28217 		 */
28218 		cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
28219 		break;
28220 	case DDI_MODEL_NONE:
28221 		if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
28222 			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28223 			return (EFAULT);
28224 		}
28225 		break;
28226 	}
28227 #else /* ! _MULTI_DATAMODEL */
28228 	if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
28229 		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28230 		return (EFAULT);
28231 	}
28232 #endif /* _MULTI_DATAMODEL */
28233 
28234 	/*
28235 	 * Since MMC-2 expects max 3 bytes for length, check if the
28236 	 * length input is greater than 3 bytes
28237 	 */
28238 	if ((cdxa->cdxa_length & 0xFF000000) != 0) {
28239 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
28240 		    "cdrom transfer length too large: %d (limit %d)\n",
28241 		    cdxa->cdxa_length, 0xFFFFFF);
28242 		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28243 		return (EINVAL);
28244 	}
28245 
28246 	switch (cdxa->cdxa_format) {
28247 	case CDROM_XA_DATA:
28248 		buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
28249 		read_flags = 0x10;
28250 		break;
28251 	case CDROM_XA_SECTOR_DATA:
28252 		buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
28253 		read_flags = 0xf8;
28254 		break;
28255 	case CDROM_XA_DATA_W_ERROR:
28256 		buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
28257 		read_flags = 0xfc;
28258 		break;
28259 	default:
28260 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28261 		    "sr_read_cdxa: Format '0x%x' Not Supported\n",
28262 		    cdxa->cdxa_format);
28263 		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28264 		return (EINVAL);
28265 	}
28266 
28267 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28268 	bzero(cdb, CDB_GROUP5);
28269 	if (un->un_f_mmc_cap == TRUE) {
28270 		cdb[0] = (char)SCMD_READ_CD;
28271 		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
28272 		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
28273 		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
28274 		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
28275 		cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
28276 		cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
28277 		cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
28278 		cdb[9] = (char)read_flags;
28279 	} else {
28280 		/*
28281 		 * Note: A vendor specific command (0xDB) is being used here to
28282 		 * request a read of all subcodes.
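		 * Unlike the MMC READ CD case above, this CDB carries a
		 * full 32-bit transfer length in bytes 6-9, and byte 10
		 * carries the user requested CD-XA format directly.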
28283 */ 28284 cdb[0] = (char)SCMD_READ_CDXA; 28285 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 28286 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 28287 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 28288 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 28289 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 28290 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 28291 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 28292 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 28293 cdb[10] = cdxa->cdxa_format; 28294 } 28295 com->uscsi_cdb = cdb; 28296 com->uscsi_cdblen = CDB_GROUP5; 28297 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 28298 com->uscsi_buflen = buflen; 28299 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28300 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28301 SD_PATH_STANDARD); 28302 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28303 kmem_free(com, sizeof (*com)); 28304 return (rval); 28305 } 28306 28307 28308 /* 28309 * Function: sr_eject() 28310 * 28311 * Description: This routine is the driver entry point for handling CD-ROM 28312 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 28313 * 28314 * Arguments: dev - the device 'dev_t' 28315 * 28316 * Return Code: the code returned by sd_send_scsi_cmd() 28317 */ 28318 28319 static int 28320 sr_eject(dev_t dev) 28321 { 28322 struct sd_lun *un; 28323 int rval; 28324 sd_ssc_t *ssc; 28325 28326 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28327 (un->un_state == SD_STATE_OFFLINE)) { 28328 return (ENXIO); 28329 } 28330 28331 /* 28332 * To prevent race conditions with the eject 28333 * command, keep track of an eject command as 28334 * it progresses. If we are already handling 28335 * an eject command in the driver for the given 28336 * unit and another request to eject is received 28337 * immediately return EAGAIN so we don't lose 28338 * the command if the current eject command fails. 28339 */ 28340 mutex_enter(SD_MUTEX(un)); 28341 if (un->un_f_ejecting == TRUE) { 28342 mutex_exit(SD_MUTEX(un)); 28343 return (EAGAIN); 28344 } 28345 un->un_f_ejecting = TRUE; 28346 mutex_exit(SD_MUTEX(un)); 28347 28348 ssc = sd_ssc_init(un); 28349 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 28350 SD_PATH_STANDARD); 28351 sd_ssc_fini(ssc); 28352 28353 if (rval != 0) { 28354 mutex_enter(SD_MUTEX(un)); 28355 un->un_f_ejecting = FALSE; 28356 mutex_exit(SD_MUTEX(un)); 28357 return (rval); 28358 } 28359 28360 ssc = sd_ssc_init(un); 28361 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_EJECT, 28362 SD_PATH_STANDARD); 28363 sd_ssc_fini(ssc); 28364 28365 if (rval == 0) { 28366 mutex_enter(SD_MUTEX(un)); 28367 sr_ejected(un); 28368 un->un_mediastate = DKIO_EJECTED; 28369 un->un_f_ejecting = FALSE; 28370 cv_broadcast(&un->un_state_cv); 28371 mutex_exit(SD_MUTEX(un)); 28372 } else { 28373 mutex_enter(SD_MUTEX(un)); 28374 un->un_f_ejecting = FALSE; 28375 mutex_exit(SD_MUTEX(un)); 28376 } 28377 return (rval); 28378 } 28379 28380 28381 /* 28382 * Function: sr_ejected() 28383 * 28384 * Description: This routine updates the soft state structure to invalidate the 28385 * geometry information after the media has been ejected or a 28386 * media eject has been detected. 
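 *		Note that sr_ejected() is entered with SD_MUTEX held; the
 *		mutex is dropped and re-acquired around the cmlb_invalidate()
 *		call below.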
28387  *
28388  * Arguments: un - driver soft state (unit) structure
28389  */
28390 
28391 static void
28392 sr_ejected(struct sd_lun *un)
28393 {
28394 	struct sd_errstats *stp;
28395 
28396 	ASSERT(un != NULL);
28397 	ASSERT(mutex_owned(SD_MUTEX(un)));
28398 
28399 	un->un_f_blockcount_is_valid = FALSE;
28400 	un->un_f_tgt_blocksize_is_valid = FALSE;
28401 	mutex_exit(SD_MUTEX(un));
28402 	cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
28403 	mutex_enter(SD_MUTEX(un));
28404 
28405 	if (un->un_errstats != NULL) {
28406 		stp = (struct sd_errstats *)un->un_errstats->ks_data;
28407 		stp->sd_capacity.value.ui64 = 0;
28408 	}
28409 }
28410 
28411 
28412 /*
28413  * Function: sr_check_wp()
28414  *
28415  * Description: This routine checks the write protection of a removable
28416  *		media disk and hotpluggable devices via the write protect bit
28417  *		of the Mode Page Header device specific field. Some devices
28418  *		choke on an unsupported mode page. To work around this issue,
28419  *		this routine has been implemented to use the 0x3F mode page
28420  *		(request all pages) for all device types.
28421  *
28422  * Arguments: dev - the device 'dev_t'
28423  *
28424  * Return Code: int indicating if the device is write protected (1) or not (0)
28425  *
28426  * Context: Kernel thread.
28427  *
28428  */
28429 
28430 static int
28431 sr_check_wp(dev_t dev)
28432 {
28433 	struct sd_lun	*un;
28434 	uchar_t		device_specific;
28435 	uchar_t		*sense;
28436 	int		hdrlen;
28437 	int		rval = FALSE;
28438 	int		status;
28439 	sd_ssc_t	*ssc;
28440 
28441 	/*
28442 	 * Note: The return codes for this routine should be reworked to
28443 	 * properly handle the case of a NULL softstate.
28444 	 */
28445 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
28446 		return (FALSE);
28447 	}
28448 
28449 	if (un->un_f_cfg_is_atapi == TRUE) {
28450 		/*
28451 		 * The mode page contents are not required; set the allocation
28452 		 * length for the mode page header only
28453 		 */
28454 		hdrlen = MODE_HEADER_LENGTH_GRP2;
28455 		sense = kmem_zalloc(hdrlen, KM_SLEEP);
28456 		ssc = sd_ssc_init(un);
28457 		status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen,
28458 		    MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
28459 		sd_ssc_fini(ssc);
28460 		if (status != 0)
28461 			goto err_exit;
28462 		device_specific =
28463 		    ((struct mode_header_grp2 *)sense)->device_specific;
28464 	} else {
28465 		hdrlen = MODE_HEADER_LENGTH;
28466 		sense = kmem_zalloc(hdrlen, KM_SLEEP);
28467 		ssc = sd_ssc_init(un);
28468 		status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen,
28469 		    MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
28470 		sd_ssc_fini(ssc);
28471 		if (status != 0)
28472 			goto err_exit;
28473 		device_specific =
28474 		    ((struct mode_header *)sense)->device_specific;
28475 	}
28476 
28477 
28478 	/*
28479 	 * Report write protection from the device specific field. If the
28480 	 * mode sense above failed (not all disks understand this query),
28481 	 * we skip this check and rval stays FALSE, so the device is
28482 	 * assumed to be writable.
28483 	 */
28484 	if (device_specific & WRITE_PROTECT) {
28485 		rval = TRUE;
28486 	}
28487 
28488 err_exit:
28489 	kmem_free(sense, hdrlen);
28490 	return (rval);
28491 }
28492 
28493 /*
28494  * Function: sr_volume_ctrl()
28495  *
28496  * Description: This routine is the driver entry point for handling CD-ROM
28497  *		audio output volume ioctl requests. (CDROMVOLCTRL)
28498  *
28499  * Arguments: dev	- the device 'dev_t'
28500  *		data	- pointer to user audio volume control structure
28501  *		flag	- this argument is a pass through to ddi_copyxxx()
28502  *			  directly from the mode argument of ioctl().
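 *
 *		As an illustrative user-level sketch only (not driver code),
 *		setting both front channels to half volume might look like:
 *
 *			struct cdrom_volctrl v;
 *
 *			v.channel0 = 128;
 *			v.channel1 = 128;
 *			(void) ioctl(fd, CDROMVOLCTRL, &v);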
28502 * 28503 * Return Code: the code returned by sd_send_scsi_cmd() 28504 * EFAULT if ddi_copyxxx() fails 28505 * ENXIO if fail ddi_get_soft_state 28506 * EINVAL if data pointer is NULL 28507 * 28508 */ 28509 28510 static int 28511 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 28512 { 28513 struct sd_lun *un; 28514 struct cdrom_volctrl volume; 28515 struct cdrom_volctrl *vol = &volume; 28516 uchar_t *sense_page; 28517 uchar_t *select_page; 28518 uchar_t *sense; 28519 uchar_t *select; 28520 int sense_buflen; 28521 int select_buflen; 28522 int rval; 28523 sd_ssc_t *ssc; 28524 28525 if (data == NULL) { 28526 return (EINVAL); 28527 } 28528 28529 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28530 (un->un_state == SD_STATE_OFFLINE)) { 28531 return (ENXIO); 28532 } 28533 28534 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 28535 return (EFAULT); 28536 } 28537 28538 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 28539 struct mode_header_grp2 *sense_mhp; 28540 struct mode_header_grp2 *select_mhp; 28541 int bd_len; 28542 28543 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 28544 select_buflen = MODE_HEADER_LENGTH_GRP2 + 28545 MODEPAGE_AUDIO_CTRL_LEN; 28546 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 28547 select = kmem_zalloc(select_buflen, KM_SLEEP); 28548 ssc = sd_ssc_init(un); 28549 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 28550 sense_buflen, MODEPAGE_AUDIO_CTRL, 28551 SD_PATH_STANDARD); 28552 sd_ssc_fini(ssc); 28553 28554 if (rval != 0) { 28555 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28556 "sr_volume_ctrl: Mode Sense Failed\n"); 28557 kmem_free(sense, sense_buflen); 28558 kmem_free(select, select_buflen); 28559 return (rval); 28560 } 28561 sense_mhp = (struct mode_header_grp2 *)sense; 28562 select_mhp = (struct mode_header_grp2 *)select; 28563 bd_len = (sense_mhp->bdesc_length_hi << 8) | 28564 sense_mhp->bdesc_length_lo; 28565 if (bd_len > MODE_BLK_DESC_LENGTH) { 28566 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28567 "sr_volume_ctrl: Mode Sense returned invalid " 28568 "block descriptor length\n"); 28569 kmem_free(sense, sense_buflen); 28570 kmem_free(select, select_buflen); 28571 return (EIO); 28572 } 28573 sense_page = (uchar_t *) 28574 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 28575 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 28576 select_mhp->length_msb = 0; 28577 select_mhp->length_lsb = 0; 28578 select_mhp->bdesc_length_hi = 0; 28579 select_mhp->bdesc_length_lo = 0; 28580 } else { 28581 struct mode_header *sense_mhp, *select_mhp; 28582 28583 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28584 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28585 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 28586 select = kmem_zalloc(select_buflen, KM_SLEEP); 28587 ssc = sd_ssc_init(un); 28588 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 28589 sense_buflen, MODEPAGE_AUDIO_CTRL, 28590 SD_PATH_STANDARD); 28591 sd_ssc_fini(ssc); 28592 28593 if (rval != 0) { 28594 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28595 "sr_volume_ctrl: Mode Sense Failed\n"); 28596 kmem_free(sense, sense_buflen); 28597 kmem_free(select, select_buflen); 28598 return (rval); 28599 } 28600 sense_mhp = (struct mode_header *)sense; 28601 select_mhp = (struct mode_header *)select; 28602 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 28603 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28604 "sr_volume_ctrl: Mode Sense returned invalid " 28605 "block descriptor length\n"); 28606 
kmem_free(sense, sense_buflen); 28607 kmem_free(select, select_buflen); 28608 return (EIO); 28609 } 28610 sense_page = (uchar_t *) 28611 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 28612 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 28613 select_mhp->length = 0; 28614 select_mhp->bdesc_length = 0; 28615 } 28616 /* 28617 * Note: An audio control data structure could be created and overlayed 28618 * on the following in place of the array indexing method implemented. 28619 */ 28620 28621 /* Build the select data for the user volume data */ 28622 select_page[0] = MODEPAGE_AUDIO_CTRL; 28623 select_page[1] = 0xE; 28624 /* Set the immediate bit */ 28625 select_page[2] = 0x04; 28626 /* Zero out reserved fields */ 28627 select_page[3] = 0x00; 28628 select_page[4] = 0x00; 28629 /* Return sense data for fields not to be modified */ 28630 select_page[5] = sense_page[5]; 28631 select_page[6] = sense_page[6]; 28632 select_page[7] = sense_page[7]; 28633 /* Set the user specified volume levels for channel 0 and 1 */ 28634 select_page[8] = 0x01; 28635 select_page[9] = vol->channel0; 28636 select_page[10] = 0x02; 28637 select_page[11] = vol->channel1; 28638 /* Channel 2 and 3 are currently unsupported so return the sense data */ 28639 select_page[12] = sense_page[12]; 28640 select_page[13] = sense_page[13]; 28641 select_page[14] = sense_page[14]; 28642 select_page[15] = sense_page[15]; 28643 28644 ssc = sd_ssc_init(un); 28645 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 28646 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select, 28647 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28648 } else { 28649 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 28650 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28651 } 28652 sd_ssc_fini(ssc); 28653 28654 kmem_free(sense, sense_buflen); 28655 kmem_free(select, select_buflen); 28656 return (rval); 28657 } 28658 28659 28660 /* 28661 * Function: sr_read_sony_session_offset() 28662 * 28663 * Description: This routine is the driver entry point for handling CD-ROM 28664 * ioctl requests for session offset information. (CDROMREADOFFSET) 28665 * The address of the first track in the last session of a 28666 * multi-session CD-ROM is returned 28667 * 28668 * Note: This routine uses a vendor specific key value in the 28669 * command control field without implementing any vendor check here 28670 * or in the ioctl routine. 28671 * 28672 * Arguments: dev - the device 'dev_t' 28673 * data - pointer to an int to hold the requested address 28674 * flag - this argument is a pass through to ddi_copyxxx() 28675 * directly from the mode argument of ioctl(). 28676 * 28677 * Return Code: the code returned by sd_send_scsi_cmd() 28678 * EFAULT if ddi_copyxxx() fails 28679 * ENXIO if fail ddi_get_soft_state 28680 * EINVAL if data pointer is NULL 28681 */ 28682 28683 static int 28684 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 28685 { 28686 struct sd_lun *un; 28687 struct uscsi_cmd *com; 28688 caddr_t buffer; 28689 char cdb[CDB_GROUP1]; 28690 int session_offset = 0; 28691 int rval; 28692 28693 if (data == NULL) { 28694 return (EINVAL); 28695 } 28696 28697 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28698 (un->un_state == SD_STATE_OFFLINE)) { 28699 return (ENXIO); 28700 } 28701 28702 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 28703 bzero(cdb, CDB_GROUP1); 28704 cdb[0] = SCMD_READ_TOC; 28705 /* 28706 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 
28707 	 * (4 byte TOC response header + 8 byte response data)
28708 	 */
28709 	cdb[8] = SONY_SESSION_OFFSET_LEN;
28710 	/* Byte 9 is the control byte. A vendor specific value is used */
28711 	cdb[9] = SONY_SESSION_OFFSET_KEY;
28712 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28713 	com->uscsi_cdb = cdb;
28714 	com->uscsi_cdblen = CDB_GROUP1;
28715 	com->uscsi_bufaddr = buffer;
28716 	com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
28717 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28718 
28719 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
28720 	    SD_PATH_STANDARD);
28721 	if (rval != 0) {
28722 		kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
28723 		kmem_free(com, sizeof (*com));
28724 		return (rval);
28725 	}
28726 	if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
28727 		session_offset =
28728 		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
28729 		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
28730 		/*
28731 		 * The offset is returned in current lbasize blocks. Convert
28732 		 * it to 2k blocks before returning it to the user.
28733 		 */
28734 		if (un->un_tgt_blocksize == CDROM_BLK_512) {
28735 			session_offset >>= 2;
28736 		} else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
28737 			session_offset >>= 1;
28738 		}
28739 	}
28740 
28741 	if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
28742 		rval = EFAULT;
28743 	}
28744 
28745 	kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
28746 	kmem_free(com, sizeof (*com));
28747 	return (rval);
28748 }
28749 
28750 
28751 /*
28752  * Function: sd_wm_cache_constructor()
28753  *
28754  * Description: Cache Constructor for the wmap cache for the read/modify/write
28755  *		devices.
28756  *
28757  * Arguments: wm - A pointer to the sd_w_map to be initialized.
28758  *		un - sd_lun structure for the device.
28759  *		flag - the km flags passed to constructor
28760  *
28761  * Return Code: 0 on success.
28762  *		-1 on failure.
28763  */
28764 
28765 /*ARGSUSED*/
28766 static int
28767 sd_wm_cache_constructor(void *wm, void *un, int flags)
28768 {
28769 	bzero(wm, sizeof (struct sd_w_map));
28770 	cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
28771 	return (0);
28772 }
28773 
28774 
28775 /*
28776  * Function: sd_wm_cache_destructor()
28777  *
28778  * Description: Cache destructor for the wmap cache for the read/modify/write
28779  *		devices.
28780  *
28781  * Arguments: wm - A pointer to the sd_w_map to be destroyed.
28782  *		un - sd_lun structure for the device.
28783  */
28784 /*ARGSUSED*/
28785 static void
28786 sd_wm_cache_destructor(void *wm, void *un)
28787 {
28788 	cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
28789 }
28790 
28791 
28792 /*
28793  * Function: sd_range_lock()
28794  *
28795  * Description: Lock the specified range of blocks to ensure that a
28796  *		read-modify-write is atomic and no other I/O writes
28797  *		to the same location. The range is specified in terms
28798  *		of start and end blocks. Block numbers are the actual
28799  *		media block numbers and not system block numbers.
28800  *
28801  * Arguments: un	- sd_lun structure for the device.
28802  *		startb - The starting block number
28803  *		endb - The end block number
28804  *		typ - type of i/o - simple/read_modify_write
28805  *
28806  * Return Code: wm - pointer to the wmap structure.
28807  *
28808  * Context: This routine can sleep.
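 *
 *		A minimal sketch of the intended pairing with
 *		sd_range_unlock() below (block numbers illustrative):
 *
 *			struct sd_w_map *wm;
 *
 *			wm = sd_range_lock(un, startb, endb, SD_WTYPE_RMW);
 *			... do the read, modify and write phases ...
 *			sd_range_unlock(un, wm);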
28809  */
28810 
28811 static struct sd_w_map *
28812 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
28813 {
28814 	struct sd_w_map *wmp = NULL;
28815 	struct sd_w_map *sl_wmp = NULL;
28816 	struct sd_w_map *tmp_wmp;
28817 	wm_state state = SD_WM_CHK_LIST;
28818 
28819 
28820 	ASSERT(un != NULL);
28821 	ASSERT(!mutex_owned(SD_MUTEX(un)));
28822 
28823 	mutex_enter(SD_MUTEX(un));
28824 
28825 	while (state != SD_WM_DONE) {
28826 
28827 		switch (state) {
28828 		case SD_WM_CHK_LIST:
28829 			/*
28830 			 * This is the starting state. Check the wmap list
28831 			 * to see if the range is currently available.
28832 			 */
28833 			if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
28834 				/*
28835 				 * If this is a simple write and no rmw
28836 				 * i/o is pending then try to lock the
28837 				 * range as the range should be available.
28838 				 */
28839 				state = SD_WM_LOCK_RANGE;
28840 			} else {
28841 				tmp_wmp = sd_get_range(un, startb, endb);
28842 				if (tmp_wmp != NULL) {
28843 					if ((wmp != NULL) && ONLIST(un, wmp)) {
28844 						/*
28845 						 * Should not keep onlist wmps
28846 						 * while waiting; this macro
28847 						 * will also do wmp = NULL.
28848 						 */
28849 						FREE_ONLIST_WMAP(un, wmp);
28850 					}
28851 					/*
28852 					 * sl_wmp is the wmap on which wait
28853 					 * is done. Since tmp_wmp points to
28854 					 * the in-use wmap, set sl_wmp to
28855 					 * tmp_wmp and change the state to sleep.
28856 					 */
28857 					sl_wmp = tmp_wmp;
28858 					state = SD_WM_WAIT_MAP;
28859 				} else {
28860 					state = SD_WM_LOCK_RANGE;
28861 				}
28862 
28863 			}
28864 			break;
28865 
28866 		case SD_WM_LOCK_RANGE:
28867 			ASSERT(un->un_wm_cache);
28868 			/*
28869 			 * The range needs to be locked; try to get a wmap.
28870 			 * First attempt it with KM_NOSLEEP: we want to avoid
28871 			 * sleeping if possible, as we would have to release
28872 			 * the sd mutex in order to sleep.
28873 			 */
28874 			if (wmp == NULL)
28875 				wmp = kmem_cache_alloc(un->un_wm_cache,
28876 				    KM_NOSLEEP);
28877 			if (wmp == NULL) {
28878 				mutex_exit(SD_MUTEX(un));
28879 				_NOTE(DATA_READABLE_WITHOUT_LOCK
28880 				    (sd_lun::un_wm_cache))
28881 				wmp = kmem_cache_alloc(un->un_wm_cache,
28882 				    KM_SLEEP);
28883 				mutex_enter(SD_MUTEX(un));
28884 				/*
28885 				 * We released the mutex, so recheck and go to
28886 				 * the check list state.
28887 				 */
28888 				state = SD_WM_CHK_LIST;
28889 			} else {
28890 				/*
28891 				 * We exit out of the state machine since we
28892 				 * have the wmap. Do the housekeeping first:
28893 				 * place the wmap on the wmap list if it is not
28894 				 * on it already, then set the state to done.
28895 				 */
28896 				wmp->wm_start = startb;
28897 				wmp->wm_end = endb;
28898 				wmp->wm_flags = typ | SD_WM_BUSY;
28899 				if (typ & SD_WTYPE_RMW) {
28900 					un->un_rmw_count++;
28901 				}
28902 				/*
28903 				 * If not already on the list then link
28904 				 */
28905 				if (!ONLIST(un, wmp)) {
28906 					wmp->wm_next = un->un_wm;
28907 					wmp->wm_prev = NULL;
28908 					if (wmp->wm_next)
28909 						wmp->wm_next->wm_prev = wmp;
28910 					un->un_wm = wmp;
28911 				}
28912 				state = SD_WM_DONE;
28913 			}
28914 			break;
28915 
28916 		case SD_WM_WAIT_MAP:
28917 			ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
28918 			/*
28919 			 * Wait is done on sl_wmp, which is set in the
28920 			 * check_list state.
28921 			 */
28922 			sl_wmp->wm_wanted_count++;
28923 			cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
28924 			sl_wmp->wm_wanted_count--;
28925 			/*
28926 			 * We can reuse the memory from the completed sl_wmp
28927 			 * lock range for our new lock, but only if no one is
28928 			 * waiting for it.
28929 			 */
28930 			ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
28931 			if (sl_wmp->wm_wanted_count == 0) {
28932 				if (wmp != NULL)
28933 					CHK_N_FREEWMP(un, wmp);
28934 				wmp = sl_wmp;
28935 			}
28936 			sl_wmp = NULL;
28937 			/*
28938 			 * After waking up, need to recheck for availability of
28939 			 * range.
28940 			 */
28941 			state = SD_WM_CHK_LIST;
28942 			break;
28943 
28944 		default:
28945 			panic("sd_range_lock: "
28946 			    "Unknown state %d in sd_range_lock", state);
28947 			/*NOTREACHED*/
28948 		} /* switch(state) */
28949 
28950 	} /* while(state != SD_WM_DONE) */
28951 
28952 	mutex_exit(SD_MUTEX(un));
28953 
28954 	ASSERT(wmp != NULL);
28955 
28956 	return (wmp);
28957 }
28958 
28959 
28960 /*
28961  * Function: sd_get_range()
28962  *
28963  * Description: Find out whether there is any I/O overlapping this one.
28964  *		Returns the write map of the first such I/O, NULL otherwise.
28965  *
28966  * Arguments: un	- sd_lun structure for the device.
28967  *		startb - The starting block number
28968  *		endb - The end block number
28969  *
28970  * Return Code: wm - pointer to the wmap structure.
28971  */
28972 
28973 static struct sd_w_map *
28974 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
28975 {
28976 	struct sd_w_map *wmp;
28977 
28978 	ASSERT(un != NULL);
28979 
28980 	for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
28981 		if (!(wmp->wm_flags & SD_WM_BUSY)) {
28982 			continue;
28983 		}
28984 		if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
28985 			break;
28986 		}
28987 		if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
28988 			break;
28989 		}
28990 	}
28991 
28992 	return (wmp);
28993 }
28994 
28995 
28996 /*
28997  * Function: sd_free_inlist_wmap()
28998  *
28999  * Description: Unlink and free a write map struct.
29000  *
29001  * Arguments: un	- sd_lun structure for the device.
29002  *		wmp - sd_w_map which needs to be unlinked.
29003  */
29004 
29005 static void
29006 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
29007 {
29008 	ASSERT(un != NULL);
29009 
29010 	if (un->un_wm == wmp) {
29011 		un->un_wm = wmp->wm_next;
29012 	} else {
29013 		wmp->wm_prev->wm_next = wmp->wm_next;
29014 	}
29015 
29016 	if (wmp->wm_next) {
29017 		wmp->wm_next->wm_prev = wmp->wm_prev;
29018 	}
29019 
29020 	wmp->wm_next = wmp->wm_prev = NULL;
29021 
29022 	kmem_cache_free(un->un_wm_cache, wmp);
29023 }
29024 
29025 
29026 /*
29027  * Function: sd_range_unlock()
29028  *
29029  * Description: Unlock the range locked by wm.
29030  *		Free the write map if nobody else is waiting on it.
29031  *
29032  * Arguments: un	- sd_lun structure for the device.
29033  *		wm - sd_w_map which needs to be unlinked.
29034  */
29035 
29036 static void
29037 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
29038 {
29039 	ASSERT(un != NULL);
29040 	ASSERT(wm != NULL);
29041 	ASSERT(!mutex_owned(SD_MUTEX(un)));
29042 
29043 	mutex_enter(SD_MUTEX(un));
29044 
29045 	if (wm->wm_flags & SD_WTYPE_RMW) {
29046 		un->un_rmw_count--;
29047 	}
29048 
29049 	if (wm->wm_wanted_count) {
29050 		wm->wm_flags = 0;
29051 		/*
29052 		 * Broadcast that the wmap is available now.
29053 		 */
29054 		cv_broadcast(&wm->wm_avail);
29055 	} else {
29056 		/*
29057 		 * If no one is waiting on the map, it should be freed.
29058 		 */
29059 		sd_free_inlist_wmap(un, wm);
29060 	}
29061 
29062 	mutex_exit(SD_MUTEX(un));
29063 }
29064 
29065 
29066 /*
29067  * Function: sd_read_modify_write_task
29068  *
29069  * Description: Called from a taskq thread to initiate the write phase of
29070  *		a read-modify-write request. This is used for targets where
29071  *		un->un_sys_blocksize != un->un_tgt_blocksize.
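 *
 *		For example (illustrative sizes), with un_sys_blocksize of
 *		512 and un_tgt_blocksize of 2048, a 512 byte write is handled
 *		by reading the enclosing 2048 byte target block, merging the
 *		512 bytes of new data into it, and writing the whole 2048
 *		byte block back; this routine kicks off that final write
 *		phase.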
29072  *
29073  * Arguments: arg - a pointer to the buf(9S) struct for the write command.
29074  *
29075  * Context: Called under taskq thread context.
29076  */
29077 
29078 static void
29079 sd_read_modify_write_task(void *arg)
29080 {
29081 	struct sd_mapblocksize_info	*bsp;
29082 	struct buf	*bp;
29083 	struct sd_xbuf	*xp;
29084 	struct sd_lun	*un;
29085 
29086 	bp = arg;	/* The bp is given in arg */
29087 	ASSERT(bp != NULL);
29088 
29089 	/* Get the pointer to the layer-private data struct */
29090 	xp = SD_GET_XBUF(bp);
29091 	ASSERT(xp != NULL);
29092 	bsp = xp->xb_private;
29093 	ASSERT(bsp != NULL);
29094 
29095 	un = SD_GET_UN(bp);
29096 	ASSERT(un != NULL);
29097 	ASSERT(!mutex_owned(SD_MUTEX(un)));
29098 
29099 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29100 	    "sd_read_modify_write_task: entry: buf:0x%p\n", bp);
29101 
29102 	/*
29103 	 * This is the write phase of a read-modify-write request, called
29104 	 * under the context of a taskq thread in response to the read
29105 	 * portion of the rmw request completing under interrupt
29106 	 * context. The write request must be sent from here down the iostart
29107 	 * chain as if it were being sent from sd_mapblocksize_iostart(), so
29108 	 * we use the layer index saved in the layer-private data area.
29109 	 */
29110 	SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);
29111 
29112 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29113 	    "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
29114 }
29115 
29116 
29117 /*
29118  * Function: sddump_do_read_of_rmw()
29119  *
29120  * Description: This routine will be called from sddump. If sddump is called
29121  *		with an I/O which is not aligned on a device blocksize
29122  *		boundary, then the write has to be converted to a
29123  *		read-modify-write. Do the read part here in order to keep
29124  *		sddump simple. Note that the sd_mutex is held across the
29125  *		call to this routine.
29126  *
29127  * Arguments: un	- sd_lun
29128  *		blkno - block number in terms of media block size.
29129  *		nblk - number of blocks.
29130  *		bpp - pointer to pointer to the buf structure. On return
29131  *		from this function, *bpp points to the valid buffer
29132  *		to which the write has to be done.
29133  *
29134  * Return Code: 0 for success or errno-type return code
29135  */
29136 
29137 static int
29138 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
29139     struct buf **bpp)
29140 {
29141 	int err;
29142 	int i;
29143 	int rval;
29144 	struct buf *bp;
29145 	struct scsi_pkt *pkt = NULL;
29146 	uint32_t target_blocksize;
29147 
29148 	ASSERT(un != NULL);
29149 	ASSERT(mutex_owned(SD_MUTEX(un)));
29150 
29151 	target_blocksize = un->un_tgt_blocksize;
29152 
29153 	mutex_exit(SD_MUTEX(un));
29154 
29155 	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
29156 	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
29157 	if (bp == NULL) {
29158 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29159 		    "no resources for dumping; giving up");
29160 		err = ENOMEM;
29161 		goto done;
29162 	}
29163 
29164 	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
29165 	    blkno, nblk);
29166 	if (rval != 0) {
29167 		scsi_free_consistent_buf(bp);
29168 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29169 		    "no resources for dumping; giving up");
29170 		err = ENOMEM;
29171 		goto done;
29172 	}
29173 
29174 	pkt->pkt_flags |= FLAG_NOINTR;
29175 
29176 	err = EIO;
29177 	for (i = 0; i < SD_NDUMP_RETRIES; i++) {
29178 
29179 		/*
29180 		 * Scsi_poll returns 0 (success) if the command completes and
29181 		 * the status block is STATUS_GOOD.
We should only check 29182 * errors if this condition is not true. Even then we should 29183 * send our own request sense packet only if we have a check 29184 * condition and auto request sense has not been performed by 29185 * the hba. 29186 */ 29187 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 29188 29189 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 29190 err = 0; 29191 break; 29192 } 29193 29194 /* 29195 * Check CMD_DEV_GONE 1st, give up if device is gone, 29196 * no need to read RQS data. 29197 */ 29198 if (pkt->pkt_reason == CMD_DEV_GONE) { 29199 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29200 "Error while dumping state with rmw..." 29201 "Device is gone\n"); 29202 break; 29203 } 29204 29205 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 29206 SD_INFO(SD_LOG_DUMP, un, 29207 "sddump: read failed with CHECK, try # %d\n", i); 29208 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 29209 (void) sd_send_polled_RQS(un); 29210 } 29211 29212 continue; 29213 } 29214 29215 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 29216 int reset_retval = 0; 29217 29218 SD_INFO(SD_LOG_DUMP, un, 29219 "sddump: read failed with BUSY, try # %d\n", i); 29220 29221 if (un->un_f_lun_reset_enabled == TRUE) { 29222 reset_retval = scsi_reset(SD_ADDRESS(un), 29223 RESET_LUN); 29224 } 29225 if (reset_retval == 0) { 29226 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 29227 } 29228 (void) sd_send_polled_RQS(un); 29229 29230 } else { 29231 SD_INFO(SD_LOG_DUMP, un, 29232 "sddump: read failed with 0x%x, try # %d\n", 29233 SD_GET_PKT_STATUS(pkt), i); 29234 mutex_enter(SD_MUTEX(un)); 29235 sd_reset_target(un, pkt); 29236 mutex_exit(SD_MUTEX(un)); 29237 } 29238 29239 /* 29240 * If we are not getting anywhere with lun/target resets, 29241 * let's reset the bus. 29242 */ 29243 if (i > SD_NDUMP_RETRIES/2) { 29244 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 29245 (void) sd_send_polled_RQS(un); 29246 } 29247 29248 } 29249 scsi_destroy_pkt(pkt); 29250 29251 if (err != 0) { 29252 scsi_free_consistent_buf(bp); 29253 *bpp = NULL; 29254 } else { 29255 *bpp = bp; 29256 } 29257 29258 done: 29259 mutex_enter(SD_MUTEX(un)); 29260 return (err); 29261 } 29262 29263 29264 /* 29265 * Function: sd_failfast_flushq 29266 * 29267 * Description: Take all bp's on the wait queue that have B_FAILFAST set 29268 * in b_flags and move them onto the failfast queue, then kick 29269 * off a thread to return all bp's on the failfast queue to 29270 * their owners with an error set. 29271 * 29272 * Arguments: un - pointer to the soft state struct for the instance. 29273 * 29274 * Context: may execute in interrupt context. 29275 */ 29276 29277 static void 29278 sd_failfast_flushq(struct sd_lun *un) 29279 { 29280 struct buf *bp; 29281 struct buf *next_waitq_bp; 29282 struct buf *prev_waitq_bp = NULL; 29283 29284 ASSERT(un != NULL); 29285 ASSERT(mutex_owned(SD_MUTEX(un))); 29286 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 29287 ASSERT(un->un_failfast_bp == NULL); 29288 29289 SD_TRACE(SD_LOG_IO_FAILFAST, un, 29290 "sd_failfast_flushq: entry: un:0x%p\n", un); 29291 29292 /* 29293 * Check if we should flush all bufs when entering failfast state, or 29294 * just those with B_FAILFAST set. 29295 */ 29296 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 29297 /* 29298 * Move *all* bp's on the wait queue to the failfast flush 29299 * queue, including those that do NOT have B_FAILFAST set. 
29300 */ 29301 if (un->un_failfast_headp == NULL) { 29302 ASSERT(un->un_failfast_tailp == NULL); 29303 un->un_failfast_headp = un->un_waitq_headp; 29304 } else { 29305 ASSERT(un->un_failfast_tailp != NULL); 29306 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 29307 } 29308 29309 un->un_failfast_tailp = un->un_waitq_tailp; 29310 29311 /* update kstat for each bp moved out of the waitq */ 29312 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 29313 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 29314 } 29315 29316 /* empty the waitq */ 29317 un->un_waitq_headp = un->un_waitq_tailp = NULL; 29318 29319 } else { 29320 /* 29321 * Go thru the wait queue, pick off all entries with 29322 * B_FAILFAST set, and move these onto the failfast queue. 29323 */ 29324 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 29325 /* 29326 * Save the pointer to the next bp on the wait queue, 29327 * so we get to it on the next iteration of this loop. 29328 */ 29329 next_waitq_bp = bp->av_forw; 29330 29331 /* 29332 * If this bp from the wait queue does NOT have 29333 * B_FAILFAST set, just move on to the next element 29334 * in the wait queue. Note, this is the only place 29335 * where it is correct to set prev_waitq_bp. 29336 */ 29337 if ((bp->b_flags & B_FAILFAST) == 0) { 29338 prev_waitq_bp = bp; 29339 continue; 29340 } 29341 29342 /* 29343 * Remove the bp from the wait queue. 29344 */ 29345 if (bp == un->un_waitq_headp) { 29346 /* The bp is the first element of the waitq. */ 29347 un->un_waitq_headp = next_waitq_bp; 29348 if (un->un_waitq_headp == NULL) { 29349 /* The wait queue is now empty */ 29350 un->un_waitq_tailp = NULL; 29351 } 29352 } else { 29353 /* 29354 * The bp is either somewhere in the middle 29355 * or at the end of the wait queue. 29356 */ 29357 ASSERT(un->un_waitq_headp != NULL); 29358 ASSERT(prev_waitq_bp != NULL); 29359 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 29360 == 0); 29361 if (bp == un->un_waitq_tailp) { 29362 /* bp is the last entry on the waitq. */ 29363 ASSERT(next_waitq_bp == NULL); 29364 un->un_waitq_tailp = prev_waitq_bp; 29365 } 29366 prev_waitq_bp->av_forw = next_waitq_bp; 29367 } 29368 bp->av_forw = NULL; 29369 29370 /* 29371 * update kstat since the bp is moved out of 29372 * the waitq 29373 */ 29374 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 29375 29376 /* 29377 * Now put the bp onto the failfast queue. 29378 */ 29379 if (un->un_failfast_headp == NULL) { 29380 /* failfast queue is currently empty */ 29381 ASSERT(un->un_failfast_tailp == NULL); 29382 un->un_failfast_headp = 29383 un->un_failfast_tailp = bp; 29384 } else { 29385 /* Add the bp to the end of the failfast q */ 29386 ASSERT(un->un_failfast_tailp != NULL); 29387 ASSERT(un->un_failfast_tailp->b_flags & 29388 B_FAILFAST); 29389 un->un_failfast_tailp->av_forw = bp; 29390 un->un_failfast_tailp = bp; 29391 } 29392 } 29393 } 29394 29395 /* 29396 * Now return all bp's on the failfast queue to their owners. 29397 */ 29398 while ((bp = un->un_failfast_headp) != NULL) { 29399 29400 un->un_failfast_headp = bp->av_forw; 29401 if (un->un_failfast_headp == NULL) { 29402 un->un_failfast_tailp = NULL; 29403 } 29404 29405 /* 29406 * We want to return the bp with a failure error code, but 29407 * we do not want a call to sd_start_cmds() to occur here, 29408 * so use sd_return_failed_command_no_restart() instead of 29409 * sd_return_failed_command(). 29410 */ 29411 sd_return_failed_command_no_restart(un, bp, EIO); 29412 } 29413 29414 /* Flush the xbuf queues if required. 
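	 * ddi_xbuf_flushq(9F) walks the bufs still queued at the xbuf layer
	 * and flushes those for which the callback below,
	 * sd_failfast_flushq_callback(), returns TRUE.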
 */
29415 	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
29416 		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
29417 	}
29418 
29419 	SD_TRACE(SD_LOG_IO_FAILFAST, un,
29420 	    "sd_failfast_flushq: exit: un:0x%p\n", un);
29421 }
29422 
29423 
29424 /*
29425  * Function: sd_failfast_flushq_callback
29426  *
29427  * Description: Return TRUE if the given bp meets the criteria for failfast
29428  *		flushing. Used with ddi_xbuf_flushq(9F).
29429  *
29430  * Arguments: bp - ptr to buf struct to be examined.
29431  *
29432  * Context: Any
29433  */
29434 
29435 static int
29436 sd_failfast_flushq_callback(struct buf *bp)
29437 {
29438 	/*
29439 	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
29440 	 * state is entered; OR (2) the given bp has B_FAILFAST set.
29441 	 */
29442 	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
29443 	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
29444 }
29445 
29446 
29447 
29448 /*
29449  * Function: sd_setup_next_xfer
29450  *
29451  * Description: Prepare the next I/O operation using DMA_PARTIAL
29452  *
29453  */
29454 
29455 static int
29456 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
29457     struct scsi_pkt *pkt, struct sd_xbuf *xp)
29458 {
29459 	ssize_t	num_blks_not_xfered;
29460 	daddr_t	strt_blk_num;
29461 	ssize_t	bytes_not_xfered;
29462 	int	rval;
29463 
29464 	ASSERT(pkt->pkt_resid == 0);
29465 
29466 	/*
29467 	 * Calculate next block number and amount to be transferred.
29468 	 *
29469 	 * How much data has NOT been transferred to the HBA yet.
29470 	 */
29471 	bytes_not_xfered = xp->xb_dma_resid;
29472 
29473 	/*
29474 	 * Figure out how many blocks have NOT been transferred to the HBA yet.
29475 	 */
29476 	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);
29477 
29478 	/*
29479 	 * Set the starting block number to the end of what WAS transferred.
29480 	 */
29481 	strt_blk_num = xp->xb_blkno +
29482 	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);
29483 
29484 	/*
29485 	 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
29486 	 * will call scsi_init_pkt with NULL_FUNC so we do not have to release
29487 	 * the disk mutex here.
29488 	 */
29489 	rval = sd_setup_next_rw_pkt(un, pkt, bp,
29490 	    strt_blk_num, num_blks_not_xfered);
29491 
29492 	if (rval == 0) {
29493 
29494 		/*
29495 		 * Success.
29496 		 *
29497 		 * Adjust things if there are still more blocks to be
29498 		 * transferred.
29499 		 */
29500 		xp->xb_dma_resid = pkt->pkt_resid;
29501 		pkt->pkt_resid = 0;
29502 
29503 		return (1);
29504 	}
29505 
29506 	/*
29507 	 * There's really only one possible error return value from
29508 	 * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt
29509 	 * returns NULL.
29510 	 */
29511 	ASSERT(rval == SD_PKT_ALLOC_FAILURE);
29512 
29513 	bp->b_resid = bp->b_bcount;
29514 	bp->b_flags |= B_ERROR;
29515 
29516 	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29517 	    "Error setting up next portion of DMA transfer\n");
29518 
29519 	return (0);
29520 }
29521 
29522 /*
29523  * Function: sd_panic_for_res_conflict
29524  *
29525  * Description: Call panic with a string formatted with "Reservation Conflict"
29526  *		and a human-readable identifier indicating the SD instance
29527  *		that experienced the reservation conflict.
29528  *
29529  * Arguments: un - pointer to the soft state struct for the instance.
29530  *
29531  * Context: may execute in interrupt context.
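 *
 * Note: the panic string buffer below is sized to hold the fixed format
 * text (SD_RESV_CONFLICT_FMT_LEN bytes) plus a device path of up to
 * MAXPATHLEN bytes as returned by ddi_pathname(9F).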
29532  */
29533 
29534 #define	SD_RESV_CONFLICT_FMT_LEN 40
29535 void
29536 sd_panic_for_res_conflict(struct sd_lun *un)
29537 {
29538 	char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN];
29539 	char path_str[MAXPATHLEN];
29540 
29541 	(void) snprintf(panic_str, sizeof (panic_str),
29542 	    "Reservation Conflict\nDisk: %s",
29543 	    ddi_pathname(SD_DEVINFO(un), path_str));
29544 
29545 	panic(panic_str);
29546 }
29547 
29548 /*
29549  * Note: The following sd_faultinjection_ioctl() routines implement
29550  * driver support for handling fault injection for error analysis by
29551  * causing faults in multiple layers of the driver.
29552  *
29553  */
29554 
29555 #ifdef SD_FAULT_INJECTION
29556 static uint_t   sd_fault_injection_on = 0;
29557 
29558 /*
29559  * Function: sd_faultinjection_ioctl()
29560  *
29561  * Description: This routine is the driver entry point for handling
29562  *		fault injection ioctls used to inject errors into the
29563  *		layer model.
29564  *
29565  * Arguments: cmd	- the ioctl cmd received
29566  *		arg	- the argument from the user; also used to return data
29567  */
29568 
29569 static void
29570 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) {
29571 
29572 	uint_t i = 0;
29573 	uint_t rval;
29574 
29575 	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");
29576 
29577 	mutex_enter(SD_MUTEX(un));
29578 
29579 	switch (cmd) {
29580 	case SDIOCRUN:
29581 		/* Allow pushed faults to be injected */
29582 		SD_INFO(SD_LOG_SDTEST, un,
29583 		    "sd_faultinjection_ioctl: Injecting Fault Run\n");
29584 
29585 		sd_fault_injection_on = 1;
29586 
29587 		SD_INFO(SD_LOG_IOERR, un,
29588 		    "sd_faultinjection_ioctl: run finished\n");
29589 		break;
29590 
29591 	case SDIOCSTART:
29592 		/* Start Injection Session */
29593 		SD_INFO(SD_LOG_SDTEST, un,
29594 		    "sd_faultinjection_ioctl: Injecting Fault Start\n");
29595 
29596 		sd_fault_injection_on = 0;
29597 		un->sd_injection_mask = 0xFFFFFFFF;
29598 		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
29599 			un->sd_fi_fifo_pkt[i] = NULL;
29600 			un->sd_fi_fifo_xb[i] = NULL;
29601 			un->sd_fi_fifo_un[i] = NULL;
29602 			un->sd_fi_fifo_arq[i] = NULL;
29603 		}
29604 		un->sd_fi_fifo_start = 0;
29605 		un->sd_fi_fifo_end = 0;
29606 
29607 		mutex_enter(&(un->un_fi_mutex));
29608 		un->sd_fi_log[0] = '\0';
29609 		un->sd_fi_buf_len = 0;
29610 		mutex_exit(&(un->un_fi_mutex));
29611 
29612 		SD_INFO(SD_LOG_IOERR, un,
29613 		    "sd_faultinjection_ioctl: start finished\n");
29614 		break;
29615 
29616 	case SDIOCSTOP:
29617 		/* Stop Injection Session */
29618 		SD_INFO(SD_LOG_SDTEST, un,
29619 		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");
29620 		sd_fault_injection_on = 0;
29621 		un->sd_injection_mask = 0x0;
29622 
29623 		/* Empty stray or unused structs from the fifo */
29624 		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
29625 			if (un->sd_fi_fifo_pkt[i] != NULL) {
29626 				kmem_free(un->sd_fi_fifo_pkt[i],
29627 				    sizeof (struct sd_fi_pkt));
29628 			}
29629 			if (un->sd_fi_fifo_xb[i] != NULL) {
29630 				kmem_free(un->sd_fi_fifo_xb[i],
29631 				    sizeof (struct sd_fi_xb));
29632 			}
29633 			if (un->sd_fi_fifo_un[i] != NULL) {
29634 				kmem_free(un->sd_fi_fifo_un[i],
29635 				    sizeof (struct sd_fi_un));
29636 			}
29637 			if (un->sd_fi_fifo_arq[i] != NULL) {
29638 				kmem_free(un->sd_fi_fifo_arq[i],
29639 				    sizeof (struct sd_fi_arq));
29640 			}
29641 			un->sd_fi_fifo_pkt[i] = NULL;
29642 			un->sd_fi_fifo_un[i] = NULL;
29643 			un->sd_fi_fifo_xb[i] = NULL;
29644 			un->sd_fi_fifo_arq[i] = NULL;
29645 		}
29646 		un->sd_fi_fifo_start = 0;
29647 		un->sd_fi_fifo_end = 0;
29648 
29649 		SD_INFO(SD_LOG_IOERR, un,
29650 		    "sd_faultinjection_ioctl: stop finished\n");
29651 		break;
29652 
29653 	case SDIOCINSERTPKT:
29654 		/*
Store a packet struct to be pushed onto fifo */
29655 		SD_INFO(SD_LOG_SDTEST, un,
29656 		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");
29657 
29658 		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
29659 
29660 		sd_fault_injection_on = 0;
29661 
29662 		/* No more than SD_FI_MAX_ERROR entries allowed in the queue */
29663 		if (un->sd_fi_fifo_pkt[i] != NULL) {
29664 			kmem_free(un->sd_fi_fifo_pkt[i],
29665 			    sizeof (struct sd_fi_pkt));
29666 		}
29667 		if (arg != NULL) {
29668 			un->sd_fi_fifo_pkt[i] =
29669 			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
29670 			if (un->sd_fi_fifo_pkt[i] == NULL) {
29671 				/* Alloc failed; don't store anything */
29672 				break;
29673 			}
29674 			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
29675 			    sizeof (struct sd_fi_pkt), 0);
29676 			if (rval == -1) {
29677 				kmem_free(un->sd_fi_fifo_pkt[i],
29678 				    sizeof (struct sd_fi_pkt));
29679 				un->sd_fi_fifo_pkt[i] = NULL;
29680 			}
29681 		} else {
29682 			SD_INFO(SD_LOG_IOERR, un,
29683 			    "sd_faultinjection_ioctl: pkt null\n");
29684 		}
29685 		break;
29686 
29687 	case SDIOCINSERTXB:
29688 		/* Store an xb struct to be pushed onto fifo */
29689 		SD_INFO(SD_LOG_SDTEST, un,
29690 		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");
29691 
29692 		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
29693 
29694 		sd_fault_injection_on = 0;
29695 
29696 		if (un->sd_fi_fifo_xb[i] != NULL) {
29697 			kmem_free(un->sd_fi_fifo_xb[i],
29698 			    sizeof (struct sd_fi_xb));
29699 			un->sd_fi_fifo_xb[i] = NULL;
29700 		}
29701 		if (arg != NULL) {
29702 			un->sd_fi_fifo_xb[i] =
29703 			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
29704 			if (un->sd_fi_fifo_xb[i] == NULL) {
29705 				/* Alloc failed; don't store anything */
29706 				break;
29707 			}
29708 			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
29709 			    sizeof (struct sd_fi_xb), 0);
29710 
29711 			if (rval == -1) {
29712 				kmem_free(un->sd_fi_fifo_xb[i],
29713 				    sizeof (struct sd_fi_xb));
29714 				un->sd_fi_fifo_xb[i] = NULL;
29715 			}
29716 		} else {
29717 			SD_INFO(SD_LOG_IOERR, un,
29718 			    "sd_faultinjection_ioctl: xb null\n");
29719 		}
29720 		break;
29721 
29722 	case SDIOCINSERTUN:
29723 		/* Store a un struct to be pushed onto fifo */
29724 		SD_INFO(SD_LOG_SDTEST, un,
29725 		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");
29726 
29727 		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
29728 
29729 		sd_fault_injection_on = 0;
29730 
29731 		if (un->sd_fi_fifo_un[i] != NULL) {
29732 			kmem_free(un->sd_fi_fifo_un[i],
29733 			    sizeof (struct sd_fi_un));
29734 			un->sd_fi_fifo_un[i] = NULL;
29735 		}
29736 		if (arg != NULL) {
29737 			un->sd_fi_fifo_un[i] =
29738 			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
29739 			if (un->sd_fi_fifo_un[i] == NULL) {
29740 				/* Alloc failed; don't store anything */
29741 				break;
29742 			}
29743 			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
29744 			    sizeof (struct sd_fi_un), 0);
29745 			if (rval == -1) {
29746 				kmem_free(un->sd_fi_fifo_un[i],
29747 				    sizeof (struct sd_fi_un));
29748 				un->sd_fi_fifo_un[i] = NULL;
29749 			}
29750 
29751 		} else {
29752 			SD_INFO(SD_LOG_IOERR, un,
29753 			    "sd_faultinjection_ioctl: un null\n");
29754 		}
29755 
29756 		break;
29757 
29758 	case SDIOCINSERTARQ:
29759 		/* Store an arq struct to be pushed onto fifo */
29760 		SD_INFO(SD_LOG_SDTEST, un,
29761 		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
29762 		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
29763 
29764 		sd_fault_injection_on = 0;
29765 
29766 		if (un->sd_fi_fifo_arq[i] != NULL) {
29767 			kmem_free(un->sd_fi_fifo_arq[i],
29768 			    sizeof (struct sd_fi_arq));
29769 			un->sd_fi_fifo_arq[i] = NULL;
29770 		}
29771 		if (arg != NULL) {
29772 			un->sd_fi_fifo_arq[i] =
29773 			    kmem_alloc(sizeof (struct
sd_fi_arq), KM_NOSLEEP);
29774 			if (un->sd_fi_fifo_arq[i] == NULL) {
29775 				/* Alloc failed; don't store anything */
29776 				break;
29777 			}
29778 			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
29779 			    sizeof (struct sd_fi_arq), 0);
29780 			if (rval == -1) {
29781 				kmem_free(un->sd_fi_fifo_arq[i],
29782 				    sizeof (struct sd_fi_arq));
29783 				un->sd_fi_fifo_arq[i] = NULL;
29784 			}
29785 
29786 		} else {
29787 			SD_INFO(SD_LOG_IOERR, un,
29788 			    "sd_faultinjection_ioctl: arq null\n");
29789 		}
29790 
29791 		break;
29792 
29793 	case SDIOCPUSH:
29794 		/* Push stored xb, pkt, un, and arq onto fifo */
29795 		sd_fault_injection_on = 0;
29796 
29797 		if (arg != NULL) {
29798 			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
29799 			if (rval != -1 &&
29800 			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
29801 				un->sd_fi_fifo_end += i;
29802 			}
29803 		} else {
29804 			SD_INFO(SD_LOG_IOERR, un,
29805 			    "sd_faultinjection_ioctl: push arg null\n");
29806 			if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
29807 				un->sd_fi_fifo_end++;
29808 			}
29809 		}
29810 		SD_INFO(SD_LOG_IOERR, un,
29811 		    "sd_faultinjection_ioctl: push to end=%d\n",
29812 		    un->sd_fi_fifo_end);
29813 		break;
29814 
29815 	case SDIOCRETRIEVE:
29816 		/* Return buffer of log from Injection session */
29817 		SD_INFO(SD_LOG_SDTEST, un,
29818 		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");
29819 
29820 		sd_fault_injection_on = 0;
29821 
29822 		mutex_enter(&(un->un_fi_mutex));
29823 		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
29824 		    un->sd_fi_buf_len+1, 0);
29825 		mutex_exit(&(un->un_fi_mutex));
29826 
29827 		if (rval == -1) {
29828 			/*
29829 			 * arg is possibly invalid; set
29830 			 * it to NULL for return
29831 			 */
29832 			arg = NULL;
29833 		}
29834 		break;
29835 	}
29836 
29837 	mutex_exit(SD_MUTEX(un));
29838 	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
29839 	    " exit\n");
29840 }
29841 
29842 
29843 /*
29844  * Function: sd_injection_log()
29845  *
29846  * Description: This routine adds buf to the existing injection log
29847  *		for later retrieval via sd_faultinjection_ioctl(), for use
29848  *		in fault detection and recovery.
29849  *
29850  * Arguments: buf - the string to add to the log
29851  */
29852 
29853 static void
29854 sd_injection_log(char *buf, struct sd_lun *un)
29855 {
29856 	uint_t len;
29857 
29858 	ASSERT(un != NULL);
29859 	ASSERT(buf != NULL);
29860 
29861 	mutex_enter(&(un->un_fi_mutex));
29862 
29863 	len = min(strlen(buf), 255);
29864 	/* Add logged value to Injection log to be returned later */
29865 	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
29866 		uint_t	offset = strlen((char *)un->sd_fi_log);
29867 		char *destp = (char *)un->sd_fi_log + offset;
29868 		int i;
29869 		for (i = 0; i < len; i++) {
29870 			*destp++ = *buf++;
29871 		}
29872 		un->sd_fi_buf_len += len;
29873 		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
29874 	}
29875 
29876 	mutex_exit(&(un->un_fi_mutex));
29877 }
29878 
29879 
29880 /*
29881  * Function: sd_faultinjection()
29882  *
29883  * Description: This routine takes the pkt and changes its
29884  *		content based on the error injection scenario.
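 *		One staged set of sd_fi_pkt/sd_fi_xb/sd_fi_un/sd_fi_arq
 *		structs is consumed from the fifo per invocation, and each
 *		staged field is overlaid onto the live pkt/xbuf/un state via
 *		the SD_CONDSET() macro. As a sketch (assuming SD_CONDSET(a,
 *		kind, field, name) copies fi_<kind>->field into a->field and
 *		logs "name"; see the macro definition for the precise test),
 *		an injected pkt_reason would take effect roughly as:
 *
 *			pktp->pkt_reason = fi_pkt->pkt_reason;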
29885 * 29886 * Arguments: pktp - packet to be changed 29887 */ 29888 29889 static void 29890 sd_faultinjection(struct scsi_pkt *pktp) 29891 { 29892 uint_t i; 29893 struct sd_fi_pkt *fi_pkt; 29894 struct sd_fi_xb *fi_xb; 29895 struct sd_fi_un *fi_un; 29896 struct sd_fi_arq *fi_arq; 29897 struct buf *bp; 29898 struct sd_xbuf *xb; 29899 struct sd_lun *un; 29900 29901 ASSERT(pktp != NULL); 29902 29903 /* pull bp xb and un from pktp */ 29904 bp = (struct buf *)pktp->pkt_private; 29905 xb = SD_GET_XBUF(bp); 29906 un = SD_GET_UN(bp); 29907 29908 ASSERT(un != NULL); 29909 29910 mutex_enter(SD_MUTEX(un)); 29911 29912 SD_TRACE(SD_LOG_SDTEST, un, 29913 "sd_faultinjection: entry Injection from sdintr\n"); 29914 29915 /* if injection is off return */ 29916 if (sd_fault_injection_on == 0 || 29917 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 29918 mutex_exit(SD_MUTEX(un)); 29919 return; 29920 } 29921 29922 SD_INFO(SD_LOG_SDTEST, un, 29923 "sd_faultinjection: is working for copying\n"); 29924 29925 /* take next set off fifo */ 29926 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 29927 29928 fi_pkt = un->sd_fi_fifo_pkt[i]; 29929 fi_xb = un->sd_fi_fifo_xb[i]; 29930 fi_un = un->sd_fi_fifo_un[i]; 29931 fi_arq = un->sd_fi_fifo_arq[i]; 29932 29933 29934 /* set variables accordingly */ 29935 /* set pkt if it was on fifo */ 29936 if (fi_pkt != NULL) { 29937 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 29938 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 29939 if (fi_pkt->pkt_cdbp != 0xff) 29940 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 29941 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 29942 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 29943 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 29944 29945 } 29946 /* set xb if it was on fifo */ 29947 if (fi_xb != NULL) { 29948 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 29949 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 29950 if (fi_xb->xb_retry_count != 0) 29951 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 29952 SD_CONDSET(xb, xb, xb_victim_retry_count, 29953 "xb_victim_retry_count"); 29954 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 29955 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 29956 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 29957 29958 /* copy in block data from sense */ 29959 /* 29960 * if (fi_xb->xb_sense_data[0] != -1) { 29961 * bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 29962 * SENSE_LENGTH); 29963 * } 29964 */ 29965 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH); 29966 29967 /* copy in extended sense codes */ 29968 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29969 xb, es_code, "es_code"); 29970 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29971 xb, es_key, "es_key"); 29972 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29973 xb, es_add_code, "es_add_code"); 29974 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29975 xb, es_qual_code, "es_qual_code"); 29976 struct scsi_extended_sense *esp; 29977 esp = (struct scsi_extended_sense *)xb->xb_sense_data; 29978 esp->es_class = CLASS_EXTENDED_SENSE; 29979 } 29980 29981 /* set un if it was on fifo */ 29982 if (fi_un != NULL) { 29983 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 29984 SD_CONDSET(un, un, un_ctype, "un_ctype"); 29985 SD_CONDSET(un, un, un_reset_retry_count, 29986 "un_reset_retry_count"); 29987 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 29988 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 29989 SD_CONDSET(un, un, 
un_f_arq_enabled, "un_f_arq_enabled");
29990 		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
29991 		    "un_f_allow_bus_device_reset");
29992 		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");
29993 
29994 	}
29995 
29996 	/* copy in auto request sense if it was on fifo */
29997 	if (fi_arq != NULL) {
29998 		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
29999 	}
30000 
30001 	/* free structs */
30002 	if (un->sd_fi_fifo_pkt[i] != NULL) {
30003 		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
30004 	}
30005 	if (un->sd_fi_fifo_xb[i] != NULL) {
30006 		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
30007 	}
30008 	if (un->sd_fi_fifo_un[i] != NULL) {
30009 		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
30010 	}
30011 	if (un->sd_fi_fifo_arq[i] != NULL) {
30012 		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
30013 	}
30014 
30015 	/*
30016 	 * kmem_free does not guarantee that the pointer is set to NULL.
30017 	 * Since we use these pointers to determine whether we set
30018 	 * values or not, let's make sure they are always
30019 	 * NULL after the free.
30020 	 */
30021 	un->sd_fi_fifo_pkt[i] = NULL;
30022 	un->sd_fi_fifo_un[i] = NULL;
30023 	un->sd_fi_fifo_xb[i] = NULL;
30024 	un->sd_fi_fifo_arq[i] = NULL;
30025 
30026 	un->sd_fi_fifo_start++;
30027 
30028 	mutex_exit(SD_MUTEX(un));
30029 
30030 	SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
30031 }
30032 
30033 #endif /* SD_FAULT_INJECTION */
30034 
30035 /*
30036  * This routine is invoked in sd_unit_attach(). Before calling it, the
30037  * properties in the conf file should already have been processed, and the
30038  * "hotpluggable" property should have been processed as well.
30039  *
30040  * The sd driver distinguishes 3 different types of devices: removable media,
30041  * non-removable media, and hotpluggable. The differences are defined below:
30042  *
30043  * 1. Device ID
30044  *
30045  * The device ID of a device is used to identify this device. Refer to
30046  * ddi_devid_register(9F).
30047  *
30048  * For a non-removable media disk device that can provide a 0x80 or 0x83
30049  * VPD page (refer to the INQUIRY command of the SCSI SPC specification), a
30050  * unique device ID is created to identify this device. For other
30051  * non-removable media devices, a default device ID is created only if this
30052  * device has at least 2 alternate cylinders. Otherwise, this device has no
30053  * devid.
30054  * -------------------------------------------------------
30055  * removable media   hotpluggable  | Can Have Device ID
30056  * -------------------------------------------------------
30057  *         false             false |     Yes
30058  *         false             true  |     Yes
30059  *         true                 x  |     No
30060  * -------------------------------------------------------
30061  *
30062  *
30063  * 2. SCSI group 4 commands
30064  *
30065  * In the SCSI specs, only some commands in the group 4 command set can use
30066  * 8-byte addresses, which are needed to access storage spaces beyond 2TB.
30067  * Other commands have no such capability. Without group 4 support,
30068  * it is impossible to make full use of the storage space of a disk with
30069  * a capacity larger than 2TB.
30070  *
30071  * -----------------------------------------------
30072  * removable media   hotpluggable   LP64  |  Group
30073  * -----------------------------------------------
30074  *       false          false     false   |   1
30075  *       false          false     true    |   4
30076  *       false          true      false   |   1
30077  *       false          true      true    |   4
30078  *       true             x         x     |   5
30079  * -----------------------------------------------
30080  *
30081  *
30082  * 3.
Check for VTOC Label
30083  *
30084  * If a direct-access disk has no EFI label, sd will check if it has a
30085  * valid VTOC label. Now, sd also does that check for removable media
30086  * and hotpluggable devices.
30087  *
30088  * --------------------------------------------------------------
30089  * Direct-Access   removable media    hotpluggable |  Check Label
30090  * -------------------------------------------------------------
30091  *     false          false           false        |   No
30092  *     false          false           true         |   No
30093  *     false          true            false        |   Yes
30094  *     false          true            true         |   Yes
30095  *     true            x                x          |   Yes
30096  * --------------------------------------------------------------
30097  *
30098  *
30099  * 4. Building default VTOC label
30100  *
30101  * As section 3 says, sd checks whether some kinds of devices have a VTOC
30102  * label. If those devices have no valid VTOC label, sd(7d) will attempt to
30103  * create a default VTOC for them. Currently sd creates a default VTOC label
30104  * for all devices on the x86 platform (VTOC_16), but only for removable
30105  * media devices on SPARC (VTOC_8).
30106  *
30107  * -----------------------------------------------------------
30108  *   removable media hotpluggable platform   |   Default Label
30109  * -----------------------------------------------------------
30110  *         false          false    sparc     |     No
30111  *         false          true      x86      |     Yes
30112  *         false          true     sparc     |     Yes
30113  *         true             x        x       |     Yes
30114  * ----------------------------------------------------------
30115  *
30116  *
30117  * 5. Supported blocksizes of target devices
30118  *
30119  * Sd supports non-512-byte blocksizes for removable media devices only.
30120  * For other devices, only a 512-byte blocksize is supported. This may be
30121  * changed in the near future because some RAID devices require a
30122  * non-512-byte blocksize.
30123  *
30124  * -----------------------------------------------------------
30125  *   removable media    hotpluggable | non-512-byte blocksize
30126  * -----------------------------------------------------------
30127  *           false          false    |   No
30128  *           false          true     |   No
30129  *           true            x       |   Yes
30130  * -----------------------------------------------------------
30131  *
30132  *
30133  * 6. Automatic mount & unmount
30134  *
30135  * The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which is used to
30136  * query whether a device is a removable media device. It returns 1 for
30137  * removable media devices, and 0 for others.
30138  *
30139  * The automatic mounting subsystem should distinguish between the types
30140  * of devices and apply automounting policies to each.
30141  *
30142  *
30143  * 7. fdisk partition management
30144  *
30145  * Fdisk is the traditional partitioning method on the x86 platform. The
30146  * sd(7d) driver supports fdisk partitions only on the x86 platform; on the
30147  * SPARC platform, sd doesn't support fdisk partitions at all. Note:
30148  * pcfs(7fs) can recognize fdisk partitions on both x86 and SPARC platforms.
30149  *
30150  * -----------------------------------------------------------
30151  *   platform   removable media  USB/1394  |  fdisk supported
30152  * -----------------------------------------------------------
30153  *    x86                X               X |       true
30154  * ------------------------------------------------------------
30155  *    sparc              X               X |       false
30156  * ------------------------------------------------------------
30157  *
30158  *
30159  * 8. MBOOT/MBR
30160  *
30161  * Although sd(7d) doesn't support fdisk on the SPARC platform, it does
30162  * support reading and writing the mboot for removable media devices on SPARC.
30163  *
30164  * -----------------------------------------------------------
30165  *   platform   removable media  USB/1394  |  mboot supported
30166  * -----------------------------------------------------------
30167  *    x86                X               X |       true
30168  * ------------------------------------------------------------
30169  *    sparc          false           false |       false
30170  *    sparc          false           true  |       true
30171  *    sparc          true            false |       true
30172  *    sparc          true            true  |       true
30173  * ------------------------------------------------------------
30174  *
30175  *
30176  * 9. Error handling during device open
30177  *
30178  * If opening a disk device fails, an errno is returned. For some kinds
30179  * of errors, a different errno is returned depending on whether the device
30180  * is a removable media device. This brings USB/1394 hard disks in line with
30181  * expected hard disk behavior. It is not expected that this breaks any
30182  * application.
30183  *
30184  * ------------------------------------------------------
30185  *   removable media    hotpluggable   |  errno
30186  * ------------------------------------------------------
30187  *           false          false      |   EIO
30188  *           false          true       |   EIO
30189  *           true            x         |   ENXIO
30190  * ------------------------------------------------------
30191  *
30192  *
30193  * 10. ioctls: DKIOCEJECT, CDROMEJECT
30194  *
30195  * These IOCTLs are applicable only to removable media devices.
30196  *
30197  * -----------------------------------------------------------
30198  *   removable media    hotpluggable  | DKIOCEJECT, CDROMEJECT
30199  * -----------------------------------------------------------
30200  *           false          false     |     No
30201  *           false          true      |     No
30202  *           true            x        |     Yes
30203  * -----------------------------------------------------------
30204  *
30205  *
30206  * 11. Kstats for partitions
30207  *
30208  * sd creates partition kstats for non-removable media devices. USB and
30209  * Firewire hard disks now have partition kstats as well.
30210  *
30211  * ------------------------------------------------------
30212  *   removable media    hotpluggable   |   kstat
30213  * ------------------------------------------------------
30214  *           false          false      |    Yes
30215  *           false          true       |    Yes
30216  *           true            x         |    No
30217  * ------------------------------------------------------
30218  *
30219  *
30220  * 12. Removable media & hotpluggable properties
30221  *
30222  * The sd driver creates a "removable-media" property for removable media
30223  * devices. A parent nexus driver creates a "hotpluggable" property if
30224  * it supports hotplugging.
30225  *
30226  * ---------------------------------------------------------------------
30227  *   removable media   hotpluggable |  "removable-media"   "hotpluggable"
30228  * ---------------------------------------------------------------------
30229  *         false          false     |      No                  No
30230  *         false          true      |      No                  Yes
30231  *         true           false     |      Yes                 No
30232  *         true           true      |      Yes                 Yes
30233  * ---------------------------------------------------------------------
30234  *
30235  *
30236  * 13. Power Management
30237  *
30238  * sd only power manages removable media devices or devices that support
30239  * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
30240  *
30241  * A parent nexus that supports hotplugging can also set "pm-capable"
30242  * if the disk can be power managed.
30243  *
30244  * ------------------------------------------------------------
30245  *   removable media hotpluggable pm-capable  |   power manage
30246  * ------------------------------------------------------------
30247  *         false          false     false     |     No
30248  *         false          false     true      |     Yes
30249  *         false          true      false     |     No
30250  *         false          true      true      |     Yes
30251  *         true             x        x        |     Yes
30252  * ------------------------------------------------------------
30253  *
30254  * USB and Firewire hard disks can now be power managed independently
30255  * of the framebuffer.
30256  *
30257  *
30258  * 14. Support for USB disks with capacity larger than 1TB
30259  *
30260  * Currently, sd doesn't permit a fixed disk device with a capacity
30261  * larger than 1TB to be used in a 32-bit operating system environment.
30262  * However, sd doesn't enforce that restriction for removable media devices.
30263  * Instead, it assumes that a removable media device cannot have a capacity
30264  * larger than 1TB. Therefore, using those devices on a 32-bit system is
30265  * only partially supported, which can cause some unexpected results.
30266  *
30267  * ---------------------------------------------------------------------
30268  *   removable media    USB/1394 | Capacity > 1TB |   Used in 32-bit env
30269  * ---------------------------------------------------------------------
30270  *         false          false  |      true      |     No
30271  *         false          true   |      true      |     No
30272  *         true           false  |      true      |     Yes
30273  *         true           true   |      true      |     Yes
30274  * ---------------------------------------------------------------------
30275  *
30276  *
30277  * 15. Check write-protection at open time
30278  *
30279  * When a removable media device is opened for writing without the NDELAY
30280  * flag, sd will check whether the device is writable. If a write-protected
30281  * device is opened for writing without the NDELAY flag, the open will fail.
30282  *
30283  * ------------------------------------------------------------
30284  *   removable media    USB/1394   |   WP Check
30285  * ------------------------------------------------------------
30286  *         false          false    |     No
30287  *         false          true     |     No
30288  *         true           false    |     Yes
30289  *         true           true     |     Yes
30290  * ------------------------------------------------------------
30291  *
30292  *
30293  * 16. syslog when corrupted VTOC is encountered
30294  *
30295  * Currently, if an invalid VTOC is encountered, sd logs a syslog message
30296  * only for fixed SCSI disks.
30297  * ------------------------------------------------------------
30298  *   removable media    USB/1394   |   print syslog
30299  * ------------------------------------------------------------
30300  *         false          false    |     Yes
30301  *         false          true     |     No
30302  *         true           false    |     No
30303  *         true           true     |     No
30304  * ------------------------------------------------------------
30305  */
30306 static void
30307 sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
30308 {
30309 	int	pm_capable_prop;
30310 
30311 	ASSERT(un->un_sd);
30312 	ASSERT(un->un_sd->sd_inq);
30313 
30314 	/*
30315 	 * Enable SYNC CACHE support for all devices.
30316 	 */
30317 	un->un_f_sync_cache_supported = TRUE;
30318 
30319 	/*
30320 	 * Set the sync cache required flag to false.
30321 	 * This ensures that there is no SYNC CACHE
30322 	 * sent when there have been no writes.
30323 	 */
30324 	un->un_f_sync_cache_required = FALSE;
30325 
30326 	if (un->un_sd->sd_inq->inq_rmb) {
30327 		/*
30328 		 * The media of this device is removable, and for this kind
30329 		 * of device it is possible to change the medium after the
30330 		 * device has been opened. Thus we should support this
		 * operation.
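		 *
		 * For reference, userland discovers this via the
		 * DKIOCREMOVABLE ioctl described in section 6 of the block
		 * comment above; a minimal (illustrative, not verbatim)
		 * sketch of such a consumer:
		 *
		 *	int removable = 0;
		 *	if (ioctl(fd, DKIOCREMOVABLE, &removable) == 0 &&
		 *	    removable != 0)
		 *		treat the device as removable media;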
30331 */ 30332 un->un_f_has_removable_media = TRUE; 30333 30334 /* 30335 * support non-512-byte blocksize of removable media devices 30336 */ 30337 un->un_f_non_devbsize_supported = TRUE; 30338 30339 /* 30340 * Assume that all removable media devices support DOOR_LOCK 30341 */ 30342 un->un_f_doorlock_supported = TRUE; 30343 30344 /* 30345 * For a removable media device, it is possible to be opened 30346 * with NDELAY flag when there is no media in drive, in this 30347 * case we don't care if device is writable. But if without 30348 * NDELAY flag, we need to check if media is write-protected. 30349 */ 30350 un->un_f_chk_wp_open = TRUE; 30351 30352 /* 30353 * need to start a SCSI watch thread to monitor media state, 30354 * when media is being inserted or ejected, notify syseventd. 30355 */ 30356 un->un_f_monitor_media_state = TRUE; 30357 30358 /* 30359 * Some devices don't support START_STOP_UNIT command. 30360 * Therefore, we'd better check if a device supports it 30361 * before sending it. 30362 */ 30363 un->un_f_check_start_stop = TRUE; 30364 30365 /* 30366 * support eject media ioctl: 30367 * FDEJECT, DKIOCEJECT, CDROMEJECT 30368 */ 30369 un->un_f_eject_media_supported = TRUE; 30370 30371 /* 30372 * Because many removable-media devices don't support 30373 * LOG_SENSE, we couldn't use this command to check if 30374 * a removable media device support power-management. 30375 * We assume that they support power-management via 30376 * START_STOP_UNIT command and can be spun up and down 30377 * without limitations. 30378 */ 30379 un->un_f_pm_supported = TRUE; 30380 30381 /* 30382 * Need to create a zero length (Boolean) property 30383 * removable-media for the removable media devices. 30384 * Note that the return value of the property is not being 30385 * checked, since if unable to create the property 30386 * then do not want the attach to fail altogether. Consistent 30387 * with other property creation in attach. 30388 */ 30389 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 30390 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 30391 30392 } else { 30393 /* 30394 * create device ID for device 30395 */ 30396 un->un_f_devid_supported = TRUE; 30397 30398 /* 30399 * Spin up non-removable-media devices once it is attached 30400 */ 30401 un->un_f_attach_spinup = TRUE; 30402 30403 /* 30404 * According to SCSI specification, Sense data has two kinds of 30405 * format: fixed format, and descriptor format. At present, we 30406 * don't support descriptor format sense data for removable 30407 * media. 30408 */ 30409 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) { 30410 un->un_f_descr_format_supported = TRUE; 30411 } 30412 30413 /* 30414 * kstats are created only for non-removable media devices. 30415 * 30416 * Set this in sd.conf to 0 in order to disable kstats. The 30417 * default is 1, so they are enabled by default. 30418 */ 30419 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 30420 SD_DEVINFO(un), DDI_PROP_DONTPASS, 30421 "enable-partition-kstats", 1)); 30422 30423 /* 30424 * Check if HBA has set the "pm-capable" property. 30425 * If "pm-capable" exists and is non-zero then we can 30426 * power manage the device without checking the start/stop 30427 * cycle count log sense page. 30428 * 30429 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0) 30430 * then we should not power manage the device. 30431 * 30432 * If "pm-capable" doesn't exist then pm_capable_prop will 30433 * be set to SD_PM_CAPABLE_UNDEFINED (-1). 
In this case, 30434 * sd will check the start/stop cycle count log sense page 30435 * and power manage the device if the cycle count limit has 30436 * not been exceeded. 30437 */ 30438 pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi, 30439 DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED); 30440 if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) { 30441 un->un_f_log_sense_supported = TRUE; 30442 } else { 30443 /* 30444 * pm-capable property exists. 30445 * 30446 * Convert "TRUE" values for pm_capable_prop to 30447 * SD_PM_CAPABLE_TRUE (1) to make it easier to check 30448 * later. "TRUE" values are any values except 30449 * SD_PM_CAPABLE_FALSE (0) and 30450 * SD_PM_CAPABLE_UNDEFINED (-1) 30451 */ 30452 if (pm_capable_prop == SD_PM_CAPABLE_FALSE) { 30453 un->un_f_log_sense_supported = FALSE; 30454 } else { 30455 un->un_f_pm_supported = TRUE; 30456 } 30457 30458 SD_INFO(SD_LOG_ATTACH_DETACH, un, 30459 "sd_unit_attach: un:0x%p pm-capable " 30460 "property set to %d.\n", un, un->un_f_pm_supported); 30461 } 30462 } 30463 30464 if (un->un_f_is_hotpluggable) { 30465 30466 /* 30467 * Have to watch hotpluggable devices as well, since 30468 * that's the only way for userland applications to 30469 * detect hot removal while device is busy/mounted. 30470 */ 30471 un->un_f_monitor_media_state = TRUE; 30472 30473 un->un_f_check_start_stop = TRUE; 30474 30475 } 30476 } 30477 30478 /* 30479 * sd_tg_rdwr: 30480 * Provides rdwr access for cmlb via sd_tgops. The start_block is 30481 * in sys block size, req_length in bytes. 30482 * 30483 */ 30484 static int 30485 sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 30486 diskaddr_t start_block, size_t reqlength, void *tg_cookie) 30487 { 30488 struct sd_lun *un; 30489 int path_flag = (int)(uintptr_t)tg_cookie; 30490 char *dkl = NULL; 30491 diskaddr_t real_addr = start_block; 30492 diskaddr_t first_byte, end_block; 30493 30494 size_t buffer_size = reqlength; 30495 int rval = 0; 30496 diskaddr_t cap; 30497 uint32_t lbasize; 30498 sd_ssc_t *ssc; 30499 30500 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 30501 if (un == NULL) 30502 return (ENXIO); 30503 30504 if (cmd != TG_READ && cmd != TG_WRITE) 30505 return (EINVAL); 30506 30507 ssc = sd_ssc_init(un); 30508 mutex_enter(SD_MUTEX(un)); 30509 if (un->un_f_tgt_blocksize_is_valid == FALSE) { 30510 mutex_exit(SD_MUTEX(un)); 30511 rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap, 30512 &lbasize, path_flag); 30513 if (rval != 0) 30514 goto done1; 30515 mutex_enter(SD_MUTEX(un)); 30516 sd_update_block_info(un, lbasize, cap); 30517 if ((un->un_f_tgt_blocksize_is_valid == FALSE)) { 30518 mutex_exit(SD_MUTEX(un)); 30519 rval = EIO; 30520 goto done; 30521 } 30522 } 30523 30524 if (NOT_DEVBSIZE(un)) { 30525 /* 30526 * sys_blocksize != tgt_blocksize, need to re-adjust 30527 * blkno and save the index to beginning of dk_label 30528 */ 30529 first_byte = SD_SYSBLOCKS2BYTES(start_block); 30530 real_addr = first_byte / un->un_tgt_blocksize; 30531 30532 end_block = (first_byte + reqlength + 30533 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize; 30534 30535 /* round up buffer size to multiple of target block size */ 30536 buffer_size = (end_block - real_addr) * un->un_tgt_blocksize; 30537 30538 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr", 30539 "label_addr: 0x%x allocation size: 0x%x\n", 30540 real_addr, buffer_size); 30541 30542 if (((first_byte % un->un_tgt_blocksize) != 0) || 30543 (reqlength % un->un_tgt_blocksize) != 0) 30544 /* the request is not aligned */ 30545 dkl = 
kmem_zalloc(buffer_size, KM_SLEEP); 30546 } 30547 30548 /* 30549 * The MMC standard allows READ CAPACITY to be 30550 * inaccurate by a bounded amount (in the interest of 30551 * response latency). As a result, failed READs are 30552 * commonplace (due to the reading of metadata and not 30553 * data). Depending on the per-Vendor/drive Sense data, 30554 * the failed READ can cause many (unnecessary) retries. 30555 */ 30556 30557 if (ISCD(un) && (cmd == TG_READ) && 30558 (un->un_f_blockcount_is_valid == TRUE) && 30559 ((start_block == (un->un_blockcount - 1))|| 30560 (start_block == (un->un_blockcount - 2)))) { 30561 path_flag = SD_PATH_DIRECT_PRIORITY; 30562 } 30563 30564 mutex_exit(SD_MUTEX(un)); 30565 if (cmd == TG_READ) { 30566 rval = sd_send_scsi_READ(ssc, (dkl != NULL)? dkl: bufaddr, 30567 buffer_size, real_addr, path_flag); 30568 if (dkl != NULL) 30569 bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block, 30570 real_addr), bufaddr, reqlength); 30571 } else { 30572 if (dkl) { 30573 rval = sd_send_scsi_READ(ssc, dkl, buffer_size, 30574 real_addr, path_flag); 30575 if (rval) { 30576 goto done1; 30577 } 30578 bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block, 30579 real_addr), reqlength); 30580 } 30581 rval = sd_send_scsi_WRITE(ssc, (dkl != NULL)? dkl: bufaddr, 30582 buffer_size, real_addr, path_flag); 30583 } 30584 30585 done1: 30586 if (dkl != NULL) 30587 kmem_free(dkl, buffer_size); 30588 30589 if (rval != 0) { 30590 if (rval == EIO) 30591 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 30592 else 30593 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 30594 } 30595 done: 30596 sd_ssc_fini(ssc); 30597 return (rval); 30598 } 30599 30600 30601 static int 30602 sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie) 30603 { 30604 30605 struct sd_lun *un; 30606 diskaddr_t cap; 30607 uint32_t lbasize; 30608 int path_flag = (int)(uintptr_t)tg_cookie; 30609 int ret = 0; 30610 30611 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 30612 if (un == NULL) 30613 return (ENXIO); 30614 30615 switch (cmd) { 30616 case TG_GETPHYGEOM: 30617 case TG_GETVIRTGEOM: 30618 case TG_GETCAPACITY: 30619 case TG_GETBLOCKSIZE: 30620 mutex_enter(SD_MUTEX(un)); 30621 30622 if ((un->un_f_blockcount_is_valid == TRUE) && 30623 (un->un_f_tgt_blocksize_is_valid == TRUE)) { 30624 cap = un->un_blockcount; 30625 lbasize = un->un_tgt_blocksize; 30626 mutex_exit(SD_MUTEX(un)); 30627 } else { 30628 sd_ssc_t *ssc; 30629 mutex_exit(SD_MUTEX(un)); 30630 ssc = sd_ssc_init(un); 30631 ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap, 30632 &lbasize, path_flag); 30633 if (ret != 0) { 30634 if (ret == EIO) 30635 sd_ssc_assessment(ssc, 30636 SD_FMT_STATUS_CHECK); 30637 else 30638 sd_ssc_assessment(ssc, 30639 SD_FMT_IGNORE); 30640 sd_ssc_fini(ssc); 30641 return (ret); 30642 } 30643 sd_ssc_fini(ssc); 30644 mutex_enter(SD_MUTEX(un)); 30645 sd_update_block_info(un, lbasize, cap); 30646 if ((un->un_f_blockcount_is_valid == FALSE) || 30647 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 30648 mutex_exit(SD_MUTEX(un)); 30649 return (EIO); 30650 } 30651 mutex_exit(SD_MUTEX(un)); 30652 } 30653 30654 if (cmd == TG_GETCAPACITY) { 30655 *(diskaddr_t *)arg = cap; 30656 return (0); 30657 } 30658 30659 if (cmd == TG_GETBLOCKSIZE) { 30660 *(uint32_t *)arg = lbasize; 30661 return (0); 30662 } 30663 30664 if (cmd == TG_GETPHYGEOM) 30665 ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg, 30666 cap, lbasize, path_flag); 30667 else 30668 /* TG_GETVIRTGEOM */ 30669 ret = sd_get_virtual_geometry(un, 30670 (cmlb_geom_t *)arg, cap, lbasize); 30671 
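		/*
		 * Both geometry paths above fill in the caller-supplied
		 * cmlb_geom_t; ret is 0 on success or an errno from the
		 * geometry routine, and is simply passed back to cmlb.
		 */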
30672 return (ret); 30673 30674 case TG_GETATTR: 30675 mutex_enter(SD_MUTEX(un)); 30676 ((tg_attribute_t *)arg)->media_is_writable = 30677 un->un_f_mmc_writable_media; 30678 mutex_exit(SD_MUTEX(un)); 30679 return (0); 30680 default: 30681 return (ENOTTY); 30682 30683 } 30684 } 30685 30686 /* 30687 * Function: sd_ssc_ereport_post 30688 * 30689 * Description: Will be called when SD driver need to post an ereport. 30690 * 30691 * Context: Kernel thread or interrupt context. 30692 */ 30693 static void 30694 sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess) 30695 { 30696 int uscsi_path_instance = 0; 30697 uchar_t uscsi_pkt_reason; 30698 uint32_t uscsi_pkt_state; 30699 uint32_t uscsi_pkt_statistics; 30700 uint64_t uscsi_ena; 30701 uchar_t op_code; 30702 uint8_t *sensep; 30703 union scsi_cdb *cdbp; 30704 uint_t cdblen = 0; 30705 uint_t senlen = 0; 30706 struct sd_lun *un; 30707 dev_info_t *dip; 30708 char *devid; 30709 int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON | 30710 SSC_FLAGS_INVALID_STATUS | 30711 SSC_FLAGS_INVALID_SENSE | 30712 SSC_FLAGS_INVALID_DATA; 30713 char assessment[16]; 30714 30715 ASSERT(ssc != NULL); 30716 ASSERT(ssc->ssc_uscsi_cmd != NULL); 30717 ASSERT(ssc->ssc_uscsi_info != NULL); 30718 30719 un = ssc->ssc_un; 30720 ASSERT(un != NULL); 30721 30722 dip = un->un_sd->sd_dev; 30723 30724 /* 30725 * Get the devid: 30726 * devid will only be passed to non-transport error reports. 30727 */ 30728 devid = DEVI(dip)->devi_devid_str; 30729 30730 /* 30731 * If we are syncing or dumping, the command will not be executed 30732 * so we bypass this situation. 30733 */ 30734 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 30735 (un->un_state == SD_STATE_DUMPING)) 30736 return; 30737 30738 uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason; 30739 uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance; 30740 uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state; 30741 uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics; 30742 uscsi_ena = ssc->ssc_uscsi_info->ui_ena; 30743 30744 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 30745 cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb; 30746 30747 /* In rare cases, EG:DOORLOCK, the cdb could be NULL */ 30748 if (cdbp == NULL) { 30749 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 30750 "sd_ssc_ereport_post meet empty cdb\n"); 30751 return; 30752 } 30753 30754 op_code = cdbp->scc_cmd; 30755 30756 cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen; 30757 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 30758 ssc->ssc_uscsi_cmd->uscsi_rqresid); 30759 30760 if (senlen > 0) 30761 ASSERT(sensep != NULL); 30762 30763 /* 30764 * Initialize drv_assess to corresponding values. 30765 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending 30766 * on the sense-key returned back. 30767 */ 30768 switch (drv_assess) { 30769 case SD_FM_DRV_RECOVERY: 30770 (void) sprintf(assessment, "%s", "recovered"); 30771 break; 30772 case SD_FM_DRV_RETRY: 30773 (void) sprintf(assessment, "%s", "retry"); 30774 break; 30775 case SD_FM_DRV_NOTICE: 30776 (void) sprintf(assessment, "%s", "info"); 30777 break; 30778 case SD_FM_DRV_FATAL: 30779 default: 30780 (void) sprintf(assessment, "%s", "unknown"); 30781 } 30782 /* 30783 * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered 30784 * command, we will post ereport.io.scsi.cmd.disk.recovered. 30785 * driver-assessment will always be "recovered" here. 
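 *
 * For reference, the ereport classes posted from this routine are:
 *	cmd.disk.recovered	- recovered command (this path)
 *	cmd.disk.dev.uderr	- un-expected/un-decodable data
 *	cmd.disk.tran		- transport error (devid omitted)
 *	cmd.disk.dev.rqs.merr	- sense key 0x3 (medium error)
 *	cmd.disk.dev.rqs.derr	- other sense keys
 *	cmd.disk.dev.serr	- status error without sense data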
30786 */ 30787 if (drv_assess == SD_FM_DRV_RECOVERY) { 30788 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30789 "cmd.disk.recovered", uscsi_ena, devid, DDI_NOSLEEP, 30790 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30791 "driver-assessment", DATA_TYPE_STRING, assessment, 30792 "op-code", DATA_TYPE_UINT8, op_code, 30793 "cdb", DATA_TYPE_UINT8_ARRAY, 30794 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30795 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30796 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30797 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics, 30798 NULL); 30799 return; 30800 } 30801 30802 /* 30803 * If there is un-expected/un-decodable data, we should post 30804 * ereport.io.scsi.cmd.disk.dev.uderr. 30805 * driver-assessment will be set based on parameter drv_assess. 30806 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back. 30807 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered. 30808 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered. 30809 * SSC_FLAGS_INVALID_DATA - invalid data sent back. 30810 */ 30811 if (ssc->ssc_flags & ssc_invalid_flags) { 30812 if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) { 30813 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30814 "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP, 30815 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30816 "driver-assessment", DATA_TYPE_STRING, 30817 drv_assess == SD_FM_DRV_FATAL ? 30818 "fail" : assessment, 30819 "op-code", DATA_TYPE_UINT8, op_code, 30820 "cdb", DATA_TYPE_UINT8_ARRAY, 30821 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30822 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30823 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30824 "pkt-stats", DATA_TYPE_UINT32, 30825 uscsi_pkt_statistics, 30826 "stat-code", DATA_TYPE_UINT8, 30827 ssc->ssc_uscsi_cmd->uscsi_status, 30828 "un-decode-info", DATA_TYPE_STRING, 30829 ssc->ssc_info, 30830 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 30831 senlen, sensep, 30832 NULL); 30833 } else { 30834 /* 30835 * For other type of invalid data, the 30836 * un-decode-value field would be empty because the 30837 * un-decodable content could be seen from upper 30838 * level payload or inside un-decode-info. 30839 */ 30840 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30841 "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP, 30842 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30843 "driver-assessment", DATA_TYPE_STRING, 30844 drv_assess == SD_FM_DRV_FATAL ? 30845 "fail" : assessment, 30846 "op-code", DATA_TYPE_UINT8, op_code, 30847 "cdb", DATA_TYPE_UINT8_ARRAY, 30848 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30849 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30850 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30851 "pkt-stats", DATA_TYPE_UINT32, 30852 uscsi_pkt_statistics, 30853 "stat-code", DATA_TYPE_UINT8, 30854 ssc->ssc_uscsi_cmd->uscsi_status, 30855 "un-decode-info", DATA_TYPE_STRING, 30856 ssc->ssc_info, 30857 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 30858 0, NULL, 30859 NULL); 30860 } 30861 ssc->ssc_flags &= ~ssc_invalid_flags; 30862 return; 30863 } 30864 30865 if (uscsi_pkt_reason != CMD_CMPLT || 30866 (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) { 30867 /* 30868 * pkt-reason != CMD_CMPLT or SSC_FLAGS_TRAN_ABORT was 30869 * set inside sd_start_cmds due to errors(bad packet or 30870 * fatal transport error), we should take it as a 30871 * transport error, so we post ereport.io.scsi.cmd.disk.tran. 30872 * driver-assessment will be set based on drv_assess. 
30873 * We will set devid to NULL because it is a transport 30874 * error. 30875 */ 30876 if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT) 30877 ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT; 30878 30879 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30880 "cmd.disk.tran", uscsi_ena, NULL, DDI_NOSLEEP, FM_VERSION, 30881 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30882 "driver-assessment", DATA_TYPE_STRING, 30883 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment, 30884 "op-code", DATA_TYPE_UINT8, op_code, 30885 "cdb", DATA_TYPE_UINT8_ARRAY, 30886 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30887 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30888 "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state, 30889 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics, 30890 NULL); 30891 } else { 30892 /* 30893 * If we got here, we have a completed command, and we need 30894 * to further investigate the sense data to see what kind 30895 * of ereport we should post. 30896 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr 30897 * if sense-key == 0x3. 30898 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise. 30899 * driver-assessment will be set based on the parameter 30900 * drv_assess. 30901 */ 30902 if (senlen > 0) { 30903 /* 30904 * Here we have sense data available. 30905 */ 30906 uint8_t sense_key; 30907 sense_key = scsi_sense_key(sensep); 30908 if (sense_key == 0x3) { 30909 /* 30910 * sense-key == 0x3(medium error), 30911 * driver-assessment should be "fatal" if 30912 * drv_assess is SD_FM_DRV_FATAL. 30913 */ 30914 scsi_fm_ereport_post(un->un_sd, 30915 uscsi_path_instance, 30916 "cmd.disk.dev.rqs.merr", 30917 uscsi_ena, devid, DDI_NOSLEEP, FM_VERSION, 30918 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30919 "driver-assessment", 30920 DATA_TYPE_STRING, 30921 drv_assess == SD_FM_DRV_FATAL ? 30922 "fatal" : assessment, 30923 "op-code", 30924 DATA_TYPE_UINT8, op_code, 30925 "cdb", 30926 DATA_TYPE_UINT8_ARRAY, cdblen, 30927 ssc->ssc_uscsi_cmd->uscsi_cdb, 30928 "pkt-reason", 30929 DATA_TYPE_UINT8, uscsi_pkt_reason, 30930 "pkt-state", 30931 DATA_TYPE_UINT8, uscsi_pkt_state, 30932 "pkt-stats", 30933 DATA_TYPE_UINT32, 30934 uscsi_pkt_statistics, 30935 "stat-code", 30936 DATA_TYPE_UINT8, 30937 ssc->ssc_uscsi_cmd->uscsi_status, 30938 "key", 30939 DATA_TYPE_UINT8, 30940 scsi_sense_key(sensep), 30941 "asc", 30942 DATA_TYPE_UINT8, 30943 scsi_sense_asc(sensep), 30944 "ascq", 30945 DATA_TYPE_UINT8, 30946 scsi_sense_ascq(sensep), 30947 "sense-data", 30948 DATA_TYPE_UINT8_ARRAY, 30949 senlen, sensep, 30950 "lba", 30951 DATA_TYPE_UINT64, 30952 ssc->ssc_uscsi_info->ui_lba, 30953 NULL); 30954 } else { 30955 /* 30956 * if sense-key == 0x4(hardware 30957 * error), driver-assessment should 30958 * be "fatal" if drv_assess is 30959 * SD_FM_DRV_FATAL. 30960 */ 30961 scsi_fm_ereport_post(un->un_sd, 30962 uscsi_path_instance, 30963 "cmd.disk.dev.rqs.derr", 30964 uscsi_ena, devid, DDI_NOSLEEP, 30965 FM_VERSION, 30966 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30967 "driver-assessment", 30968 DATA_TYPE_STRING, 30969 drv_assess == SD_FM_DRV_FATAL ? 30970 (sense_key == 0x4 ? 
30971 "fatal" : "fail") : assessment, 30972 "op-code", 30973 DATA_TYPE_UINT8, op_code, 30974 "cdb", 30975 DATA_TYPE_UINT8_ARRAY, cdblen, 30976 ssc->ssc_uscsi_cmd->uscsi_cdb, 30977 "pkt-reason", 30978 DATA_TYPE_UINT8, uscsi_pkt_reason, 30979 "pkt-state", 30980 DATA_TYPE_UINT8, uscsi_pkt_state, 30981 "pkt-stats", 30982 DATA_TYPE_UINT32, 30983 uscsi_pkt_statistics, 30984 "stat-code", 30985 DATA_TYPE_UINT8, 30986 ssc->ssc_uscsi_cmd->uscsi_status, 30987 "key", 30988 DATA_TYPE_UINT8, 30989 scsi_sense_key(sensep), 30990 "asc", 30991 DATA_TYPE_UINT8, 30992 scsi_sense_asc(sensep), 30993 "ascq", 30994 DATA_TYPE_UINT8, 30995 scsi_sense_ascq(sensep), 30996 "sense-data", 30997 DATA_TYPE_UINT8_ARRAY, 30998 senlen, sensep, 30999 NULL); 31000 } 31001 } else { 31002 /* 31003 * For stat_code == STATUS_GOOD, this is not a 31004 * hardware error. 31005 */ 31006 if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) 31007 return; 31008 31009 /* 31010 * Post ereport.io.scsi.cmd.disk.dev.serr if we got the 31011 * stat-code but with sense data unavailable. 31012 * driver-assessment will be set based on parameter 31013 * drv_assess. 31014 */ 31015 scsi_fm_ereport_post(un->un_sd, 31016 uscsi_path_instance, "cmd.disk.dev.serr", uscsi_ena, 31017 devid, DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 31018 FM_EREPORT_VERS0, 31019 "driver-assessment", DATA_TYPE_STRING, 31020 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment, 31021 "op-code", DATA_TYPE_UINT8, op_code, 31022 "cdb", 31023 DATA_TYPE_UINT8_ARRAY, 31024 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 31025 "pkt-reason", 31026 DATA_TYPE_UINT8, uscsi_pkt_reason, 31027 "pkt-state", 31028 DATA_TYPE_UINT8, uscsi_pkt_state, 31029 "pkt-stats", 31030 DATA_TYPE_UINT32, uscsi_pkt_statistics, 31031 "stat-code", 31032 DATA_TYPE_UINT8, 31033 ssc->ssc_uscsi_cmd->uscsi_status, 31034 NULL); 31035 } 31036 } 31037 } 31038 31039 /* 31040 * Function: sd_ssc_extract_info 31041 * 31042 * Description: Extract information available to help generate ereport. 31043 * 31044 * Context: Kernel thread or interrupt context. 31045 */ 31046 static void 31047 sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp, 31048 struct buf *bp, struct sd_xbuf *xp) 31049 { 31050 size_t senlen = 0; 31051 union scsi_cdb *cdbp; 31052 int path_instance; 31053 /* 31054 * Need scsi_cdb_size array to determine the cdb length. 31055 */ 31056 extern uchar_t scsi_cdb_size[]; 31057 31058 ASSERT(un != NULL); 31059 ASSERT(pktp != NULL); 31060 ASSERT(bp != NULL); 31061 ASSERT(xp != NULL); 31062 ASSERT(ssc != NULL); 31063 ASSERT(mutex_owned(SD_MUTEX(un))); 31064 31065 /* 31066 * Transfer the cdb buffer pointer here. 31067 */ 31068 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 31069 31070 ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)]; 31071 ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp; 31072 31073 /* 31074 * Transfer the sense data buffer pointer if sense data is available, 31075 * calculate the sense data length first. 31076 */ 31077 if ((xp->xb_sense_state & STATE_XARQ_DONE) || 31078 (xp->xb_sense_state & STATE_ARQ_DONE)) { 31079 /* 31080 * For arq case, we will enter here. 31081 */ 31082 if (xp->xb_sense_state & STATE_XARQ_DONE) { 31083 senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid; 31084 } else { 31085 senlen = SENSE_LENGTH; 31086 } 31087 } else { 31088 /* 31089 * For non-arq case, we will enter this branch. 
31090 	 */
31091 		if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK &&
31092 		    (xp->xb_sense_state & STATE_XFERRED_DATA)) {
31093 			senlen = SENSE_LENGTH - xp->xb_sense_resid;
31094 		}
31095 
31096 	}
31097 
31098 	ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff);
31099 	ssc->ssc_uscsi_cmd->uscsi_rqresid = 0;
31100 	ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data;
31101 
31102 	ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
31103 
31104 	/*
31105 	 * Only transfer path_instance when the scsi_pkt was properly
	 * allocated.
31106 	 */
31107 	path_instance = pktp->pkt_path_instance;
31108 	if (scsi_pkt_allocated_correctly(pktp) && path_instance)
31109 		ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance;
31110 	else
31111 		ssc->ssc_uscsi_cmd->uscsi_path_instance = 0;
31112 
31113 	/*
31114 	 * Copy in the other fields we may need when posting an ereport.
31115 	 */
31116 	ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason;
31117 	ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state;
31118 	ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics;
31119 	ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp);
31120 
31121 	/*
31122 	 * For a (partial) read/write command that completed successfully, we
31123 	 * will not create an ena; a successful command is not "recovered".
31124 	 */
31125 	if ((pktp->pkt_reason == CMD_CMPLT) &&
31126 	    (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) &&
31127 	    (senlen == 0)) {
31128 		return;
31129 	}
31130 
31131 	/*
31132 	 * To associate ereports of a single command execution flow, we
31133 	 * need a shared ena for a specific command.
31134 	 */
31135 	if (xp->xb_ena == 0)
31136 		xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
31137 	ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
31138 }
31139 
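/*
 * For reference, the sd_ssc_* routines above are used together in the
 * pattern below (adapted from sd_tg_getinfo() earlier in this file); the
 * SD_FMT_STATUS_CHECK vs. SD_FMT_IGNORE choice controls how the failure
 * is assessed before any ereport is posted:
 *
 *	sd_ssc_t *ssc = sd_ssc_init(un);
 *	ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
 *	    &lbasize, path_flag);
 *	if (ret != 0)
 *		sd_ssc_assessment(ssc,
 *		    (ret == EIO) ? SD_FMT_STATUS_CHECK : SD_FMT_IGNORE);
 *	sd_ssc_fini(ssc);
 */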