/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>

#include <sys/fm/protocol.h>

/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else /* !__fibre */
#define	SD_MODULE_NAME	"SCSI Disk Driver"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif /* !__fibre */

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides, things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
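/*
 * Illustrative sketch (not actual driver code): the selection described
 * above amounts to the following, where 'itype' stands for the value
 * read from the HBA's "interconnect-type" property (hypothetical local):
 *
 *	switch (itype) {
 *	case INTERCONNECT_SSA:
 *	case INTERCONNECT_FIBRE:
 *	case INTERCONNECT_FABRIC:
 *		assume Fibre Channel behaviors (as per the old ssd);
 *		break;
 *	default:
 *		assume parallel SCSI behaviors (including the 1394 and
 *		USB types, and the case where the property is absent);
 *		break;
 *	}
 */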
/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif
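/*
 * For reference, a sketch of how these properties appear in sd.conf (or
 * ssd.conf). The tunable names below are illustrative examples of the
 * "name:value" pairs decoded by sd_nvpair_str_decode()/sd_set_properties();
 * see those routines for the authoritative list:
 *
 *	sd-config-list =
 *	    "SEAGATE ST34371FC", "throttle-max:16, disksort:false";
 *
 * The first string of each pair is a vid/pid match, the second a
 * comma-separated list of tunables to apply to matching devices.
 */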
/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#define	sd_ssc_init			ssd_ssc_init
#define	sd_ssc_send			ssd_ssc_send
#define	sd_ssc_fini			ssd_ssc_fini
#define	sd_ssc_assessment		ssd_ssc_assessment
#define	sd_ssc_post			ssd_ssc_post
#define	sd_ssc_print			ssd_ssc_print
#define	sd_ssc_ereport_post		ssd_ssc_ereport_post
#define	sd_ssc_set_info			ssd_ssc_set_info
#define	sd_ssc_extract_info		ssd_ssc_extract_info

#endif

#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int	sd_qfull_throttle_enable	= TRUE;

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;
/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel SCSI devices only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel SCSI and non-self-identifying devices only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
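/*
 * A minimal sketch of a cache lookup (illustrative only; the authoritative
 * logic lives in sd_scsi_probe_with_cache()): find the node for the
 * probing target's parent HBA, then index by target number.
 *
 *	struct sd_scsi_probe_cache *cp;
 *
 *	mutex_enter(&sd_scsi_probe_cache_mutex);
 *	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
 *		if (cp->pdip == pdip)
 *			break;
 *	}
 *	mutex_exit(&sd_scsi_probe_cache_mutex);
 *
 * A cache slot recording a previous SCSIPROBE_NORESP lets the driver skip
 * the (slow) scsi_probe() call for that target.
 */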
/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device.
 *	 MAM3182FC, MAM3364FC, and MAM3738FC do not appear to have ever
 *	 been made with an FC connection. The entries here are a legacy.
 */
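/*
 * A few worked examples of the matching rules above (illustrative only):
 *
 *	"SEAGATE ST34371FC"	vid "SEAGATE " followed by pid "ST34371FC",
 *				at the offsets defined by scsi_inquiry.
 *	"*CSM100_*"		no specific vendor; matches any device
 *				whose 16-byte PID field contains the
 *				string "CSM100_".
 *	" NEC CD-ROM DRIVE:260 " blank-delimited; consecutive blanks on
 *				either side collapse to a single blank
 *				for the comparison (see sd_blank_cmp()).
 */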
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-22x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-42x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1818",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000i",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT |
	    SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300",     SD_CONF_BSET_THROTTLE,  &lsi_oem_properties },
	{ "LSI",               SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_RSV_REL_TIME,
	    &purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_RSV_REL_TIME|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED,
	    &sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_RSV_REL_TIME,
	    &purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT|
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED|
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT|
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED|
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT|
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED|
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT|
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED|
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT|
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED|
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
	    SD_CONF_BSET_NRR_COUNT|
	    SD_CONF_BSET_BSY_RETRY_COUNT|
	    SD_CONF_BSET_RST_RETRIES|
	    SD_CONF_BSET_MIN_THROTTLE|
	    SD_CONF_BSET_DISKSORT_DISABLED|
	    SD_CONF_BSET_LUN_RESET_ENABLED,
	    &pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT,
	    &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
	    | SD_CONF_BSET_READSUB_BCD
	    | SD_CONF_BSET_READ_TOC_ADDR_BCD
	    | SD_CONF_BSET_NO_READ_HEADER
	    | SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
	    | SD_CONF_BSET_READSUB_BCD
	    | SD_CONF_BSET_READ_TOC_ADDR_BCD
	    | SD_CONF_BSET_NO_READ_HEADER
	    | SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
	    | SD_CONF_BSET_CTYPE
	    | SD_CONF_BSET_NRR_COUNT
	    | SD_CONF_BSET_FAB_DEVID
	    | SD_CONF_BSET_NOCACHE
	    | SD_CONF_BSET_BSY_RETRY_COUNT
	    | SD_CONF_BSET_PLAYMSF_BCD
	    | SD_CONF_BSET_READSUB_BCD
	    | SD_CONF_BSET_READ_TOC_TRK_BCD
	    | SD_CONF_BSET_READ_TOC_ADDR_BCD
	    | SD_CONF_BSET_NO_READ_HEADER
	    | SD_CONF_BSET_READ_CD_XD4
	    | SD_CONF_BSET_RST_RETRIES
	    | SD_CONF_BSET_RSV_REL_TIME
	    | SD_CONF_BSET_TUR_CHECK), &tst_properties },
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);



#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_INTERCONNECT_SAS		5

#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	(((un)->un_interconnect_type == SD_INTERCONNECT_SATA) || \
	((un)->un_interconnect_type == SD_INTERCONNECT_SAS))

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */
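/*
 * For reference, the INQUIRY VPD page header these offsets index into
 * (per SPC; a sketch for orientation, not a definition used by the code):
 *
 *	byte 0	peripheral qualifier / device type
 *	byte 1	page code	(VPD_MODE_PAGE)
 *	byte 2	reserved
 *	byte 3	page length	(VPD_PAGE_LENGTH)
 *	byte 4+	page data
 */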
static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
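/*
 * Usage sketch (illustrative): a transient state change and its undo.
 * The SD_STATE_* values and the un_state/un_last_state fields are
 * defined in sddef.h.
 *
 *	New_state(un, SD_STATE_RWAIT);	remember current state, then switch
 *	...
 *	Restore_state(un);		return to the remembered state
 *
 * Only one level of history is kept, so nested New_state() calls
 * overwrite un_last_state.
 */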
static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};
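/*
 * Reading the table above (fields per struct sd_cdbinfo in sddef.h):
 * CDB group/size, the SCMD_GROUPn bits for the command opcode, the
 * maximum LBA addressable by that CDB form, and the maximum transfer
 * length in blocks. For example, the first row says that 6-byte
 * (Group 0) CDBs address at most a 21-bit LBA (0x1FFFFF) and transfer
 * at most 0xFF blocks.
 */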
/*
 * Specifies the number of seconds that must have elapsed since the last
 * command completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_target_change_task		ssd_target_change_task
#define	sd_log_lun_expansion_event	ssd_log_lun_expansion_event
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_get_media_info_ext		ssd_get_media_info_ext
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_nvpair_str_decode		ssd_nvpair_str_decode
#define	sd_strtok_r			ssd_strtok_r
#define	sd_set_properties		ssd_set_properties
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo
#define	sd_rmw_msg_print_handler	ssd_rmw_msg_print_handler

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);
/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target,
    int flag);

static int sd_spin_up_unit(sd_ssc_t *ssc);

/*
 * Use sd_ssc_init() to establish an sd_ssc_t struct, sd_ssc_send() to
 * send an internal uscsi command, and sd_ssc_fini() to free the struct.
 */
static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
    int flag, enum uio_seg dataspace, int path_flag);
static void sd_ssc_fini(sd_ssc_t *ssc);
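/*
 * Typical lifecycle (sketch; the uscsi_cmd setup is omitted and the
 * flags shown are only an example):
 *
 *	sd_ssc_t	*ssc;
 *	struct uscsi_cmd ucmd;
 *
 *	ssc = sd_ssc_init(un);
 *	...build the CDB and fill in ucmd...
 *	rval = sd_ssc_send(ssc, &ucmd, FKIOCTL, UIO_SYSSPACE, path_flag);
 *	...call sd_ssc_assessment()/sd_ssc_post() as appropriate...
 *	sd_ssc_fini(ssc);
 */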
/*
 * Use sd_ssc_assessment() to set the correct type-of-assessment, and
 * sd_ssc_post() to post an ereport & system log. sd_ssc_post() calls
 * sd_ssc_print() to print the system log and sd_ssc_ereport_post() to
 * post the ereport.
 */
static void sd_ssc_assessment(sd_ssc_t *ssc,
    enum sd_type_assessment tp_assess);

static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
static void sd_ssc_ereport_post(sd_ssc_t *ssc,
    enum sd_driver_assessment drv_assess);

/*
 * Use sd_ssc_set_info() to mark an un-decodable-data error, and
 * sd_ssc_extract_info() to transfer information from internal
 * data structures to sd_ssc_t.
 */
static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp,
    const char *fmt, ...);
static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

#ifdef _LP64
static void sd_enable_descr_sense(sd_ssc_t *ssc);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(sd_ssc_t *ssc);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
static void sd_set_properties(struct sd_lun *un, char *name, char *value);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(sd_ssc_t *ssc);
static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
static int sd_write_deviceid(sd_ssc_t *ssc);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(sd_ssc_t *ssc);

static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
static void sd_get_nv_sup(sd_ssc_t *ssc);
static dev_t sd_make_device(dev_info_t *devi);
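/*
 * Example invocation of sd_cache_control() (sketch): enable the write
 * cache and leave the read cache setting untouched. The two flags
 * correspond to the RCD and WCE bits of the caching mode page;
 * SD_CACHE_NOCHANGE leaves a bit as-is.
 *
 *	rval = sd_cache_control(ssc, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE);
 */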
static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(sd_ssc_t *ssc, int part);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);
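/*
 * Layering sketch (illustrative): each iostart routine transforms bp and
 * hands the request to the next layer via the next index into the
 * driver's chain table, down to sd_core_iostart(), which actually issues
 * the command. On completion the iodone routines unwind in the reverse
 * order, ending at sd_buf_iodone() (or sd_uscsi_iodone()).
 *
 *	sd_mapblockaddr_iostart(index, un, bp)
 *	  -> ...intermediate layers...
 *	    -> sd_core_iostart(index, un, bp)	issue to the HBA
 *	  <- corresponding *_iodone() calls on the way back up
 */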
/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_target_change_task(void *arg);
static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp,
    uint32_t *lbap, int path_flag);
*capp, 1468 uint32_t *lbap, uint32_t *psp, int path_flag); 1469 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, 1470 int path_flag); 1471 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, 1472 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp); 1473 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag); 1474 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, 1475 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp); 1476 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, 1477 uchar_t usr_cmd, uchar_t *usr_bufp); 1478 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, 1479 struct dk_callback *dkc); 1480 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp); 1481 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, 1482 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1483 uchar_t *bufaddr, uint_t buflen, int path_flag); 1484 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 1485 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1486 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag); 1487 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, 1488 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag); 1489 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, 1490 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag); 1491 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 1492 size_t buflen, daddr_t start_block, int path_flag); 1493 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \ 1494 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \ 1495 path_flag) 1496 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\ 1497 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\ 1498 path_flag) 1499 1500 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, 1501 uint16_t buflen, uchar_t page_code, uchar_t page_control, 1502 uint16_t param_ptr, int path_flag); 1503 1504 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un); 1505 static void sd_free_rqs(struct sd_lun *un); 1506 1507 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, 1508 uchar_t *data, int len, int fmt); 1509 static void sd_panic_for_res_conflict(struct sd_lun *un); 1510 1511 /* 1512 * Disk Ioctl Function Prototypes 1513 */ 1514 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag); 1515 static int sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag); 1516 static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag); 1517 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag); 1518 1519 /* 1520 * Multi-host Ioctl Prototypes 1521 */ 1522 static int sd_check_mhd(dev_t dev, int interval); 1523 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1524 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1525 static char *sd_sname(uchar_t status); 1526 static void sd_mhd_resvd_recover(void *arg); 1527 static void sd_resv_reclaim_thread(); 1528 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1529 static int sd_reserve_release(dev_t dev, int cmd); 1530 static void sd_rmv_resv_reclaim_req(dev_t dev); 1531 static void sd_mhd_reset_notify_cb(caddr_t arg); 1532 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1533 mhioc_inkeys_t *usrp, int flag); 1534 static int sd_persistent_reservation_in_read_resv(struct 
sd_lun *un, 1535 mhioc_inresvs_t *usrp, int flag); 1536 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1537 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1538 static int sd_mhdioc_release(dev_t dev); 1539 static int sd_mhdioc_register_devid(dev_t dev); 1540 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1541 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1542 1543 /* 1544 * SCSI removable prototypes 1545 */ 1546 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1547 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1548 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1549 static int sr_pause_resume(dev_t dev, int mode); 1550 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1551 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1552 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1553 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1554 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1555 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1556 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1557 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1558 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1559 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1560 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1561 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1562 static int sr_eject(dev_t dev); 1563 static void sr_ejected(register struct sd_lun *un); 1564 static int sr_check_wp(dev_t dev); 1565 static int sd_check_media(dev_t dev, enum dkio_state state); 1566 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1567 static void sd_delayed_cv_broadcast(void *arg); 1568 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1569 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1570 1571 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page); 1572 1573 /* 1574 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1575 */ 1576 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag); 1577 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1578 static void sd_wm_cache_destructor(void *wm, void *un); 1579 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1580 daddr_t endb, ushort_t typ); 1581 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1582 daddr_t endb); 1583 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1584 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1585 static void sd_read_modify_write_task(void * arg); 1586 static int 1587 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1588 struct buf **bpp); 1589 1590 1591 /* 1592 * Function prototypes for failfast support. 
1593 */ 1594 static void sd_failfast_flushq(struct sd_lun *un); 1595 static int sd_failfast_flushq_callback(struct buf *bp); 1596 1597 /* 1598 * Function prototypes to check for lsi devices 1599 */ 1600 static void sd_is_lsi(struct sd_lun *un); 1601 1602 /* 1603 * Function prototypes for partial DMA support 1604 */ 1605 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1606 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1607 1608 1609 /* Function prototypes for cmlb */ 1610 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1611 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1612 1613 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1614 1615 /* 1616 * For printing RMW warning message timely 1617 */ 1618 static void sd_rmw_msg_print_handler(void *arg); 1619 1620 /* 1621 * Constants for failfast support: 1622 * 1623 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1624 * failfast processing being performed. 1625 * 1626 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1627 * failfast processing on all bufs with B_FAILFAST set. 1628 */ 1629 1630 #define SD_FAILFAST_INACTIVE 0 1631 #define SD_FAILFAST_ACTIVE 1 1632 1633 /* 1634 * Bitmask to control behavior of buf(9S) flushes when a transition to 1635 * the failfast state occurs. Optional bits include: 1636 * 1637 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1638 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1639 * be flushed. 1640 * 1641 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1642 * driver, in addition to the regular wait queue. This includes the xbuf 1643 * queues. When clear, only the driver's wait queue will be flushed. 1644 */ 1645 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1646 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1647 1648 /* 1649 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1650 * to flush all queues within the driver. 1651 */ 1652 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1653 1654 1655 /* 1656 * SD Testing Fault Injection 1657 */ 1658 #ifdef SD_FAULT_INJECTION 1659 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1660 static void sd_faultinjection(struct scsi_pkt *pktp); 1661 static void sd_injection_log(char *buf, struct sd_lun *un); 1662 #endif 1663 1664 /* 1665 * Device driver ops vector 1666 */ 1667 static struct cb_ops sd_cb_ops = { 1668 sdopen, /* open */ 1669 sdclose, /* close */ 1670 sdstrategy, /* strategy */ 1671 nodev, /* print */ 1672 sddump, /* dump */ 1673 sdread, /* read */ 1674 sdwrite, /* write */ 1675 sdioctl, /* ioctl */ 1676 nodev, /* devmap */ 1677 nodev, /* mmap */ 1678 nodev, /* segmap */ 1679 nochpoll, /* poll */ 1680 sd_prop_op, /* cb_prop_op */ 1681 0, /* streamtab */ 1682 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1683 CB_REV, /* cb_rev */ 1684 sdaread, /* async I/O read entry point */ 1685 sdawrite /* async I/O write entry point */ 1686 }; 1687 1688 struct dev_ops sd_ops = { 1689 DEVO_REV, /* devo_rev, */ 1690 0, /* refcnt */ 1691 sdinfo, /* info */ 1692 nulldev, /* identify */ 1693 sdprobe, /* probe */ 1694 sdattach, /* attach */ 1695 sddetach, /* detach */ 1696 nodev, /* reset */ 1697 &sd_cb_ops, /* driver operations */ 1698 NULL, /* bus operations */ 1699 sdpower, /* power */ 1700 ddi_quiesce_not_needed, /* quiesce */ 1701 }; 1702 1703 /* 1704 * This is the loadable module wrapper. 
1705 */ 1706 #include <sys/modctl.h> 1707 1708 #ifndef XPV_HVM_DRIVER 1709 static struct modldrv modldrv = { 1710 &mod_driverops, /* Type of module. This one is a driver */ 1711 SD_MODULE_NAME, /* Module name. */ 1712 &sd_ops /* driver ops */ 1713 }; 1714 1715 static struct modlinkage modlinkage = { 1716 MODREV_1, &modldrv, NULL 1717 }; 1718 1719 #else /* XPV_HVM_DRIVER */ 1720 static struct modlmisc modlmisc = { 1721 &mod_miscops, /* Type of module. This one is a misc */ 1722 "HVM " SD_MODULE_NAME, /* Module name. */ 1723 }; 1724 1725 static struct modlinkage modlinkage = { 1726 MODREV_1, &modlmisc, NULL 1727 }; 1728 1729 #endif /* XPV_HVM_DRIVER */ 1730 1731 static cmlb_tg_ops_t sd_tgops = { 1732 TG_DK_OPS_VERSION_1, 1733 sd_tg_rdwr, 1734 sd_tg_getinfo 1735 }; 1736 1737 static struct scsi_asq_key_strings sd_additional_codes[] = { 1738 0x81, 0, "Logical Unit is Reserved", 1739 0x85, 0, "Audio Address Not Valid", 1740 0xb6, 0, "Media Load Mechanism Failed", 1741 0xB9, 0, "Audio Play Operation Aborted", 1742 0xbf, 0, "Buffer Overflow for Read All Subcodes Command", 1743 0x53, 2, "Medium removal prevented", 1744 0x6f, 0, "Authentication failed during key exchange", 1745 0x6f, 1, "Key not present", 1746 0x6f, 2, "Key not established", 1747 0x6f, 3, "Read without proper authentication", 1748 0x6f, 4, "Mismatched region to this logical unit", 1749 0x6f, 5, "Region reset count error", 1750 0xffff, 0x0, NULL 1751 }; 1752 1753 1754 /* 1755 * Struct for passing printing information for sense data messages 1756 */ 1757 struct sd_sense_info { 1758 int ssi_severity; 1759 int ssi_pfa_flag; 1760 }; 1761 1762 /* 1763 * Table of function pointers for iostart-side routines. Separate "chains" 1764 * of layered function calls are formed by placing the function pointers 1765 * sequentially in the desired order. Functions are called according to an 1766 * incrementing table index ordering. The last function in each chain must 1767 * be sd_core_iostart(). The corresponding iodone-side routines are expected 1768 * in the sd_iodone_chain[] array. 1769 * 1770 * Note: It may seem more natural to organize both the iostart and iodone 1771 * functions together, into an array of structures (or some similar 1772 * organization) with a common index, rather than two separate arrays which 1773 * must be maintained in synchronization. The purpose of this division is 1774 * to achieve improved performance: individual arrays allows for more 1775 * effective cache line utilization on certain platforms. 
1776 */ 1777 1778 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1779 1780 1781 static sd_chain_t sd_iostart_chain[] = { 1782 1783 /* Chain for buf IO for disk drive targets (PM enabled) */ 1784 sd_mapblockaddr_iostart, /* Index: 0 */ 1785 sd_pm_iostart, /* Index: 1 */ 1786 sd_core_iostart, /* Index: 2 */ 1787 1788 /* Chain for buf IO for disk drive targets (PM disabled) */ 1789 sd_mapblockaddr_iostart, /* Index: 3 */ 1790 sd_core_iostart, /* Index: 4 */ 1791 1792 /* 1793 * Chain for buf IO for removable-media or large sector size 1794 * disk drive targets with RMW needed (PM enabled) 1795 */ 1796 sd_mapblockaddr_iostart, /* Index: 5 */ 1797 sd_mapblocksize_iostart, /* Index: 6 */ 1798 sd_pm_iostart, /* Index: 7 */ 1799 sd_core_iostart, /* Index: 8 */ 1800 1801 /* 1802 * Chain for buf IO for removable-media or large sector size 1803 * disk drive targets with RMW needed (PM disabled) 1804 */ 1805 sd_mapblockaddr_iostart, /* Index: 9 */ 1806 sd_mapblocksize_iostart, /* Index: 10 */ 1807 sd_core_iostart, /* Index: 11 */ 1808 1809 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1810 sd_mapblockaddr_iostart, /* Index: 12 */ 1811 sd_checksum_iostart, /* Index: 13 */ 1812 sd_pm_iostart, /* Index: 14 */ 1813 sd_core_iostart, /* Index: 15 */ 1814 1815 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1816 sd_mapblockaddr_iostart, /* Index: 16 */ 1817 sd_checksum_iostart, /* Index: 17 */ 1818 sd_core_iostart, /* Index: 18 */ 1819 1820 /* Chain for USCSI commands (all targets) */ 1821 sd_pm_iostart, /* Index: 19 */ 1822 sd_core_iostart, /* Index: 20 */ 1823 1824 /* Chain for checksumming USCSI commands (all targets) */ 1825 sd_checksum_uscsi_iostart, /* Index: 21 */ 1826 sd_pm_iostart, /* Index: 22 */ 1827 sd_core_iostart, /* Index: 23 */ 1828 1829 /* Chain for "direct" USCSI commands (all targets) */ 1830 sd_core_iostart, /* Index: 24 */ 1831 1832 /* Chain for "direct priority" USCSI commands (all targets) */ 1833 sd_core_iostart, /* Index: 25 */ 1834 1835 /* 1836 * Chain for buf IO for large sector size disk drive targets 1837 * with RMW needed with checksumming (PM enabled) 1838 */ 1839 sd_mapblockaddr_iostart, /* Index: 26 */ 1840 sd_mapblocksize_iostart, /* Index: 27 */ 1841 sd_checksum_iostart, /* Index: 28 */ 1842 sd_pm_iostart, /* Index: 29 */ 1843 sd_core_iostart, /* Index: 30 */ 1844 1845 /* 1846 * Chain for buf IO for large sector size disk drive targets 1847 * with RMW needed with checksumming (PM disabled) 1848 */ 1849 sd_mapblockaddr_iostart, /* Index: 31 */ 1850 sd_mapblocksize_iostart, /* Index: 32 */ 1851 sd_checksum_iostart, /* Index: 33 */ 1852 sd_core_iostart, /* Index: 34 */ 1853 1854 }; 1855 1856 /* 1857 * Macros to locate the first function of each iostart chain in the 1858 * sd_iostart_chain[] array. These are located by the index in the array. 
1859 */ 1860 #define SD_CHAIN_DISK_IOSTART 0 1861 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1862 #define SD_CHAIN_MSS_DISK_IOSTART 5 1863 #define SD_CHAIN_RMMEDIA_IOSTART 5 1864 #define SD_CHAIN_MSS_DISK_IOSTART_NO_PM 9 1865 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1866 #define SD_CHAIN_CHKSUM_IOSTART 12 1867 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1868 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1869 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1870 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1871 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1872 #define SD_CHAIN_MSS_CHKSUM_IOSTART 26 1873 #define SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM 31 1874 1875 1876 /* 1877 * Table of function pointers for the iodone-side routines for the driver- 1878 * internal layering mechanism. The calling sequence for iodone routines 1879 * uses a decrementing table index, so the last routine called in a chain 1880 * must be at the lowest array index location for that chain. The last 1881 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1882 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1883 * of the functions in an iodone side chain must correspond to the ordering 1884 * of the iostart routines for that chain. Note that there is no iodone 1885 * side routine that corresponds to sd_core_iostart(), so there is no 1886 * entry in the table for this. 1887 */ 1888 1889 static sd_chain_t sd_iodone_chain[] = { 1890 1891 /* Chain for buf IO for disk drive targets (PM enabled) */ 1892 sd_buf_iodone, /* Index: 0 */ 1893 sd_mapblockaddr_iodone, /* Index: 1 */ 1894 sd_pm_iodone, /* Index: 2 */ 1895 1896 /* Chain for buf IO for disk drive targets (PM disabled) */ 1897 sd_buf_iodone, /* Index: 3 */ 1898 sd_mapblockaddr_iodone, /* Index: 4 */ 1899 1900 /* 1901 * Chain for buf IO for removable-media or large sector size 1902 * disk drive targets with RMW needed (PM enabled) 1903 */ 1904 sd_buf_iodone, /* Index: 5 */ 1905 sd_mapblockaddr_iodone, /* Index: 6 */ 1906 sd_mapblocksize_iodone, /* Index: 7 */ 1907 sd_pm_iodone, /* Index: 8 */ 1908 1909 /* 1910 * Chain for buf IO for removable-media or large sector size 1911 * disk drive targets with RMW needed (PM disabled) 1912 */ 1913 sd_buf_iodone, /* Index: 9 */ 1914 sd_mapblockaddr_iodone, /* Index: 10 */ 1915 sd_mapblocksize_iodone, /* Index: 11 */ 1916 1917 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1918 sd_buf_iodone, /* Index: 12 */ 1919 sd_mapblockaddr_iodone, /* Index: 13 */ 1920 sd_checksum_iodone, /* Index: 14 */ 1921 sd_pm_iodone, /* Index: 15 */ 1922 1923 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1924 sd_buf_iodone, /* Index: 16 */ 1925 sd_mapblockaddr_iodone, /* Index: 17 */ 1926 sd_checksum_iodone, /* Index: 18 */ 1927 1928 /* Chain for USCSI commands (non-checksum targets) */ 1929 sd_uscsi_iodone, /* Index: 19 */ 1930 sd_pm_iodone, /* Index: 20 */ 1931 1932 /* Chain for USCSI commands (checksum targets) */ 1933 sd_uscsi_iodone, /* Index: 21 */ 1934 sd_checksum_uscsi_iodone, /* Index: 22 */ 1935 sd_pm_iodone, /* Index: 22 */ 1936 1937 /* Chain for "direct" USCSI commands (all targets) */ 1938 sd_uscsi_iodone, /* Index: 24 */ 1939 1940 /* Chain for "direct priority" USCSI commands (all targets) */ 1941 sd_uscsi_iodone, /* Index: 25 */ 1942 1943 /* 1944 * Chain for buf IO for large sector size disk drive targets 1945 * with checksumming (PM enabled) 1946 */ 1947 sd_buf_iodone, /* Index: 26 */ 1948 sd_mapblockaddr_iodone, /* Index: 27 */ 1949 sd_mapblocksize_iodone, /* Index: 28 */ 1950 
sd_checksum_iodone, /* Index: 29 */ 1951 sd_pm_iodone, /* Index: 30 */ 1952 1953 /* 1954 * Chain for buf IO for large sector size disk drive targets 1955 * with checksumming (PM disabled) 1956 */ 1957 sd_buf_iodone, /* Index: 31 */ 1958 sd_mapblockaddr_iodone, /* Index: 32 */ 1959 sd_mapblocksize_iodone, /* Index: 33 */ 1960 sd_checksum_iodone, /* Index: 34 */ 1961 }; 1962 1963 1964 /* 1965 * Macros to locate the "first" function in the sd_iodone_chain[] array for 1966 * each iodone-side chain. These are located by the array index, but as the 1967 * iodone side functions are called in a decrementing-index order, the 1968 * highest index number in each chain must be specified (as these correspond 1969 * to the first function in the iodone chain that will be called by the core 1970 * at IO completion time). 1971 */ 1972 1973 #define SD_CHAIN_DISK_IODONE 2 1974 #define SD_CHAIN_DISK_IODONE_NO_PM 4 1975 #define SD_CHAIN_RMMEDIA_IODONE 8 1976 #define SD_CHAIN_MSS_DISK_IODONE 8 1977 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11 1978 #define SD_CHAIN_MSS_DISK_IODONE_NO_PM 11 1979 #define SD_CHAIN_CHKSUM_IODONE 15 1980 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18 1981 #define SD_CHAIN_USCSI_CMD_IODONE 20 1982 #define SD_CHAIN_USCSI_CHKSUM_IODONE 22 1983 #define SD_CHAIN_DIRECT_CMD_IODONE 24 1984 #define SD_CHAIN_PRIORITY_CMD_IODONE 25 1985 #define SD_CHAIN_MSS_CHKSUM_IODONE 30 1986 #define SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM 34 1987 1988 1989 1990 /* 1991 * Array to map a layering chain index to the appropriate initpkt routine. 1992 * The redundant entries are present so that the index used for accessing 1993 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1994 * with this table as well. 1995 */ 1996 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **); 1997 1998 static sd_initpkt_t sd_initpkt_map[] = { 1999 2000 /* Chain for buf IO for disk drive targets (PM enabled) */ 2001 sd_initpkt_for_buf, /* Index: 0 */ 2002 sd_initpkt_for_buf, /* Index: 1 */ 2003 sd_initpkt_for_buf, /* Index: 2 */ 2004 2005 /* Chain for buf IO for disk drive targets (PM disabled) */ 2006 sd_initpkt_for_buf, /* Index: 3 */ 2007 sd_initpkt_for_buf, /* Index: 4 */ 2008 2009 /* 2010 * Chain for buf IO for removable-media or large sector size 2011 * disk drive targets (PM enabled) 2012 */ 2013 sd_initpkt_for_buf, /* Index: 5 */ 2014 sd_initpkt_for_buf, /* Index: 6 */ 2015 sd_initpkt_for_buf, /* Index: 7 */ 2016 sd_initpkt_for_buf, /* Index: 8 */ 2017 2018 /* 2019 * Chain for buf IO for removable-media or large sector size 2020 * disk drive targets (PM disabled) 2021 */ 2022 sd_initpkt_for_buf, /* Index: 9 */ 2023 sd_initpkt_for_buf, /* Index: 10 */ 2024 sd_initpkt_for_buf, /* Index: 11 */ 2025 2026 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 2027 sd_initpkt_for_buf, /* Index: 12 */ 2028 sd_initpkt_for_buf, /* Index: 13 */ 2029 sd_initpkt_for_buf, /* Index: 14 */ 2030 sd_initpkt_for_buf, /* Index: 15 */ 2031 2032 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 2033 sd_initpkt_for_buf, /* Index: 16 */ 2034 sd_initpkt_for_buf, /* Index: 17 */ 2035 sd_initpkt_for_buf, /* Index: 18 */ 2036 2037 /* Chain for USCSI commands (non-checksum targets) */ 2038 sd_initpkt_for_uscsi, /* Index: 19 */ 2039 sd_initpkt_for_uscsi, /* Index: 20 */ 2040 2041 /* Chain for USCSI commands (checksum targets) */ 2042 sd_initpkt_for_uscsi, /* Index: 21 */ 2043 sd_initpkt_for_uscsi, /* Index: 22 */ 2044 sd_initpkt_for_uscsi, /* Index: 22 */ 2045 2046 /* Chain for "direct" USCSI 
commands (all targets) */ 2047 sd_initpkt_for_uscsi, /* Index: 24 */ 2048 2049 /* Chain for "direct priority" USCSI commands (all targets) */ 2050 sd_initpkt_for_uscsi, /* Index: 25 */ 2051 2052 /* 2053 * Chain for buf IO for large sector size disk drive targets 2054 * with checksumming (PM enabled) 2055 */ 2056 sd_initpkt_for_buf, /* Index: 26 */ 2057 sd_initpkt_for_buf, /* Index: 27 */ 2058 sd_initpkt_for_buf, /* Index: 28 */ 2059 sd_initpkt_for_buf, /* Index: 29 */ 2060 sd_initpkt_for_buf, /* Index: 30 */ 2061 2062 /* 2063 * Chain for buf IO for large sector size disk drive targets 2064 * with checksumming (PM disabled) 2065 */ 2066 sd_initpkt_for_buf, /* Index: 31 */ 2067 sd_initpkt_for_buf, /* Index: 32 */ 2068 sd_initpkt_for_buf, /* Index: 33 */ 2069 sd_initpkt_for_buf, /* Index: 34 */ 2070 }; 2071 2072 2073 /* 2074 * Array to map a layering chain index to the appropriate destroypktpkt routine. 2075 * The redundant entries are present so that the index used for accessing 2076 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 2077 * with this table as well. 2078 */ 2079 typedef void (*sd_destroypkt_t)(struct buf *); 2080 2081 static sd_destroypkt_t sd_destroypkt_map[] = { 2082 2083 /* Chain for buf IO for disk drive targets (PM enabled) */ 2084 sd_destroypkt_for_buf, /* Index: 0 */ 2085 sd_destroypkt_for_buf, /* Index: 1 */ 2086 sd_destroypkt_for_buf, /* Index: 2 */ 2087 2088 /* Chain for buf IO for disk drive targets (PM disabled) */ 2089 sd_destroypkt_for_buf, /* Index: 3 */ 2090 sd_destroypkt_for_buf, /* Index: 4 */ 2091 2092 /* 2093 * Chain for buf IO for removable-media or large sector size 2094 * disk drive targets (PM enabled) 2095 */ 2096 sd_destroypkt_for_buf, /* Index: 5 */ 2097 sd_destroypkt_for_buf, /* Index: 6 */ 2098 sd_destroypkt_for_buf, /* Index: 7 */ 2099 sd_destroypkt_for_buf, /* Index: 8 */ 2100 2101 /* 2102 * Chain for buf IO for removable-media or large sector size 2103 * disk drive targets (PM disabled) 2104 */ 2105 sd_destroypkt_for_buf, /* Index: 9 */ 2106 sd_destroypkt_for_buf, /* Index: 10 */ 2107 sd_destroypkt_for_buf, /* Index: 11 */ 2108 2109 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 2110 sd_destroypkt_for_buf, /* Index: 12 */ 2111 sd_destroypkt_for_buf, /* Index: 13 */ 2112 sd_destroypkt_for_buf, /* Index: 14 */ 2113 sd_destroypkt_for_buf, /* Index: 15 */ 2114 2115 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 2116 sd_destroypkt_for_buf, /* Index: 16 */ 2117 sd_destroypkt_for_buf, /* Index: 17 */ 2118 sd_destroypkt_for_buf, /* Index: 18 */ 2119 2120 /* Chain for USCSI commands (non-checksum targets) */ 2121 sd_destroypkt_for_uscsi, /* Index: 19 */ 2122 sd_destroypkt_for_uscsi, /* Index: 20 */ 2123 2124 /* Chain for USCSI commands (checksum targets) */ 2125 sd_destroypkt_for_uscsi, /* Index: 21 */ 2126 sd_destroypkt_for_uscsi, /* Index: 22 */ 2127 sd_destroypkt_for_uscsi, /* Index: 22 */ 2128 2129 /* Chain for "direct" USCSI commands (all targets) */ 2130 sd_destroypkt_for_uscsi, /* Index: 24 */ 2131 2132 /* Chain for "direct priority" USCSI commands (all targets) */ 2133 sd_destroypkt_for_uscsi, /* Index: 25 */ 2134 2135 /* 2136 * Chain for buf IO for large sector size disk drive targets 2137 * with checksumming (PM disabled) 2138 */ 2139 sd_destroypkt_for_buf, /* Index: 26 */ 2140 sd_destroypkt_for_buf, /* Index: 27 */ 2141 sd_destroypkt_for_buf, /* Index: 28 */ 2142 sd_destroypkt_for_buf, /* Index: 29 */ 2143 sd_destroypkt_for_buf, /* Index: 30 */ 2144 2145 /* 2146 * Chain 
for buf IO for large sector size disk drive targets 2147 * with checksumming (PM enabled) 2148 */ 2149 sd_destroypkt_for_buf, /* Index: 31 */ 2150 sd_destroypkt_for_buf, /* Index: 32 */ 2151 sd_destroypkt_for_buf, /* Index: 33 */ 2152 sd_destroypkt_for_buf, /* Index: 34 */ 2153 }; 2154 2155 2156 2157 /* 2158 * Array to map a layering chain index to the appropriate chain "type". 2159 * The chain type indicates a specific property/usage of the chain. 2160 * The redundant entries are present so that the index used for accessing 2161 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 2162 * with this table as well. 2163 */ 2164 2165 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */ 2166 #define SD_CHAIN_BUFIO 1 /* regular buf IO */ 2167 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */ 2168 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */ 2169 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */ 2170 /* (for error recovery) */ 2171 2172 static int sd_chain_type_map[] = { 2173 2174 /* Chain for buf IO for disk drive targets (PM enabled) */ 2175 SD_CHAIN_BUFIO, /* Index: 0 */ 2176 SD_CHAIN_BUFIO, /* Index: 1 */ 2177 SD_CHAIN_BUFIO, /* Index: 2 */ 2178 2179 /* Chain for buf IO for disk drive targets (PM disabled) */ 2180 SD_CHAIN_BUFIO, /* Index: 3 */ 2181 SD_CHAIN_BUFIO, /* Index: 4 */ 2182 2183 /* 2184 * Chain for buf IO for removable-media or large sector size 2185 * disk drive targets (PM enabled) 2186 */ 2187 SD_CHAIN_BUFIO, /* Index: 5 */ 2188 SD_CHAIN_BUFIO, /* Index: 6 */ 2189 SD_CHAIN_BUFIO, /* Index: 7 */ 2190 SD_CHAIN_BUFIO, /* Index: 8 */ 2191 2192 /* 2193 * Chain for buf IO for removable-media or large sector size 2194 * disk drive targets (PM disabled) 2195 */ 2196 SD_CHAIN_BUFIO, /* Index: 9 */ 2197 SD_CHAIN_BUFIO, /* Index: 10 */ 2198 SD_CHAIN_BUFIO, /* Index: 11 */ 2199 2200 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 2201 SD_CHAIN_BUFIO, /* Index: 12 */ 2202 SD_CHAIN_BUFIO, /* Index: 13 */ 2203 SD_CHAIN_BUFIO, /* Index: 14 */ 2204 SD_CHAIN_BUFIO, /* Index: 15 */ 2205 2206 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 2207 SD_CHAIN_BUFIO, /* Index: 16 */ 2208 SD_CHAIN_BUFIO, /* Index: 17 */ 2209 SD_CHAIN_BUFIO, /* Index: 18 */ 2210 2211 /* Chain for USCSI commands (non-checksum targets) */ 2212 SD_CHAIN_USCSI, /* Index: 19 */ 2213 SD_CHAIN_USCSI, /* Index: 20 */ 2214 2215 /* Chain for USCSI commands (checksum targets) */ 2216 SD_CHAIN_USCSI, /* Index: 21 */ 2217 SD_CHAIN_USCSI, /* Index: 22 */ 2218 SD_CHAIN_USCSI, /* Index: 23 */ 2219 2220 /* Chain for "direct" USCSI commands (all targets) */ 2221 SD_CHAIN_DIRECT, /* Index: 24 */ 2222 2223 /* Chain for "direct priority" USCSI commands (all targets) */ 2224 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */ 2225 2226 /* 2227 * Chain for buf IO for large sector size disk drive targets 2228 * with checksumming (PM enabled) 2229 */ 2230 SD_CHAIN_BUFIO, /* Index: 26 */ 2231 SD_CHAIN_BUFIO, /* Index: 27 */ 2232 SD_CHAIN_BUFIO, /* Index: 28 */ 2233 SD_CHAIN_BUFIO, /* Index: 29 */ 2234 SD_CHAIN_BUFIO, /* Index: 30 */ 2235 2236 /* 2237 * Chain for buf IO for large sector size disk drive targets 2238 * with checksumming (PM disabled) 2239 */ 2240 SD_CHAIN_BUFIO, /* Index: 31 */ 2241 SD_CHAIN_BUFIO, /* Index: 32 */ 2242 SD_CHAIN_BUFIO, /* Index: 33 */ 2243 SD_CHAIN_BUFIO, /* Index: 34 */ 2244 }; 2245 2246 2247 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. 
*/ 2248 #define SD_IS_BUFIO(xp) \ 2249 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO) 2250 2251 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */ 2252 #define SD_IS_DIRECT_PRIORITY(xp) \ 2253 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY) 2254 2255 2256 2257 /* 2258 * Struct, array, and macros to map a specific chain to the appropriate 2259 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays. 2260 * 2261 * The sd_chain_index_map[] array is used at attach time to set the various 2262 * un_xxx_chain type members of the sd_lun softstate to the specific layering 2263 * chain to be used with the instance. This allows different instances to use 2264 * different chain for buf IO, uscsi IO, etc.. Also, since the xb_chain_iostart 2265 * and xb_chain_iodone index values in the sd_xbuf are initialized to these 2266 * values at sd_xbuf init time, this allows (1) layering chains may be changed 2267 * dynamically & without the use of locking; and (2) a layer may update the 2268 * xb_chain_io[start|done] member in a given xbuf with its current index value, 2269 * to allow for deferred processing of an IO within the same chain from a 2270 * different execution context. 2271 */ 2272 2273 struct sd_chain_index { 2274 int sci_iostart_index; 2275 int sci_iodone_index; 2276 }; 2277 2278 static struct sd_chain_index sd_chain_index_map[] = { 2279 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2280 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2281 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2282 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2283 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2284 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2285 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2286 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2287 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2288 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2289 { SD_CHAIN_MSS_CHKSUM_IOSTART, SD_CHAIN_MSS_CHKSUM_IODONE }, 2290 { SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM, SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM }, 2291 2292 }; 2293 2294 2295 /* 2296 * The following are indexes into the sd_chain_index_map[] array. 2297 */ 2298 2299 /* un->un_buf_chain_type must be set to one of these */ 2300 #define SD_CHAIN_INFO_DISK 0 2301 #define SD_CHAIN_INFO_DISK_NO_PM 1 2302 #define SD_CHAIN_INFO_RMMEDIA 2 2303 #define SD_CHAIN_INFO_MSS_DISK 2 2304 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2305 #define SD_CHAIN_INFO_MSS_DSK_NO_PM 3 2306 #define SD_CHAIN_INFO_CHKSUM 4 2307 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2308 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM 10 2309 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM_NO_PM 11 2310 2311 /* un->un_uscsi_chain_type must be set to one of these */ 2312 #define SD_CHAIN_INFO_USCSI_CMD 6 2313 /* USCSI with PM disabled is the same as DIRECT */ 2314 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2315 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2316 2317 /* un->un_direct_chain_type must be set to one of these */ 2318 #define SD_CHAIN_INFO_DIRECT_CMD 8 2319 2320 /* un->un_priority_chain_type must be set to one of these */ 2321 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2322 2323 /* size for devid inquiries */ 2324 #define MAX_INQUIRY_SIZE 0xF0 2325 2326 /* 2327 * Macros used by functions to pass a given buf(9S) struct along to the 2328 * next function in the layering chain for further processing. 
2329 * 2330 * In the following macros, passing more than three arguments to the called 2331 * routines causes the optimizer for the SPARC compiler to stop doing tail 2332 * call elimination which results in significant performance degradation. 2333 */ 2334 #define SD_BEGIN_IOSTART(index, un, bp) \ 2335 ((*(sd_iostart_chain[index]))(index, un, bp)) 2336 2337 #define SD_BEGIN_IODONE(index, un, bp) \ 2338 ((*(sd_iodone_chain[index]))(index, un, bp)) 2339 2340 #define SD_NEXT_IOSTART(index, un, bp) \ 2341 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2342 2343 #define SD_NEXT_IODONE(index, un, bp) \ 2344 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2345 2346 /* 2347 * Function: _init 2348 * 2349 * Description: This is the driver _init(9E) entry point. 2350 * 2351 * Return Code: Returns the value from mod_install(9F) or 2352 * ddi_soft_state_init(9F) as appropriate. 2353 * 2354 * Context: Called when driver module loaded. 2355 */ 2356 2357 int 2358 _init(void) 2359 { 2360 int err; 2361 2362 /* establish driver name from module name */ 2363 sd_label = (char *)mod_modname(&modlinkage); 2364 2365 #ifndef XPV_HVM_DRIVER 2366 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2367 SD_MAXUNIT); 2368 if (err != 0) { 2369 return (err); 2370 } 2371 2372 #else /* XPV_HVM_DRIVER */ 2373 /* Remove the leading "hvm_" from the module name */ 2374 ASSERT(strncmp(sd_label, "hvm_", strlen("hvm_")) == 0); 2375 sd_label += strlen("hvm_"); 2376 2377 #endif /* XPV_HVM_DRIVER */ 2378 2379 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2380 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2381 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2382 2383 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2384 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2385 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2386 2387 /* 2388 * it's ok to init here even for fibre device 2389 */ 2390 sd_scsi_probe_cache_init(); 2391 2392 sd_scsi_target_lun_init(); 2393 2394 /* 2395 * Creating taskq before mod_install ensures that all callers (threads) 2396 * that enter the module after a successful mod_install encounter 2397 * a valid taskq. 2398 */ 2399 sd_taskq_create(); 2400 2401 err = mod_install(&modlinkage); 2402 if (err != 0) { 2403 /* delete taskq if install fails */ 2404 sd_taskq_delete(); 2405 2406 mutex_destroy(&sd_detach_mutex); 2407 mutex_destroy(&sd_log_mutex); 2408 mutex_destroy(&sd_label_mutex); 2409 2410 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2411 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2412 cv_destroy(&sd_tr.srq_inprocess_cv); 2413 2414 sd_scsi_probe_cache_fini(); 2415 2416 sd_scsi_target_lun_fini(); 2417 2418 #ifndef XPV_HVM_DRIVER 2419 ddi_soft_state_fini(&sd_state); 2420 #endif /* !XPV_HVM_DRIVER */ 2421 return (err); 2422 } 2423 2424 return (err); 2425 } 2426 2427 2428 /* 2429 * Function: _fini 2430 * 2431 * Description: This is the driver _fini(9E) entry point. 2432 * 2433 * Return Code: Returns the value from mod_remove(9F) 2434 * 2435 * Context: Called when driver module is unloaded. 
2436 */ 2437 2438 int 2439 _fini(void) 2440 { 2441 int err; 2442 2443 if ((err = mod_remove(&modlinkage)) != 0) { 2444 return (err); 2445 } 2446 2447 sd_taskq_delete(); 2448 2449 mutex_destroy(&sd_detach_mutex); 2450 mutex_destroy(&sd_log_mutex); 2451 mutex_destroy(&sd_label_mutex); 2452 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2453 2454 sd_scsi_probe_cache_fini(); 2455 2456 sd_scsi_target_lun_fini(); 2457 2458 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2459 cv_destroy(&sd_tr.srq_inprocess_cv); 2460 2461 #ifndef XPV_HVM_DRIVER 2462 ddi_soft_state_fini(&sd_state); 2463 #endif /* !XPV_HVM_DRIVER */ 2464 2465 return (err); 2466 } 2467 2468 2469 /* 2470 * Function: _info 2471 * 2472 * Description: This is the driver _info(9E) entry point. 2473 * 2474 * Arguments: modinfop - pointer to the driver modinfo structure 2475 * 2476 * Return Code: Returns the value from mod_info(9F). 2477 * 2478 * Context: Kernel thread context 2479 */ 2480 2481 int 2482 _info(struct modinfo *modinfop) 2483 { 2484 return (mod_info(&modlinkage, modinfop)); 2485 } 2486 2487 2488 /* 2489 * The following routines implement the driver message logging facility. 2490 * They provide component- and level- based debug output filtering. 2491 * Output may also be restricted to messages for a single instance by 2492 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set 2493 * to NULL, then messages for all instances are printed. 2494 * 2495 * These routines have been cloned from each other due to the language 2496 * constraints of macros and variable argument list processing. 2497 */ 2498 2499 2500 /* 2501 * Function: sd_log_err 2502 * 2503 * Description: This routine is called by the SD_ERROR macro for debug 2504 * logging of error conditions. 2505 * 2506 * Arguments: comp - driver component being logged 2507 * dev - pointer to driver info structure 2508 * fmt - error string and format to be logged 2509 */ 2510 2511 static void 2512 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...) 2513 { 2514 va_list ap; 2515 dev_info_t *dev; 2516 2517 ASSERT(un != NULL); 2518 dev = SD_DEVINFO(un); 2519 ASSERT(dev != NULL); 2520 2521 /* 2522 * Filter messages based on the global component and level masks. 2523 * Also print if un matches the value of sd_debug_un, or if 2524 * sd_debug_un is set to NULL. 2525 */ 2526 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) && 2527 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2528 mutex_enter(&sd_log_mutex); 2529 va_start(ap, fmt); 2530 (void) vsprintf(sd_log_buf, fmt, ap); 2531 va_end(ap); 2532 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2533 mutex_exit(&sd_log_mutex); 2534 } 2535 #ifdef SD_FAULT_INJECTION 2536 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2537 if (un->sd_injection_mask & comp) { 2538 mutex_enter(&sd_log_mutex); 2539 va_start(ap, fmt); 2540 (void) vsprintf(sd_log_buf, fmt, ap); 2541 va_end(ap); 2542 sd_injection_log(sd_log_buf, un); 2543 mutex_exit(&sd_log_mutex); 2544 } 2545 #endif 2546 } 2547 2548 2549 /* 2550 * Function: sd_log_info 2551 * 2552 * Description: This routine is called by the SD_INFO macro for debug 2553 * logging of general purpose informational conditions. 2554 * 2555 * Arguments: comp - driver component being logged 2556 * dev - pointer to driver info structure 2557 * fmt - info string and format to be logged 2558 */ 2559 2560 static void 2561 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...) 
2562 { 2563 va_list ap; 2564 dev_info_t *dev; 2565 2566 ASSERT(un != NULL); 2567 dev = SD_DEVINFO(un); 2568 ASSERT(dev != NULL); 2569 2570 /* 2571 * Filter messages based on the global component and level masks. 2572 * Also print if un matches the value of sd_debug_un, or if 2573 * sd_debug_un is set to NULL. 2574 */ 2575 if ((sd_component_mask & component) && 2576 (sd_level_mask & SD_LOGMASK_INFO) && 2577 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2578 mutex_enter(&sd_log_mutex); 2579 va_start(ap, fmt); 2580 (void) vsprintf(sd_log_buf, fmt, ap); 2581 va_end(ap); 2582 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2583 mutex_exit(&sd_log_mutex); 2584 } 2585 #ifdef SD_FAULT_INJECTION 2586 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2587 if (un->sd_injection_mask & component) { 2588 mutex_enter(&sd_log_mutex); 2589 va_start(ap, fmt); 2590 (void) vsprintf(sd_log_buf, fmt, ap); 2591 va_end(ap); 2592 sd_injection_log(sd_log_buf, un); 2593 mutex_exit(&sd_log_mutex); 2594 } 2595 #endif 2596 } 2597 2598 2599 /* 2600 * Function: sd_log_trace 2601 * 2602 * Description: This routine is called by the SD_TRACE macro for debug 2603 * logging of trace conditions (i.e. function entry/exit). 2604 * 2605 * Arguments: comp - driver component being logged 2606 * dev - pointer to driver info structure 2607 * fmt - trace string and format to be logged 2608 */ 2609 2610 static void 2611 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...) 2612 { 2613 va_list ap; 2614 dev_info_t *dev; 2615 2616 ASSERT(un != NULL); 2617 dev = SD_DEVINFO(un); 2618 ASSERT(dev != NULL); 2619 2620 /* 2621 * Filter messages based on the global component and level masks. 2622 * Also print if un matches the value of sd_debug_un, or if 2623 * sd_debug_un is set to NULL. 2624 */ 2625 if ((sd_component_mask & component) && 2626 (sd_level_mask & SD_LOGMASK_TRACE) && 2627 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2628 mutex_enter(&sd_log_mutex); 2629 va_start(ap, fmt); 2630 (void) vsprintf(sd_log_buf, fmt, ap); 2631 va_end(ap); 2632 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2633 mutex_exit(&sd_log_mutex); 2634 } 2635 #ifdef SD_FAULT_INJECTION 2636 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2637 if (un->sd_injection_mask & component) { 2638 mutex_enter(&sd_log_mutex); 2639 va_start(ap, fmt); 2640 (void) vsprintf(sd_log_buf, fmt, ap); 2641 va_end(ap); 2642 sd_injection_log(sd_log_buf, un); 2643 mutex_exit(&sd_log_mutex); 2644 } 2645 #endif 2646 } 2647 2648 2649 /* 2650 * Function: sdprobe 2651 * 2652 * Description: This is the driver probe(9e) entry point function. 2653 * 2654 * Arguments: devi - opaque device info handle 2655 * 2656 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2657 * DDI_PROBE_FAILURE: If the probe failed. 2658 * DDI_PROBE_PARTIAL: If the instance is not present now, 2659 * but may be present in the future. 2660 */ 2661 2662 static int 2663 sdprobe(dev_info_t *devi) 2664 { 2665 struct scsi_device *devp; 2666 int rval; 2667 #ifndef XPV_HVM_DRIVER 2668 int instance = ddi_get_instance(devi); 2669 #endif /* !XPV_HVM_DRIVER */ 2670 2671 /* 2672 * if it wasn't for pln, sdprobe could actually be nulldev 2673 * in the "__fibre" case. 2674 */ 2675 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2676 return (DDI_PROBE_DONTCARE); 2677 } 2678 2679 devp = ddi_get_driver_private(devi); 2680 2681 if (devp == NULL) { 2682 /* Ooops... nexus driver is mis-configured... 
*/ 2683 return (DDI_PROBE_FAILURE); 2684 } 2685 2686 #ifndef XPV_HVM_DRIVER 2687 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2688 return (DDI_PROBE_PARTIAL); 2689 } 2690 #endif /* !XPV_HVM_DRIVER */ 2691 2692 /* 2693 * Call the SCSA utility probe routine to see if we actually 2694 * have a target at this SCSI nexus. 2695 */ 2696 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2697 case SCSIPROBE_EXISTS: 2698 switch (devp->sd_inq->inq_dtype) { 2699 case DTYPE_DIRECT: 2700 rval = DDI_PROBE_SUCCESS; 2701 break; 2702 case DTYPE_RODIRECT: 2703 /* CDs etc. Can be removable media */ 2704 rval = DDI_PROBE_SUCCESS; 2705 break; 2706 case DTYPE_OPTICAL: 2707 /* 2708 * Rewritable optical driver HP115AA 2709 * Can also be removable media 2710 */ 2711 2712 /* 2713 * Do not attempt to bind to DTYPE_OPTICAL if 2714 * pre solaris 9 sparc sd behavior is required 2715 * 2716 * If first time through and sd_dtype_optical_bind 2717 * has not been set in /etc/system check properties 2718 */ 2719 2720 if (sd_dtype_optical_bind < 0) { 2721 sd_dtype_optical_bind = ddi_prop_get_int 2722 (DDI_DEV_T_ANY, devi, 0, 2723 "optical-device-bind", 1); 2724 } 2725 2726 if (sd_dtype_optical_bind == 0) { 2727 rval = DDI_PROBE_FAILURE; 2728 } else { 2729 rval = DDI_PROBE_SUCCESS; 2730 } 2731 break; 2732 2733 case DTYPE_NOTPRESENT: 2734 default: 2735 rval = DDI_PROBE_FAILURE; 2736 break; 2737 } 2738 break; 2739 default: 2740 rval = DDI_PROBE_PARTIAL; 2741 break; 2742 } 2743 2744 /* 2745 * This routine checks for resource allocation prior to freeing, 2746 * so it will take care of the "smart probing" case where a 2747 * scsi_probe() may or may not have been issued and will *not* 2748 * free previously-freed resources. 2749 */ 2750 scsi_unprobe(devp); 2751 return (rval); 2752 } 2753 2754 2755 /* 2756 * Function: sdinfo 2757 * 2758 * Description: This is the driver getinfo(9e) entry point function. 2759 * Given the device number, return the devinfo pointer from 2760 * the scsi_device structure or the instance number 2761 * associated with the dev_t. 2762 * 2763 * Arguments: dip - pointer to device info structure 2764 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2765 * DDI_INFO_DEVT2INSTANCE) 2766 * arg - driver dev_t 2767 * resultp - user buffer for request response 2768 * 2769 * Return Code: DDI_SUCCESS 2770 * DDI_FAILURE 2771 */ 2772 /* ARGSUSED */ 2773 static int 2774 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2775 { 2776 struct sd_lun *un; 2777 dev_t dev; 2778 int instance; 2779 int error; 2780 2781 switch (infocmd) { 2782 case DDI_INFO_DEVT2DEVINFO: 2783 dev = (dev_t)arg; 2784 instance = SDUNIT(dev); 2785 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2786 return (DDI_FAILURE); 2787 } 2788 *result = (void *) SD_DEVINFO(un); 2789 error = DDI_SUCCESS; 2790 break; 2791 case DDI_INFO_DEVT2INSTANCE: 2792 dev = (dev_t)arg; 2793 instance = SDUNIT(dev); 2794 *result = (void *)(uintptr_t)instance; 2795 error = DDI_SUCCESS; 2796 break; 2797 default: 2798 error = DDI_FAILURE; 2799 } 2800 return (error); 2801 } 2802 2803 /* 2804 * Function: sd_prop_op 2805 * 2806 * Description: This is the driver prop_op(9e) entry point function. 2807 * Return the number of blocks for the partition in question 2808 * or forward the request to the property facilities. 
2809 * 2810 * Arguments: dev - device number 2811 * dip - pointer to device info structure 2812 * prop_op - property operator 2813 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2814 * name - pointer to property name 2815 * valuep - pointer or address of the user buffer 2816 * lengthp - property length 2817 * 2818 * Return Code: DDI_PROP_SUCCESS 2819 * DDI_PROP_NOT_FOUND 2820 * DDI_PROP_UNDEFINED 2821 * DDI_PROP_NO_MEMORY 2822 * DDI_PROP_BUF_TOO_SMALL 2823 */ 2824 2825 static int 2826 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2827 char *name, caddr_t valuep, int *lengthp) 2828 { 2829 struct sd_lun *un; 2830 2831 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2832 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2833 name, valuep, lengthp)); 2834 2835 return (cmlb_prop_op(un->un_cmlbhandle, 2836 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2837 SDPART(dev), (void *)SD_PATH_DIRECT)); 2838 } 2839 2840 /* 2841 * The following functions are for smart probing: 2842 * sd_scsi_probe_cache_init() 2843 * sd_scsi_probe_cache_fini() 2844 * sd_scsi_clear_probe_cache() 2845 * sd_scsi_probe_with_cache() 2846 */ 2847 2848 /* 2849 * Function: sd_scsi_probe_cache_init 2850 * 2851 * Description: Initializes the probe response cache mutex and head pointer. 2852 * 2853 * Context: Kernel thread context 2854 */ 2855 2856 static void 2857 sd_scsi_probe_cache_init(void) 2858 { 2859 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2860 sd_scsi_probe_cache_head = NULL; 2861 } 2862 2863 2864 /* 2865 * Function: sd_scsi_probe_cache_fini 2866 * 2867 * Description: Frees all resources associated with the probe response cache. 2868 * 2869 * Context: Kernel thread context 2870 */ 2871 2872 static void 2873 sd_scsi_probe_cache_fini(void) 2874 { 2875 struct sd_scsi_probe_cache *cp; 2876 struct sd_scsi_probe_cache *ncp; 2877 2878 /* Clean up our smart probing linked list */ 2879 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2880 ncp = cp->next; 2881 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2882 } 2883 sd_scsi_probe_cache_head = NULL; 2884 mutex_destroy(&sd_scsi_probe_cache_mutex); 2885 } 2886 2887 2888 /* 2889 * Function: sd_scsi_clear_probe_cache 2890 * 2891 * Description: This routine clears the probe response cache. This is 2892 * done when open() returns ENXIO so that when deferred 2893 * attach is attempted (possibly after a device has been 2894 * turned on) we will retry the probe. Since we don't know 2895 * which target we failed to open, we just clear the 2896 * entire cache. 2897 * 2898 * Context: Kernel thread context 2899 */ 2900 2901 static void 2902 sd_scsi_clear_probe_cache(void) 2903 { 2904 struct sd_scsi_probe_cache *cp; 2905 int i; 2906 2907 mutex_enter(&sd_scsi_probe_cache_mutex); 2908 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2909 /* 2910 * Reset all entries to SCSIPROBE_EXISTS. This will 2911 * force probing to be performed the next time 2912 * sd_scsi_probe_with_cache is called. 2913 */ 2914 for (i = 0; i < NTARGETS_WIDE; i++) { 2915 cp->cache[i] = SCSIPROBE_EXISTS; 2916 } 2917 } 2918 mutex_exit(&sd_scsi_probe_cache_mutex); 2919 } 2920 2921 2922 /* 2923 * Function: sd_scsi_probe_with_cache 2924 * 2925 * Description: This routine implements support for a scsi device probe 2926 * with cache. The driver maintains a cache of the target 2927 * responses to scsi probes. 
If we get no response from a 2928 * target during a probe inquiry, we remember that, and we 2929 * avoid additional calls to scsi_probe on non-zero LUNs 2930 * on the same target until the cache is cleared. By doing 2931 * so we avoid the 1/4 sec selection timeout for nonzero 2932 * LUNs. lun0 of a target is always probed. 2933 * 2934 * Arguments: devp - Pointer to a scsi_device(9S) structure 2935 * waitfunc - indicates what the allocator routines should 2936 * do when resources are not available. This value 2937 * is passed on to scsi_probe() when that routine 2938 * is called. 2939 * 2940 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2941 * otherwise the value returned by scsi_probe(9F). 2942 * 2943 * Context: Kernel thread context 2944 */ 2945 2946 static int 2947 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2948 { 2949 struct sd_scsi_probe_cache *cp; 2950 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2951 int lun, tgt; 2952 2953 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2954 SCSI_ADDR_PROP_LUN, 0); 2955 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2956 SCSI_ADDR_PROP_TARGET, -1); 2957 2958 /* Make sure caching enabled and target in range */ 2959 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2960 /* do it the old way (no cache) */ 2961 return (scsi_probe(devp, waitfn)); 2962 } 2963 2964 mutex_enter(&sd_scsi_probe_cache_mutex); 2965 2966 /* Find the cache for this scsi bus instance */ 2967 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2968 if (cp->pdip == pdip) { 2969 break; 2970 } 2971 } 2972 2973 /* If we can't find a cache for this pdip, create one */ 2974 if (cp == NULL) { 2975 int i; 2976 2977 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2978 KM_SLEEP); 2979 cp->pdip = pdip; 2980 cp->next = sd_scsi_probe_cache_head; 2981 sd_scsi_probe_cache_head = cp; 2982 for (i = 0; i < NTARGETS_WIDE; i++) { 2983 cp->cache[i] = SCSIPROBE_EXISTS; 2984 } 2985 } 2986 2987 mutex_exit(&sd_scsi_probe_cache_mutex); 2988 2989 /* Recompute the cache for this target if LUN zero */ 2990 if (lun == 0) { 2991 cp->cache[tgt] = SCSIPROBE_EXISTS; 2992 } 2993 2994 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2995 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2996 return (SCSIPROBE_NORESP); 2997 } 2998 2999 /* Do the actual probe; save & return the result */ 3000 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 3001 } 3002 3003 3004 /* 3005 * Function: sd_scsi_target_lun_init 3006 * 3007 * Description: Initializes the attached lun chain mutex and head pointer. 
3008 * 3009 * Context: Kernel thread context 3010 */ 3011 3012 static void 3013 sd_scsi_target_lun_init(void) 3014 { 3015 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 3016 sd_scsi_target_lun_head = NULL; 3017 } 3018 3019 3020 /* 3021 * Function: sd_scsi_target_lun_fini 3022 * 3023 * Description: Frees all resources associated with the attached lun 3024 * chain 3025 * 3026 * Context: Kernel thread context 3027 */ 3028 3029 static void 3030 sd_scsi_target_lun_fini(void) 3031 { 3032 struct sd_scsi_hba_tgt_lun *cp; 3033 struct sd_scsi_hba_tgt_lun *ncp; 3034 3035 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 3036 ncp = cp->next; 3037 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 3038 } 3039 sd_scsi_target_lun_head = NULL; 3040 mutex_destroy(&sd_scsi_target_lun_mutex); 3041 } 3042 3043 3044 /* 3045 * Function: sd_scsi_get_target_lun_count 3046 * 3047 * Description: This routine will check in the attached lun chain to see 3048 * how many luns are attached on the required SCSI controller 3049 * and target. Currently, some capabilities like tagged queue 3050 * are supported per target based by HBA. So all luns in a 3051 * target have the same capabilities. Based on this assumption, 3052 * sd should only set these capabilities once per target. This 3053 * function is called when sd needs to decide how many luns 3054 * already attached on a target. 3055 * 3056 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 3057 * controller device. 3058 * target - The target ID on the controller's SCSI bus. 3059 * 3060 * Return Code: The number of luns attached on the required target and 3061 * controller. 3062 * -1 if target ID is not in parallel SCSI scope or the given 3063 * dip is not in the chain. 3064 * 3065 * Context: Kernel thread context 3066 */ 3067 3068 static int 3069 sd_scsi_get_target_lun_count(dev_info_t *dip, int target) 3070 { 3071 struct sd_scsi_hba_tgt_lun *cp; 3072 3073 if ((target < 0) || (target >= NTARGETS_WIDE)) { 3074 return (-1); 3075 } 3076 3077 mutex_enter(&sd_scsi_target_lun_mutex); 3078 3079 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 3080 if (cp->pdip == dip) { 3081 break; 3082 } 3083 } 3084 3085 mutex_exit(&sd_scsi_target_lun_mutex); 3086 3087 if (cp == NULL) { 3088 return (-1); 3089 } 3090 3091 return (cp->nlun[target]); 3092 } 3093 3094 3095 /* 3096 * Function: sd_scsi_update_lun_on_target 3097 * 3098 * Description: This routine is used to update the attached lun chain when a 3099 * lun is attached or detached on a target. 3100 * 3101 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 3102 * controller device. 3103 * target - The target ID on the controller's SCSI bus. 3104 * flag - Indicate the lun is attached or detached. 
3105 * 3106 * Context: Kernel thread context 3107 */ 3108 3109 static void 3110 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 3111 { 3112 struct sd_scsi_hba_tgt_lun *cp; 3113 3114 mutex_enter(&sd_scsi_target_lun_mutex); 3115 3116 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 3117 if (cp->pdip == dip) { 3118 break; 3119 } 3120 } 3121 3122 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 3123 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 3124 KM_SLEEP); 3125 cp->pdip = dip; 3126 cp->next = sd_scsi_target_lun_head; 3127 sd_scsi_target_lun_head = cp; 3128 } 3129 3130 mutex_exit(&sd_scsi_target_lun_mutex); 3131 3132 if (cp != NULL) { 3133 if (flag == SD_SCSI_LUN_ATTACH) { 3134 cp->nlun[target] ++; 3135 } else { 3136 cp->nlun[target] --; 3137 } 3138 } 3139 } 3140 3141 3142 /* 3143 * Function: sd_spin_up_unit 3144 * 3145 * Description: Issues the following commands to spin-up the device: 3146 * START STOP UNIT, and INQUIRY. 3147 * 3148 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3149 * structure for this target. 3150 * 3151 * Return Code: 0 - success 3152 * EIO - failure 3153 * EACCES - reservation conflict 3154 * 3155 * Context: Kernel thread context 3156 */ 3157 3158 static int 3159 sd_spin_up_unit(sd_ssc_t *ssc) 3160 { 3161 size_t resid = 0; 3162 int has_conflict = FALSE; 3163 uchar_t *bufaddr; 3164 int status; 3165 struct sd_lun *un; 3166 3167 ASSERT(ssc != NULL); 3168 un = ssc->ssc_un; 3169 ASSERT(un != NULL); 3170 3171 /* 3172 * Send a throwaway START UNIT command. 3173 * 3174 * If we fail on this, we don't care presently what precisely 3175 * is wrong. EMC's arrays will also fail this with a check 3176 * condition (0x2/0x4/0x3) if the device is "inactive," but 3177 * we don't want to fail the attach because it may become 3178 * "active" later. 3179 */ 3180 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 3181 SD_PATH_DIRECT); 3182 3183 if (status != 0) { 3184 if (status == EACCES) 3185 has_conflict = TRUE; 3186 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3187 } 3188 3189 /* 3190 * Send another INQUIRY command to the target. This is necessary for 3191 * non-removable media direct access devices because their INQUIRY data 3192 * may not be fully qualified until they are spun up (perhaps via the 3193 * START command above). Note: This seems to be needed for some 3194 * legacy devices only.) The INQUIRY command should succeed even if a 3195 * Reservation Conflict is present. 3196 */ 3197 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 3198 3199 if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid) 3200 != 0) { 3201 kmem_free(bufaddr, SUN_INQSIZE); 3202 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 3203 return (EIO); 3204 } 3205 3206 /* 3207 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 3208 * Note that this routine does not return a failure here even if the 3209 * INQUIRY command did not return any data. This is a legacy behavior. 3210 */ 3211 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 3212 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 3213 } 3214 3215 kmem_free(bufaddr, SUN_INQSIZE); 3216 3217 /* If we hit a reservation conflict above, tell the caller. */ 3218 if (has_conflict == TRUE) { 3219 return (EACCES); 3220 } 3221 3222 return (0); 3223 } 3224 3225 #ifdef _LP64 3226 /* 3227 * Function: sd_enable_descr_sense 3228 * 3229 * Description: This routine attempts to select descriptor sense format 3230 * using the Control mode page. 
Devices that support 64 bit 3231 * LBAs (for >2TB luns) should also implement descriptor 3232 * sense data so we will call this function whenever we see 3233 * a lun larger than 2TB. If for some reason the device 3234 * supports 64 bit LBAs but doesn't support descriptor sense 3235 * presumably the mode select will fail. Everything will 3236 * continue to work normally except that we will not get 3237 * complete sense data for commands that fail with an LBA 3238 * larger than 32 bits. 3239 * 3240 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3241 * structure for this target. 3242 * 3243 * Context: Kernel thread context only 3244 */ 3245 3246 static void 3247 sd_enable_descr_sense(sd_ssc_t *ssc) 3248 { 3249 uchar_t *header; 3250 struct mode_control_scsi3 *ctrl_bufp; 3251 size_t buflen; 3252 size_t bd_len; 3253 int status; 3254 struct sd_lun *un; 3255 3256 ASSERT(ssc != NULL); 3257 un = ssc->ssc_un; 3258 ASSERT(un != NULL); 3259 3260 /* 3261 * Read MODE SENSE page 0xA, Control Mode Page 3262 */ 3263 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3264 sizeof (struct mode_control_scsi3); 3265 header = kmem_zalloc(buflen, KM_SLEEP); 3266 3267 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 3268 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT); 3269 3270 if (status != 0) { 3271 SD_ERROR(SD_LOG_COMMON, un, 3272 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3273 goto eds_exit; 3274 } 3275 3276 /* 3277 * Determine size of Block Descriptors in order to locate 3278 * the mode page data. ATAPI devices return 0, SCSI devices 3279 * should return MODE_BLK_DESC_LENGTH. 3280 */ 3281 bd_len = ((struct mode_header *)header)->bdesc_length; 3282 3283 /* Clear the mode data length field for MODE SELECT */ 3284 ((struct mode_header *)header)->length = 0; 3285 3286 ctrl_bufp = (struct mode_control_scsi3 *) 3287 (header + MODE_HEADER_LENGTH + bd_len); 3288 3289 /* 3290 * If the page length is smaller than the expected value, 3291 * the target device doesn't support D_SENSE. Bail out here. 3292 */ 3293 if (ctrl_bufp->mode_page.length < 3294 sizeof (struct mode_control_scsi3) - 2) { 3295 SD_ERROR(SD_LOG_COMMON, un, 3296 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3297 goto eds_exit; 3298 } 3299 3300 /* 3301 * Clear PS bit for MODE SELECT 3302 */ 3303 ctrl_bufp->mode_page.ps = 0; 3304 3305 /* 3306 * Set D_SENSE to enable descriptor sense format. 
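 *
 * (A hedged note, taken from the SPC-3 layout rather than anything in
 * this file: D_SENSE is a single bit in byte 2 of the Control mode
 * page, which struct mode_control_scsi3 exposes as a bit-field, so no
 * manual shifting or masking is needed below.)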
3307 */ 3308 ctrl_bufp->d_sense = 1; 3309 3310 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3311 3312 /* 3313 * Use MODE SELECT to commit the change to the D_SENSE bit 3314 */ 3315 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 3316 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT); 3317 3318 if (status != 0) { 3319 SD_INFO(SD_LOG_COMMON, un, 3320 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3321 } else { 3322 kmem_free(header, buflen); 3323 return; 3324 } 3325 3326 eds_exit: 3327 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3328 kmem_free(header, buflen); 3329 } 3330 3331 /* 3332 * Function: sd_reenable_dsense_task 3333 * 3334 * Description: Re-enable descriptor sense after device or bus reset 3335 * 3336 * Context: Executes in a taskq() thread context 3337 */ 3338 static void 3339 sd_reenable_dsense_task(void *arg) 3340 { 3341 struct sd_lun *un = arg; 3342 sd_ssc_t *ssc; 3343 3344 ASSERT(un != NULL); 3345 3346 ssc = sd_ssc_init(un); 3347 sd_enable_descr_sense(ssc); 3348 sd_ssc_fini(ssc); 3349 } 3350 #endif /* _LP64 */ 3351 3352 /* 3353 * Function: sd_set_mmc_caps 3354 * 3355 * Description: This routine determines if the device is MMC compliant and if 3356 * the device supports CDDA via a mode sense of the CDVD 3357 * capabilities mode page. Also checks if the device is a 3358 * dvdram writable device. 3359 * 3360 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3361 * structure for this target. 3362 * 3363 * Context: Kernel thread context only 3364 */ 3365 3366 static void 3367 sd_set_mmc_caps(sd_ssc_t *ssc) 3368 { 3369 struct mode_header_grp2 *sense_mhp; 3370 uchar_t *sense_page; 3371 caddr_t buf; 3372 int bd_len; 3373 int status; 3374 struct uscsi_cmd com; 3375 int rtn; 3376 uchar_t *out_data_rw, *out_data_hd; 3377 uchar_t *rqbuf_rw, *rqbuf_hd; 3378 struct sd_lun *un; 3379 3380 ASSERT(ssc != NULL); 3381 un = ssc->ssc_un; 3382 ASSERT(un != NULL); 3383 3384 /* 3385 * The flags which will be set in this function are - mmc compliant, 3386 * dvdram writable device, cdda support. Initialize them to FALSE 3387 * and if a capability is detected - it will be set to TRUE. 3388 */ 3389 un->un_f_mmc_cap = FALSE; 3390 un->un_f_dvdram_writable_device = FALSE; 3391 un->un_f_cfg_cdda = FALSE; 3392 3393 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3394 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3395 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3396 3397 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3398 3399 if (status != 0) { 3400 /* command failed; just return */ 3401 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3402 return; 3403 } 3404 /* 3405 * If the mode sense request for the CDROM CAPABILITIES 3406 * page (0x2A) succeeds the device is assumed to be MMC. 3407 */ 3408 un->un_f_mmc_cap = TRUE; 3409 3410 /* Get to the page data */ 3411 sense_mhp = (struct mode_header_grp2 *)buf; 3412 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3413 sense_mhp->bdesc_length_lo; 3414 if (bd_len > MODE_BLK_DESC_LENGTH) { 3415 /* 3416 * We did not get back the expected block descriptor 3417 * length so we cannot determine if the device supports 3418 * CDDA. However, we still indicate the device is MMC 3419 * according to the successful response to the page 3420 * 0x2A mode sense request. 
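 *
 * (For reference: the group-2 mode header carries bdesc_length as a
 * hi/lo byte pair, so, for example, hi = 0x00 and lo = 0x08 decode to
 * bd_len = 8, the standard MODE_BLK_DESC_LENGTH for SCSI devices,
 * while ATAPI devices typically report 0.)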
3421 */ 3422 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3423 "sd_set_mmc_caps: Mode Sense returned " 3424 "invalid block descriptor length\n"); 3425 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3426 return; 3427 } 3428 3429 /* See if read CDDA is supported */ 3430 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3431 bd_len); 3432 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3433 3434 /* See if writing DVD RAM is supported. */ 3435 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3436 if (un->un_f_dvdram_writable_device == TRUE) { 3437 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3438 return; 3439 } 3440 3441 /* 3442 * If the device presents DVD or CD capabilities in the mode 3443 * page, we can return here since a RRD will not have 3444 * these capabilities. 3445 */ 3446 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3447 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3448 return; 3449 } 3450 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3451 3452 /* 3453 * If un->un_f_dvdram_writable_device is still FALSE, 3454 * check for a Removable Rigid Disk (RRD). A RRD 3455 * device is identified by the features RANDOM_WRITABLE and 3456 * HARDWARE_DEFECT_MANAGEMENT. 3457 */ 3458 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3459 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3460 3461 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3462 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3463 RANDOM_WRITABLE, SD_PATH_STANDARD); 3464 3465 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3466 3467 if (rtn != 0) { 3468 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3469 kmem_free(rqbuf_rw, SENSE_LENGTH); 3470 return; 3471 } 3472 3473 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3474 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3475 3476 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3477 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3478 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3479 3480 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3481 3482 if (rtn == 0) { 3483 /* 3484 * We have good information, check for random writable 3485 * and hardware defect features. 3486 */ 3487 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3488 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3489 un->un_f_dvdram_writable_device = TRUE; 3490 } 3491 } 3492 3493 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3494 kmem_free(rqbuf_rw, SENSE_LENGTH); 3495 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3496 kmem_free(rqbuf_hd, SENSE_LENGTH); 3497 } 3498 3499 /* 3500 * Function: sd_check_for_writable_cd 3501 * 3502 * Description: This routine determines if the media in the device is 3503 * writable or not. It uses the get configuration command (0x46) 3504 * to determine if the media is writable 3505 * 3506 * Arguments: un - driver soft state (unit) structure 3507 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3508 * chain and the normal command waitq, or 3509 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3510 * "direct" chain and bypass the normal command 3511 * waitq. 3512 * 3513 * Context: Never called at interrupt context. 
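 *
 * Note: Assuming the MMC GET CONFIGURATION reply layout, the check
 * below of out_data[6] == 0 && out_data[7] == 0x12 compares the
 * "current profile" field of the feature header against profile
 * 0x0012 (DVD-RAM), i.e. writable media.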
3514 */ 3515 3516 static void 3517 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag) 3518 { 3519 struct uscsi_cmd com; 3520 uchar_t *out_data; 3521 uchar_t *rqbuf; 3522 int rtn; 3523 uchar_t *out_data_rw, *out_data_hd; 3524 uchar_t *rqbuf_rw, *rqbuf_hd; 3525 struct mode_header_grp2 *sense_mhp; 3526 uchar_t *sense_page; 3527 caddr_t buf; 3528 int bd_len; 3529 int status; 3530 struct sd_lun *un; 3531 3532 ASSERT(ssc != NULL); 3533 un = ssc->ssc_un; 3534 ASSERT(un != NULL); 3535 ASSERT(mutex_owned(SD_MUTEX(un))); 3536 3537 /* 3538 * Initialize the writable media to false, if configuration info. 3539 * tells us otherwise then only we will set it. 3540 */ 3541 un->un_f_mmc_writable_media = FALSE; 3542 mutex_exit(SD_MUTEX(un)); 3543 3544 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3545 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3546 3547 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH, 3548 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3549 3550 if (rtn != 0) 3551 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3552 3553 mutex_enter(SD_MUTEX(un)); 3554 if (rtn == 0) { 3555 /* 3556 * We have good information, check for writable DVD. 3557 */ 3558 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3559 un->un_f_mmc_writable_media = TRUE; 3560 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3561 kmem_free(rqbuf, SENSE_LENGTH); 3562 return; 3563 } 3564 } 3565 3566 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3567 kmem_free(rqbuf, SENSE_LENGTH); 3568 3569 /* 3570 * Determine if this is a RRD type device. 3571 */ 3572 mutex_exit(SD_MUTEX(un)); 3573 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3574 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3575 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3576 3577 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3578 3579 mutex_enter(SD_MUTEX(un)); 3580 if (status != 0) { 3581 /* command failed; just return */ 3582 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3583 return; 3584 } 3585 3586 /* Get to the page data */ 3587 sense_mhp = (struct mode_header_grp2 *)buf; 3588 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3589 if (bd_len > MODE_BLK_DESC_LENGTH) { 3590 /* 3591 * We did not get back the expected block descriptor length so 3592 * we cannot check the mode page. 3593 */ 3594 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3595 "sd_check_for_writable_cd: Mode Sense returned " 3596 "invalid block descriptor length\n"); 3597 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3598 return; 3599 } 3600 3601 /* 3602 * If the device presents DVD or CD capabilities in the mode 3603 * page, we can return here since a RRD device will not have 3604 * these capabilities. 3605 */ 3606 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3607 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3608 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3609 return; 3610 } 3611 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3612 3613 /* 3614 * If un->un_f_mmc_writable_media is still FALSE, 3615 * check for RRD type media. A RRD device is identified 3616 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 
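 *
 * (Assuming the MMC feature descriptor layout: the descriptor starts
 * at offset 8, right after the GET CONFIGURATION header, and byte 2
 * of the descriptor carries the Current bit. That is why the checks
 * below test out_data_rw[10] & 0x1 and out_data_hd[10] & 0x1, to make
 * sure each feature is active for the loaded media.)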
3617  */
3618 	mutex_exit(SD_MUTEX(un));
3619 	out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3620 	rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3621
3622 	rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
3623 	    SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3624 	    RANDOM_WRITABLE, path_flag);
3625
3626 	sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3627 	if (rtn != 0) {
3628 		kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3629 		kmem_free(rqbuf_rw, SENSE_LENGTH);
3630 		mutex_enter(SD_MUTEX(un));
3631 		return;
3632 	}
3633
3634 	out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3635 	rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3636
3637 	rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
3638 	    SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3639 	    HARDWARE_DEFECT_MANAGEMENT, path_flag);
3640
3641 	sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3642 	mutex_enter(SD_MUTEX(un));
3643 	if (rtn == 0) {
3644 		/*
3645 		 * We have good information, check for random writable
3646 		 * and hardware defect features as current.
3647 		 */
3648 		if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3649 		    (out_data_rw[10] & 0x1) &&
3650 		    (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
3651 		    (out_data_hd[10] & 0x1)) {
3652 			un->un_f_mmc_writable_media = TRUE;
3653 		}
3654 	}
3655
3656 	kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3657 	kmem_free(rqbuf_rw, SENSE_LENGTH);
3658 	kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3659 	kmem_free(rqbuf_hd, SENSE_LENGTH);
3660 }
3661
3662 /*
3663  * Function: sd_read_unit_properties
3664  *
3665  * Description: The following implements a property lookup mechanism.
3666  *		Properties for particular disks (keyed on vendor, model
3667  *		and rev numbers) are sought in the sd.conf file via
3668  *		sd_process_sdconf_file(), and if not found there, are
3669  *		looked for in a list hardcoded in this driver via
3670  *		sd_process_sdconf_table(). Once located, the properties
3671  *		are used to update the driver unit structure.
3672  *
3673  * Arguments: un - driver soft state (unit) structure
3674  */
3675
3676 static void
3677 sd_read_unit_properties(struct sd_lun *un)
3678 {
3679 	/*
3680 	 * sd_process_sdconf_file returns SD_FAILURE if it cannot find
3681 	 * the "sd-config-list" property (from the sd.conf file) or if
3682 	 * there was not a match for the inquiry vid/pid. If this
3683 	 * occurs, the static driver configuration table is searched
3684 	 * for a match.
3685 	 */
3686 	ASSERT(un != NULL);
3687 	if (sd_process_sdconf_file(un) == SD_FAILURE) {
3688 		sd_process_sdconf_table(un);
3689 	}
3690
3691 	/* check for LSI device */
3692 	sd_is_lsi(un);
3693
3694
3695 }
3696
3697
3698 /*
3699  * Function: sd_process_sdconf_file
3700  *
3701  * Description: Use ddi_prop_lookup(9F) to obtain the properties from the
3702  *		driver's config file (i.e., sd.conf) and update the driver
3703  *		soft state structure accordingly.
3704  *
3705  * Arguments: un - driver soft state (unit) structure
3706  *
3707  * Return Code: SD_SUCCESS - The properties were successfully set according
3708  *		to the driver configuration file.
3709  *		SD_FAILURE - The driver config list was not obtained or
3710  *		there was no vid/pid match. This indicates that
3711  *		the static config table should be used.
3712  *
3713  * The config file has a property, "sd-config-list". Currently we support
3714  * two kinds of formats.
For both formats, the value of this property 3715 * is a list of duplets: 3716 * 3717 * sd-config-list= 3718 * <duplet>, 3719 * [,<duplet>]*; 3720 * 3721 * For the improved format, where 3722 * 3723 * <duplet>:= "<vid+pid>","<tunable-list>" 3724 * 3725 * and 3726 * 3727 * <tunable-list>:= <tunable> [, <tunable> ]*; 3728 * <tunable> = <name> : <value> 3729 * 3730 * The <vid+pid> is the string that is returned by the target device on a 3731 * SCSI inquiry command, the <tunable-list> contains one or more tunables 3732 * to apply to all target devices with the specified <vid+pid>. 3733 * 3734 * Each <tunable> is a "<name> : <value>" pair. 3735 * 3736 * For the old format, the structure of each duplet is as follows: 3737 * 3738 * <duplet>:= "<vid+pid>","<data-property-name_list>" 3739 * 3740 * The first entry of the duplet is the device ID string (the concatenated 3741 * vid & pid; not to be confused with a device_id). This is defined in 3742 * the same way as in the sd_disk_table. 3743 * 3744 * The second part of the duplet is a string that identifies a 3745 * data-property-name-list. The data-property-name-list is defined as 3746 * follows: 3747 * 3748 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3749 * 3750 * The syntax of <data-property-name> depends on the <version> field. 3751 * 3752 * If version = SD_CONF_VERSION_1 we have the following syntax: 3753 * 3754 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3755 * 3756 * where the prop0 value will be used to set prop0 if bit0 set in the 3757 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3758 * 3759 */ 3760 3761 static int 3762 sd_process_sdconf_file(struct sd_lun *un) 3763 { 3764 char **config_list = NULL; 3765 uint_t nelements; 3766 char *vidptr; 3767 int vidlen; 3768 char *dnlist_ptr; 3769 char *dataname_ptr; 3770 char *dataname_lasts; 3771 int *data_list = NULL; 3772 uint_t data_list_len; 3773 int rval = SD_FAILURE; 3774 int i; 3775 3776 ASSERT(un != NULL); 3777 3778 /* Obtain the configuration list associated with the .conf file */ 3779 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un), 3780 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list, 3781 &config_list, &nelements) != DDI_PROP_SUCCESS) { 3782 return (SD_FAILURE); 3783 } 3784 3785 /* 3786 * Compare vids in each duplet to the inquiry vid - if a match is 3787 * made, get the data value and update the soft state structure 3788 * accordingly. 3789 * 3790 * Each duplet should show as a pair of strings, return SD_FAILURE 3791 * otherwise. 3792 */ 3793 if (nelements & 1) { 3794 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3795 "sd-config-list should show as pairs of strings.\n"); 3796 if (config_list) 3797 ddi_prop_free(config_list); 3798 return (SD_FAILURE); 3799 } 3800 3801 for (i = 0; i < nelements; i += 2) { 3802 /* 3803 * Note: The assumption here is that each vid entry is on 3804 * a unique line from its associated duplet. 3805 */ 3806 vidptr = config_list[i]; 3807 vidlen = (int)strlen(vidptr); 3808 if ((vidlen == 0) || 3809 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3810 continue; 3811 } 3812 3813 /* 3814 * dnlist contains 1 or more blank separated 3815 * data-property-name entries 3816 */ 3817 dnlist_ptr = config_list[i + 1]; 3818 3819 if (strchr(dnlist_ptr, ':') != NULL) { 3820 /* 3821 * Decode the improved format sd-config-list. 
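 * For example, a hypothetical sd.conf entry in this format (vendor,
 * product and values are illustrative only) could read:
 *
 *	sd-config-list = "ACME    SUPERDISK",
 *	    "retries-busy:6, throttle-max:64, disksort:false";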
3822 */ 3823 sd_nvpair_str_decode(un, dnlist_ptr); 3824 } else { 3825 /* 3826 * The old format sd-config-list, loop through all 3827 * data-property-name entries in the 3828 * data-property-name-list 3829 * setting the properties for each. 3830 */ 3831 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t", 3832 &dataname_lasts); dataname_ptr != NULL; 3833 dataname_ptr = sd_strtok_r(NULL, " \t", 3834 &dataname_lasts)) { 3835 int version; 3836 3837 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3838 "sd_process_sdconf_file: disk:%s, " 3839 "data:%s\n", vidptr, dataname_ptr); 3840 3841 /* Get the data list */ 3842 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 3843 SD_DEVINFO(un), 0, dataname_ptr, &data_list, 3844 &data_list_len) != DDI_PROP_SUCCESS) { 3845 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3846 "sd_process_sdconf_file: data " 3847 "property (%s) has no value\n", 3848 dataname_ptr); 3849 continue; 3850 } 3851 3852 version = data_list[0]; 3853 3854 if (version == SD_CONF_VERSION_1) { 3855 sd_tunables values; 3856 3857 /* Set the properties */ 3858 if (sd_chk_vers1_data(un, data_list[1], 3859 &data_list[2], data_list_len, 3860 dataname_ptr) == SD_SUCCESS) { 3861 sd_get_tunables_from_conf(un, 3862 data_list[1], &data_list[2], 3863 &values); 3864 sd_set_vers1_properties(un, 3865 data_list[1], &values); 3866 rval = SD_SUCCESS; 3867 } else { 3868 rval = SD_FAILURE; 3869 } 3870 } else { 3871 scsi_log(SD_DEVINFO(un), sd_label, 3872 CE_WARN, "data property %s version " 3873 "0x%x is invalid.", 3874 dataname_ptr, version); 3875 rval = SD_FAILURE; 3876 } 3877 if (data_list) 3878 ddi_prop_free(data_list); 3879 } 3880 } 3881 } 3882 3883 /* free up the memory allocated by ddi_prop_lookup_string_array(). */ 3884 if (config_list) { 3885 ddi_prop_free(config_list); 3886 } 3887 3888 return (rval); 3889 } 3890 3891 /* 3892 * Function: sd_nvpair_str_decode() 3893 * 3894 * Description: Parse the improved format sd-config-list to get 3895 * each entry of tunable, which includes a name-value pair. 3896 * Then call sd_set_properties() to set the property. 3897 * 3898 * Arguments: un - driver soft state (unit) structure 3899 * nvpair_str - the tunable list 3900 */ 3901 static void 3902 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str) 3903 { 3904 char *nv, *name, *value, *token; 3905 char *nv_lasts, *v_lasts, *x_lasts; 3906 3907 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL; 3908 nv = sd_strtok_r(NULL, ",", &nv_lasts)) { 3909 token = sd_strtok_r(nv, ":", &v_lasts); 3910 name = sd_strtok_r(token, " \t", &x_lasts); 3911 token = sd_strtok_r(NULL, ":", &v_lasts); 3912 value = sd_strtok_r(token, " \t", &x_lasts); 3913 if (name == NULL || value == NULL) { 3914 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3915 "sd_nvpair_str_decode: " 3916 "name or value is not valid!\n"); 3917 } else { 3918 sd_set_properties(un, name, value); 3919 } 3920 } 3921 } 3922 3923 /* 3924 * Function: sd_strtok_r() 3925 * 3926 * Description: This function uses strpbrk and strspn to break 3927 * string into tokens on sequentially subsequent calls. Return 3928 * NULL when no non-separator characters remain. The first 3929 * argument is NULL for subsequent calls. 
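 *
 *		A hedged usage sketch (the buffer is hypothetical, but
 *		the calling pattern mirrors sd_nvpair_str_decode()):
 *
 *		    char buf[] = "retries-busy : 6, throttle-max : 64";
 *		    char *lasts, *nv;
 *
 *		    for (nv = sd_strtok_r(buf, ",", &lasts); nv != NULL;
 *			nv = sd_strtok_r(NULL, ",", &lasts)) {
 *			    ... nv is "retries-busy : 6" on the first
 *			    pass, " throttle-max : 64" on the second ...
 *		    }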
3930 */ 3931 static char * 3932 sd_strtok_r(char *string, const char *sepset, char **lasts) 3933 { 3934 char *q, *r; 3935 3936 /* First or subsequent call */ 3937 if (string == NULL) 3938 string = *lasts; 3939 3940 if (string == NULL) 3941 return (NULL); 3942 3943 /* Skip leading separators */ 3944 q = string + strspn(string, sepset); 3945 3946 if (*q == '\0') 3947 return (NULL); 3948 3949 if ((r = strpbrk(q, sepset)) == NULL) 3950 *lasts = NULL; 3951 else { 3952 *r = '\0'; 3953 *lasts = r + 1; 3954 } 3955 return (q); 3956 } 3957 3958 /* 3959 * Function: sd_set_properties() 3960 * 3961 * Description: Set device properties based on the improved 3962 * format sd-config-list. 3963 * 3964 * Arguments: un - driver soft state (unit) structure 3965 * name - supported tunable name 3966 * value - tunable value 3967 */ 3968 static void 3969 sd_set_properties(struct sd_lun *un, char *name, char *value) 3970 { 3971 char *endptr = NULL; 3972 long val = 0; 3973 3974 if (strcasecmp(name, "cache-nonvolatile") == 0) { 3975 if (strcasecmp(value, "true") == 0) { 3976 un->un_f_suppress_cache_flush = TRUE; 3977 } else if (strcasecmp(value, "false") == 0) { 3978 un->un_f_suppress_cache_flush = FALSE; 3979 } else { 3980 goto value_invalid; 3981 } 3982 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3983 "suppress_cache_flush flag set to %d\n", 3984 un->un_f_suppress_cache_flush); 3985 return; 3986 } 3987 3988 if (strcasecmp(name, "controller-type") == 0) { 3989 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3990 un->un_ctype = val; 3991 } else { 3992 goto value_invalid; 3993 } 3994 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3995 "ctype set to %d\n", un->un_ctype); 3996 return; 3997 } 3998 3999 if (strcasecmp(name, "delay-busy") == 0) { 4000 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4001 un->un_busy_timeout = drv_usectohz(val / 1000); 4002 } else { 4003 goto value_invalid; 4004 } 4005 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4006 "busy_timeout set to %d\n", un->un_busy_timeout); 4007 return; 4008 } 4009 4010 if (strcasecmp(name, "disksort") == 0) { 4011 if (strcasecmp(value, "true") == 0) { 4012 un->un_f_disksort_disabled = FALSE; 4013 } else if (strcasecmp(value, "false") == 0) { 4014 un->un_f_disksort_disabled = TRUE; 4015 } else { 4016 goto value_invalid; 4017 } 4018 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4019 "disksort disabled flag set to %d\n", 4020 un->un_f_disksort_disabled); 4021 return; 4022 } 4023 4024 if (strcasecmp(name, "timeout-releasereservation") == 0) { 4025 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4026 un->un_reserve_release_time = val; 4027 } else { 4028 goto value_invalid; 4029 } 4030 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4031 "reservation release timeout set to %d\n", 4032 un->un_reserve_release_time); 4033 return; 4034 } 4035 4036 if (strcasecmp(name, "reset-lun") == 0) { 4037 if (strcasecmp(value, "true") == 0) { 4038 un->un_f_lun_reset_enabled = TRUE; 4039 } else if (strcasecmp(value, "false") == 0) { 4040 un->un_f_lun_reset_enabled = FALSE; 4041 } else { 4042 goto value_invalid; 4043 } 4044 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4045 "lun reset enabled flag set to %d\n", 4046 un->un_f_lun_reset_enabled); 4047 return; 4048 } 4049 4050 if (strcasecmp(name, "retries-busy") == 0) { 4051 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4052 un->un_busy_retry_count = val; 4053 } else { 4054 goto value_invalid; 4055 } 4056 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4057 "busy retry 
count set to %d\n", un->un_busy_retry_count);
4058 		return;
4059 	}
4060
4061 	if (strcasecmp(name, "retries-timeout") == 0) {
4062 		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4063 			un->un_retry_count = val;
4064 		} else {
4065 			goto value_invalid;
4066 		}
4067 		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4068 		    "timeout retry count set to %d\n", un->un_retry_count);
4069 		return;
4070 	}
4071
4072 	if (strcasecmp(name, "retries-notready") == 0) {
4073 		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4074 			un->un_notready_retry_count = val;
4075 		} else {
4076 			goto value_invalid;
4077 		}
4078 		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4079 		    "notready retry count set to %d\n",
4080 		    un->un_notready_retry_count);
4081 		return;
4082 	}
4083
4084 	if (strcasecmp(name, "retries-reset") == 0) {
4085 		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4086 			un->un_reset_retry_count = val;
4087 		} else {
4088 			goto value_invalid;
4089 		}
4090 		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4091 		    "reset retry count set to %d\n",
4092 		    un->un_reset_retry_count);
4093 		return;
4094 	}
4095
4096 	if (strcasecmp(name, "throttle-max") == 0) {
4097 		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4098 			un->un_saved_throttle = un->un_throttle = val;
4099 		} else {
4100 			goto value_invalid;
4101 		}
4102 		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4103 		    "throttle set to %d\n", un->un_throttle);
4104 	}
4105
4106 	if (strcasecmp(name, "throttle-min") == 0) {
4107 		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4108 			un->un_min_throttle = val;
4109 		} else {
4110 			goto value_invalid;
4111 		}
4112 		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4113 		    "min throttle set to %d\n", un->un_min_throttle);
4114 	}
4115
4116 	if (strcasecmp(name, "rmw-type") == 0) {
4117 		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4118 			un->un_f_rmw_type = val;
4119 		} else {
4120 			goto value_invalid;
4121 		}
4122 		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4123 		    "RMW type set to %d\n", un->un_f_rmw_type);
4124 	}
4125
4126 	/*
4127 	 * Validate the throttle values.
4128 	 * If any of the numbers are invalid, set everything to defaults.
4129 	 */
4130 	if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
4131 	    (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
4132 	    (un->un_min_throttle > un->un_throttle)) {
4133 		un->un_saved_throttle = un->un_throttle = sd_max_throttle;
4134 		un->un_min_throttle = sd_min_throttle;
4135 	}
4136 	return;
4137
4138 value_invalid:
4139 	SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4140 	    "value of prop %s is invalid\n", name);
4141 }
4142
4143 /*
4144  * Function: sd_get_tunables_from_conf()
4145  *
4146  *
4147  * This function reads the data list from the sd.conf file and
4148  * pulls the entries that carry numeric values, placing each one
4149  * in the appropriate sd_tunables member.
4150  * Since the order of the data list members varies across platforms,
4151  * this function reads them from the data list in a platform-specific
4152  * order and places them into the sd_tunables member that is
4153  * consistent across all platforms.
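 *
 * For example (hypothetical values; the bit assignments here are
 * assumptions for illustration): if SD_CONF_BSET_THROTTLE were bit 0
 * and SD_CONF_BSET_RST_RETRIES bit 4, a flags word of 0x11 with a
 * data list of { 90, 0, 0, 0, 4, ... } would yield
 * values->sdt_throttle = 90 and values->sdt_reset_retries = 4; the
 * slots for unset bits are present in the list but ignored.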
4154 */ 4155 static void 4156 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 4157 sd_tunables *values) 4158 { 4159 int i; 4160 int mask; 4161 4162 bzero(values, sizeof (sd_tunables)); 4163 4164 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4165 4166 mask = 1 << i; 4167 if (mask > flags) { 4168 break; 4169 } 4170 4171 switch (mask & flags) { 4172 case 0: /* This mask bit not set in flags */ 4173 continue; 4174 case SD_CONF_BSET_THROTTLE: 4175 values->sdt_throttle = data_list[i]; 4176 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4177 "sd_get_tunables_from_conf: throttle = %d\n", 4178 values->sdt_throttle); 4179 break; 4180 case SD_CONF_BSET_CTYPE: 4181 values->sdt_ctype = data_list[i]; 4182 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4183 "sd_get_tunables_from_conf: ctype = %d\n", 4184 values->sdt_ctype); 4185 break; 4186 case SD_CONF_BSET_NRR_COUNT: 4187 values->sdt_not_rdy_retries = data_list[i]; 4188 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4189 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 4190 values->sdt_not_rdy_retries); 4191 break; 4192 case SD_CONF_BSET_BSY_RETRY_COUNT: 4193 values->sdt_busy_retries = data_list[i]; 4194 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4195 "sd_get_tunables_from_conf: busy_retries = %d\n", 4196 values->sdt_busy_retries); 4197 break; 4198 case SD_CONF_BSET_RST_RETRIES: 4199 values->sdt_reset_retries = data_list[i]; 4200 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4201 "sd_get_tunables_from_conf: reset_retries = %d\n", 4202 values->sdt_reset_retries); 4203 break; 4204 case SD_CONF_BSET_RSV_REL_TIME: 4205 values->sdt_reserv_rel_time = data_list[i]; 4206 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4207 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 4208 values->sdt_reserv_rel_time); 4209 break; 4210 case SD_CONF_BSET_MIN_THROTTLE: 4211 values->sdt_min_throttle = data_list[i]; 4212 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4213 "sd_get_tunables_from_conf: min_throttle = %d\n", 4214 values->sdt_min_throttle); 4215 break; 4216 case SD_CONF_BSET_DISKSORT_DISABLED: 4217 values->sdt_disk_sort_dis = data_list[i]; 4218 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4219 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 4220 values->sdt_disk_sort_dis); 4221 break; 4222 case SD_CONF_BSET_LUN_RESET_ENABLED: 4223 values->sdt_lun_reset_enable = data_list[i]; 4224 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4225 "sd_get_tunables_from_conf: lun_reset_enable = %d" 4226 "\n", values->sdt_lun_reset_enable); 4227 break; 4228 case SD_CONF_BSET_CACHE_IS_NV: 4229 values->sdt_suppress_cache_flush = data_list[i]; 4230 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4231 "sd_get_tunables_from_conf: \ 4232 suppress_cache_flush = %d" 4233 "\n", values->sdt_suppress_cache_flush); 4234 break; 4235 } 4236 } 4237 } 4238 4239 /* 4240 * Function: sd_process_sdconf_table 4241 * 4242 * Description: Search the static configuration table for a match on the 4243 * inquiry vid/pid and update the driver soft state structure 4244 * according to the table property values for the device. 
4245 * 4246 * The form of a configuration table entry is: 4247 * <vid+pid>,<flags>,<property-data> 4248 * "SEAGATE ST42400N",1,0x40000, 4249 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 4250 * 4251 * Arguments: un - driver soft state (unit) structure 4252 */ 4253 4254 static void 4255 sd_process_sdconf_table(struct sd_lun *un) 4256 { 4257 char *id = NULL; 4258 int table_index; 4259 int idlen; 4260 4261 ASSERT(un != NULL); 4262 for (table_index = 0; table_index < sd_disk_table_size; 4263 table_index++) { 4264 id = sd_disk_table[table_index].device_id; 4265 idlen = strlen(id); 4266 if (idlen == 0) { 4267 continue; 4268 } 4269 4270 /* 4271 * The static configuration table currently does not 4272 * implement version 10 properties. Additionally, 4273 * multiple data-property-name entries are not 4274 * implemented in the static configuration table. 4275 */ 4276 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4277 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4278 "sd_process_sdconf_table: disk %s\n", id); 4279 sd_set_vers1_properties(un, 4280 sd_disk_table[table_index].flags, 4281 sd_disk_table[table_index].properties); 4282 break; 4283 } 4284 } 4285 } 4286 4287 4288 /* 4289 * Function: sd_sdconf_id_match 4290 * 4291 * Description: This local function implements a case sensitive vid/pid 4292 * comparison as well as the boundary cases of wild card and 4293 * multiple blanks. 4294 * 4295 * Note: An implicit assumption made here is that the scsi 4296 * inquiry structure will always keep the vid, pid and 4297 * revision strings in consecutive sequence, so they can be 4298 * read as a single string. If this assumption is not the 4299 * case, a separate string, to be used for the check, needs 4300 * to be built with these strings concatenated. 4301 * 4302 * Arguments: un - driver soft state (unit) structure 4303 * id - table or config file vid/pid 4304 * idlen - length of the vid/pid (bytes) 4305 * 4306 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4307 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4308 */ 4309 4310 static int 4311 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 4312 { 4313 struct scsi_inquiry *sd_inq; 4314 int rval = SD_SUCCESS; 4315 4316 ASSERT(un != NULL); 4317 sd_inq = un->un_sd->sd_inq; 4318 ASSERT(id != NULL); 4319 4320 /* 4321 * We use the inq_vid as a pointer to a buffer containing the 4322 * vid and pid and use the entire vid/pid length of the table 4323 * entry for the comparison. This works because the inq_pid 4324 * data member follows inq_vid in the scsi_inquiry structure. 4325 */ 4326 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 4327 /* 4328 * The user id string is compared to the inquiry vid/pid 4329 * using a case insensitive comparison and ignoring 4330 * multiple spaces. 4331 */ 4332 rval = sd_blank_cmp(un, id, idlen); 4333 if (rval != SD_SUCCESS) { 4334 /* 4335 * User id strings that start and end with a "*" 4336 * are a special case. These do not have a 4337 * specific vendor, and the product string can 4338 * appear anywhere in the 16 byte PID portion of 4339 * the inquiry data. This is a simple strstr() 4340 * type search for the user id in the inquiry data. 
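 * For example, a hypothetical entry of "*SUPERDISK*" would match any
 * device whose 16-byte PID field contains "SUPERDISK", regardless of
 * the vendor in the VID field.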
4341 */ 4342 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4343 char *pidptr = &id[1]; 4344 int i; 4345 int j; 4346 int pidstrlen = idlen - 2; 4347 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4348 pidstrlen; 4349 4350 if (j < 0) { 4351 return (SD_FAILURE); 4352 } 4353 for (i = 0; i < j; i++) { 4354 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4355 pidptr, pidstrlen) == 0) { 4356 rval = SD_SUCCESS; 4357 break; 4358 } 4359 } 4360 } 4361 } 4362 } 4363 return (rval); 4364 } 4365 4366 4367 /* 4368 * Function: sd_blank_cmp 4369 * 4370 * Description: If the id string starts and ends with a space, treat 4371 * multiple consecutive spaces as equivalent to a single 4372 * space. For example, this causes a sd_disk_table entry 4373 * of " NEC CDROM " to match a device's id string of 4374 * "NEC CDROM". 4375 * 4376 * Note: The success exit condition for this routine is if 4377 * the pointer to the table entry is '\0' and the cnt of 4378 * the inquiry length is zero. This will happen if the inquiry 4379 * string returned by the device is padded with spaces to be 4380 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4381 * SCSI spec states that the inquiry string is to be padded with 4382 * spaces. 4383 * 4384 * Arguments: un - driver soft state (unit) structure 4385 * id - table or config file vid/pid 4386 * idlen - length of the vid/pid (bytes) 4387 * 4388 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4389 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4390 */ 4391 4392 static int 4393 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4394 { 4395 char *p1; 4396 char *p2; 4397 int cnt; 4398 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4399 sizeof (SD_INQUIRY(un)->inq_pid); 4400 4401 ASSERT(un != NULL); 4402 p2 = un->un_sd->sd_inq->inq_vid; 4403 ASSERT(id != NULL); 4404 p1 = id; 4405 4406 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4407 /* 4408 * Note: string p1 is terminated by a NUL but string p2 4409 * isn't. The end of p2 is determined by cnt. 4410 */ 4411 for (;;) { 4412 /* skip over any extra blanks in both strings */ 4413 while ((*p1 != '\0') && (*p1 == ' ')) { 4414 p1++; 4415 } 4416 while ((cnt != 0) && (*p2 == ' ')) { 4417 p2++; 4418 cnt--; 4419 } 4420 4421 /* compare the two strings */ 4422 if ((cnt == 0) || 4423 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4424 break; 4425 } 4426 while ((cnt > 0) && 4427 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4428 p1++; 4429 p2++; 4430 cnt--; 4431 } 4432 } 4433 } 4434 4435 /* return SD_SUCCESS if both strings match */ 4436 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE); 4437 } 4438 4439 4440 /* 4441 * Function: sd_chk_vers1_data 4442 * 4443 * Description: Verify the version 1 device properties provided by the 4444 * user via the configuration file 4445 * 4446 * Arguments: un - driver soft state (unit) structure 4447 * flags - integer mask indicating properties to be set 4448 * prop_list - integer list of property values 4449 * list_len - number of the elements 4450 * 4451 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 4452 * SD_FAILURE - Indicates the user provided data is invalid 4453 */ 4454 4455 static int 4456 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 4457 int list_len, char *dataname_ptr) 4458 { 4459 int i; 4460 int mask = 1; 4461 int index = 0; 4462 4463 ASSERT(un != NULL); 4464 4465 /* Check for a NULL property name and list */ 4466 if (dataname_ptr == NULL) { 4467 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4468 "sd_chk_vers1_data: NULL data property name."); 4469 return (SD_FAILURE); 4470 } 4471 if (prop_list == NULL) { 4472 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4473 "sd_chk_vers1_data: %s NULL data property list.", 4474 dataname_ptr); 4475 return (SD_FAILURE); 4476 } 4477 4478 /* Display a warning if undefined bits are set in the flags */ 4479 if (flags & ~SD_CONF_BIT_MASK) { 4480 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4481 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 4482 "Properties not set.", 4483 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 4484 return (SD_FAILURE); 4485 } 4486 4487 /* 4488 * Verify the length of the list by identifying the highest bit set 4489 * in the flags and validating that the property list has a length 4490 * up to the index of this bit. 4491 */ 4492 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4493 if (flags & mask) { 4494 index++; 4495 } 4496 mask = 1 << i; 4497 } 4498 if (list_len < (index + 2)) { 4499 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4500 "sd_chk_vers1_data: " 4501 "Data property list %s size is incorrect. " 4502 "Properties not set.", dataname_ptr); 4503 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 4504 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 4505 return (SD_FAILURE); 4506 } 4507 return (SD_SUCCESS); 4508 } 4509 4510 4511 /* 4512 * Function: sd_set_vers1_properties 4513 * 4514 * Description: Set version 1 device properties based on a property list 4515 * retrieved from the driver configuration file or static 4516 * configuration table. Version 1 properties have the format: 4517 * 4518 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 4519 * 4520 * where the prop0 value will be used to set prop0 if bit0 4521 * is set in the flags 4522 * 4523 * Arguments: un - driver soft state (unit) structure 4524 * flags - integer mask indicating properties to be set 4525 * prop_list - integer list of property values 4526 */ 4527 4528 static void 4529 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4530 { 4531 ASSERT(un != NULL); 4532 4533 /* 4534 * Set the flag to indicate cache is to be disabled. An attempt 4535 * to disable the cache via sd_cache_control() will be made 4536 * later during attach once the basic initialization is complete. 
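 *
 * As a worked illustration of the vers-1 format consumed here (the
 * device name, the data values, and the assumption that the throttle
 * is bit 0 of the flags word are all hypothetical):
 *
 *	sd-config-list = "ACME    SUPERDISK", "acme-data";
 *	acme-data = 1, 0x1, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0;
 *
 * Here the version is 1 and the flags word is 0x1, so only prop0
 * (64) is consumed, e.g. as the throttle value.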
4537 */ 4538 if (flags & SD_CONF_BSET_NOCACHE) { 4539 un->un_f_opt_disable_cache = TRUE; 4540 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4541 "sd_set_vers1_properties: caching disabled flag set\n"); 4542 } 4543 4544 /* CD-specific configuration parameters */ 4545 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4546 un->un_f_cfg_playmsf_bcd = TRUE; 4547 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4548 "sd_set_vers1_properties: playmsf_bcd set\n"); 4549 } 4550 if (flags & SD_CONF_BSET_READSUB_BCD) { 4551 un->un_f_cfg_readsub_bcd = TRUE; 4552 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4553 "sd_set_vers1_properties: readsub_bcd set\n"); 4554 } 4555 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4556 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4557 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4558 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4559 } 4560 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4561 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4562 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4563 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4564 } 4565 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4566 un->un_f_cfg_no_read_header = TRUE; 4567 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4568 "sd_set_vers1_properties: no_read_header set\n"); 4569 } 4570 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4571 un->un_f_cfg_read_cd_xd4 = TRUE; 4572 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4573 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4574 } 4575 4576 /* Support for devices which do not have valid/unique serial numbers */ 4577 if (flags & SD_CONF_BSET_FAB_DEVID) { 4578 un->un_f_opt_fab_devid = TRUE; 4579 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4580 "sd_set_vers1_properties: fab_devid bit set\n"); 4581 } 4582 4583 /* Support for user throttle configuration */ 4584 if (flags & SD_CONF_BSET_THROTTLE) { 4585 ASSERT(prop_list != NULL); 4586 un->un_saved_throttle = un->un_throttle = 4587 prop_list->sdt_throttle; 4588 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4589 "sd_set_vers1_properties: throttle set to %d\n", 4590 prop_list->sdt_throttle); 4591 } 4592 4593 /* Set the per disk retry count according to the conf file or table. 
*/ 4594 if (flags & SD_CONF_BSET_NRR_COUNT) { 4595 ASSERT(prop_list != NULL); 4596 if (prop_list->sdt_not_rdy_retries) { 4597 un->un_notready_retry_count = 4598 prop_list->sdt_not_rdy_retries; 4599 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4600 "sd_set_vers1_properties: not ready retry count" 4601 " set to %d\n", un->un_notready_retry_count); 4602 } 4603 } 4604 4605 /* The controller type is reported for generic disk driver ioctls */ 4606 if (flags & SD_CONF_BSET_CTYPE) { 4607 ASSERT(prop_list != NULL); 4608 switch (prop_list->sdt_ctype) { 4609 case CTYPE_CDROM: 4610 un->un_ctype = prop_list->sdt_ctype; 4611 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4612 "sd_set_vers1_properties: ctype set to " 4613 "CTYPE_CDROM\n"); 4614 break; 4615 case CTYPE_CCS: 4616 un->un_ctype = prop_list->sdt_ctype; 4617 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4618 "sd_set_vers1_properties: ctype set to " 4619 "CTYPE_CCS\n"); 4620 break; 4621 case CTYPE_ROD: /* RW optical */ 4622 un->un_ctype = prop_list->sdt_ctype; 4623 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4624 "sd_set_vers1_properties: ctype set to " 4625 "CTYPE_ROD\n"); 4626 break; 4627 default: 4628 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4629 "sd_set_vers1_properties: Could not set " 4630 "invalid ctype value (%d)", 4631 prop_list->sdt_ctype); 4632 } 4633 } 4634 4635 /* Purple failover timeout */ 4636 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4637 ASSERT(prop_list != NULL); 4638 un->un_busy_retry_count = 4639 prop_list->sdt_busy_retries; 4640 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4641 "sd_set_vers1_properties: " 4642 "busy retry count set to %d\n", 4643 un->un_busy_retry_count); 4644 } 4645 4646 /* Purple reset retry count */ 4647 if (flags & SD_CONF_BSET_RST_RETRIES) { 4648 ASSERT(prop_list != NULL); 4649 un->un_reset_retry_count = 4650 prop_list->sdt_reset_retries; 4651 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4652 "sd_set_vers1_properties: " 4653 "reset retry count set to %d\n", 4654 un->un_reset_retry_count); 4655 } 4656 4657 /* Purple reservation release timeout */ 4658 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4659 ASSERT(prop_list != NULL); 4660 un->un_reserve_release_time = 4661 prop_list->sdt_reserv_rel_time; 4662 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4663 "sd_set_vers1_properties: " 4664 "reservation release timeout set to %d\n", 4665 un->un_reserve_release_time); 4666 } 4667 4668 /* 4669 * Driver flag telling the driver to verify that no commands are pending 4670 * for a device before issuing a Test Unit Ready. This is a workaround 4671 * for a firmware bug in some Seagate eliteI drives. 4672 */ 4673 if (flags & SD_CONF_BSET_TUR_CHECK) { 4674 un->un_f_cfg_tur_check = TRUE; 4675 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4676 "sd_set_vers1_properties: tur queue check set\n"); 4677 } 4678 4679 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4680 un->un_min_throttle = prop_list->sdt_min_throttle; 4681 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4682 "sd_set_vers1_properties: min throttle set to %d\n", 4683 un->un_min_throttle); 4684 } 4685 4686 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4687 un->un_f_disksort_disabled = 4688 (prop_list->sdt_disk_sort_dis != 0) ? 4689 TRUE : FALSE; 4690 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4691 "sd_set_vers1_properties: disksort disabled " 4692 "flag set to %d\n", 4693 prop_list->sdt_disk_sort_dis); 4694 } 4695 4696 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4697 un->un_f_lun_reset_enabled = 4698 (prop_list->sdt_lun_reset_enable != 0) ? 
4699 TRUE : FALSE; 4700 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4701 "sd_set_vers1_properties: lun reset enabled " 4702 "flag set to %d\n", 4703 prop_list->sdt_lun_reset_enable); 4704 } 4705 4706 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4707 un->un_f_suppress_cache_flush = 4708 (prop_list->sdt_suppress_cache_flush != 0) ? 4709 TRUE : FALSE; 4710 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4711 "sd_set_vers1_properties: suppress_cache_flush " 4712 "flag set to %d\n", 4713 prop_list->sdt_suppress_cache_flush); 4714 } 4715 4716 /* 4717 * Validate the throttle values. 4718 * If any of the numbers are invalid, set everything to defaults. 4719 */ 4720 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4721 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4722 (un->un_min_throttle > un->un_throttle)) { 4723 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4724 un->un_min_throttle = sd_min_throttle; 4725 } 4726 } 4727 4728 /* 4729 * Function: sd_is_lsi() 4730 * 4731 * Description: Check for lsi devices, step through the static device 4732 * table to match vid/pid. 4733 * 4734 * Args: un - ptr to sd_lun 4735 * 4736 * Notes: When creating new LSI property, need to add the new LSI property 4737 * to this function. 4738 */ 4739 static void 4740 sd_is_lsi(struct sd_lun *un) 4741 { 4742 char *id = NULL; 4743 int table_index; 4744 int idlen; 4745 void *prop; 4746 4747 ASSERT(un != NULL); 4748 for (table_index = 0; table_index < sd_disk_table_size; 4749 table_index++) { 4750 id = sd_disk_table[table_index].device_id; 4751 idlen = strlen(id); 4752 if (idlen == 0) { 4753 continue; 4754 } 4755 4756 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4757 prop = sd_disk_table[table_index].properties; 4758 if (prop == &lsi_properties || 4759 prop == &lsi_oem_properties || 4760 prop == &lsi_properties_scsi || 4761 prop == &symbios_properties) { 4762 un->un_f_cfg_is_lsi = TRUE; 4763 } 4764 break; 4765 } 4766 } 4767 } 4768 4769 /* 4770 * Function: sd_get_physical_geometry 4771 * 4772 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4773 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4774 * target, and use this information to initialize the physical 4775 * geometry cache specified by pgeom_p. 4776 * 4777 * MODE SENSE is an optional command, so failure in this case 4778 * does not necessarily denote an error. We want to use the 4779 * MODE SENSE commands to derive the physical geometry of the 4780 * device, but if either command fails, the logical geometry is 4781 * used as the fallback for disk label geometry in cmlb. 4782 * 4783 * This requires that un->un_blockcount and un->un_tgt_blocksize 4784 * have already been initialized for the current target and 4785 * that the current values be passed as args so that we don't 4786 * end up ever trying to use -1 as a valid value. This could 4787 * happen if either value is reset while we're not holding 4788 * the mutex. 4789 * 4790 * Arguments: un - driver soft state (unit) structure 4791 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4792 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4793 * to use the USCSI "direct" chain and bypass the normal 4794 * command waitq. 4795 * 4796 * Context: Kernel thread only (can sleep). 
4797 */ 4798 4799 static int 4800 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4801 diskaddr_t capacity, int lbasize, int path_flag) 4802 { 4803 struct mode_format *page3p; 4804 struct mode_geometry *page4p; 4805 struct mode_header *headerp; 4806 int sector_size; 4807 int nsect; 4808 int nhead; 4809 int ncyl; 4810 int intrlv; 4811 int spc; 4812 diskaddr_t modesense_capacity; 4813 int rpm; 4814 int bd_len; 4815 int mode_header_length; 4816 uchar_t *p3bufp; 4817 uchar_t *p4bufp; 4818 int cdbsize; 4819 int ret = EIO; 4820 sd_ssc_t *ssc; 4821 int status; 4822 4823 ASSERT(un != NULL); 4824 4825 if (lbasize == 0) { 4826 if (ISCD(un)) { 4827 lbasize = 2048; 4828 } else { 4829 lbasize = un->un_sys_blocksize; 4830 } 4831 } 4832 pgeom_p->g_secsize = (unsigned short)lbasize; 4833 4834 /* 4835 * If the unit is a cd/dvd drive MODE SENSE page three 4836 * and MODE SENSE page four are reserved (see SBC spec 4837 * and MMC spec). To prevent soft errors just return 4838 * using the default LBA size. 4839 */ 4840 if (ISCD(un)) 4841 return (ret); 4842 4843 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4844 4845 /* 4846 * Retrieve MODE SENSE page 3 - Format Device Page 4847 */ 4848 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4849 ssc = sd_ssc_init(un); 4850 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp, 4851 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag); 4852 if (status != 0) { 4853 SD_ERROR(SD_LOG_COMMON, un, 4854 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4855 goto page3_exit; 4856 } 4857 4858 /* 4859 * Determine size of Block Descriptors in order to locate the mode 4860 * page data. ATAPI devices return 0, SCSI devices should return 4861 * MODE_BLK_DESC_LENGTH. 4862 */ 4863 headerp = (struct mode_header *)p3bufp; 4864 if (un->un_f_cfg_is_atapi == TRUE) { 4865 struct mode_header_grp2 *mhp = 4866 (struct mode_header_grp2 *)headerp; 4867 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4868 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4869 } else { 4870 mode_header_length = MODE_HEADER_LENGTH; 4871 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4872 } 4873 4874 if (bd_len > MODE_BLK_DESC_LENGTH) { 4875 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4876 "sd_get_physical_geometry: received unexpected bd_len " 4877 "of %d, page3\n", bd_len); 4878 status = EIO; 4879 goto page3_exit; 4880 } 4881 4882 page3p = (struct mode_format *) 4883 ((caddr_t)headerp + mode_header_length + bd_len); 4884 4885 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4886 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4887 "sd_get_physical_geometry: mode sense pg3 code mismatch " 4888 "%d\n", page3p->mode_page.code); 4889 status = EIO; 4890 goto page3_exit; 4891 } 4892 4893 /* 4894 * Use this physical geometry data only if BOTH MODE SENSE commands 4895 * complete successfully; otherwise, revert to the logical geometry. 4896 * So, we need to save everything in temporary variables. 
4897 */ 4898 sector_size = BE_16(page3p->data_bytes_sect); 4899 4900 /* 4901 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4902 */ 4903 if (sector_size == 0) { 4904 sector_size = un->un_sys_blocksize; 4905 } else { 4906 sector_size &= ~(un->un_sys_blocksize - 1); 4907 } 4908 4909 nsect = BE_16(page3p->sect_track); 4910 intrlv = BE_16(page3p->interleave); 4911 4912 SD_INFO(SD_LOG_COMMON, un, 4913 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4914 SD_INFO(SD_LOG_COMMON, un, 4915 " mode page: %d; nsect: %d; sector size: %d;\n", 4916 page3p->mode_page.code, nsect, sector_size); 4917 SD_INFO(SD_LOG_COMMON, un, 4918 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4919 BE_16(page3p->track_skew), 4920 BE_16(page3p->cylinder_skew)); 4921 4922 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 4923 4924 /* 4925 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4926 */ 4927 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4928 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp, 4929 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag); 4930 if (status != 0) { 4931 SD_ERROR(SD_LOG_COMMON, un, 4932 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4933 goto page4_exit; 4934 } 4935 4936 /* 4937 * Determine size of Block Descriptors in order to locate the mode 4938 * page data. ATAPI devices return 0, SCSI devices should return 4939 * MODE_BLK_DESC_LENGTH. 4940 */ 4941 headerp = (struct mode_header *)p4bufp; 4942 if (un->un_f_cfg_is_atapi == TRUE) { 4943 struct mode_header_grp2 *mhp = 4944 (struct mode_header_grp2 *)headerp; 4945 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4946 } else { 4947 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4948 } 4949 4950 if (bd_len > MODE_BLK_DESC_LENGTH) { 4951 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4952 "sd_get_physical_geometry: received unexpected bd_len of " 4953 "%d, page4\n", bd_len); 4954 status = EIO; 4955 goto page4_exit; 4956 } 4957 4958 page4p = (struct mode_geometry *) 4959 ((caddr_t)headerp + mode_header_length + bd_len); 4960 4961 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4962 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4963 "sd_get_physical_geometry: mode sense pg4 code mismatch " 4964 "%d\n", page4p->mode_page.code); 4965 status = EIO; 4966 goto page4_exit; 4967 } 4968 4969 /* 4970 * Stash the data now, after we know that both commands completed. 4971 */ 4972 4973 4974 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4975 spc = nhead * nsect; 4976 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4977 rpm = BE_16(page4p->rpm); 4978 4979 modesense_capacity = spc * ncyl; 4980 4981 SD_INFO(SD_LOG_COMMON, un, 4982 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4983 SD_INFO(SD_LOG_COMMON, un, 4984 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4985 SD_INFO(SD_LOG_COMMON, un, 4986 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4987 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4988 (void *)pgeom_p, capacity); 4989 4990 /* 4991 * Compensate if the drive's geometry is not rectangular, i.e., 4992 * the product of C * H * S returned by MODE SENSE >= that returned 4993 * by read capacity. This is an idiosyncrasy of the original x86 4994 * disk subsystem. 
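 *
 * Worked example (hypothetical numbers): with nhead = 16 and
 * nsect = 63, spc = 1008; ncyl = 1100 then gives a MODE SENSE
 * capacity of 1,108,800 blocks. If READ CAPACITY reported 1,100,000
 * blocks, g_acyl = (1108800 - 1100000 + 1007) / 1008 = 9 alternate
 * cylinders and g_ncyl is trimmed to 1100 - 9 = 1091.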
4995 */ 4996 if (modesense_capacity >= capacity) { 4997 SD_INFO(SD_LOG_COMMON, un, 4998 "sd_get_physical_geometry: adjusting acyl; " 4999 "old: %d; new: %d\n", pgeom_p->g_acyl, 5000 (modesense_capacity - capacity + spc - 1) / spc); 5001 if (sector_size != 0) { 5002 /* 1243403: NEC D38x7 drives don't support sec size */ 5003 pgeom_p->g_secsize = (unsigned short)sector_size; 5004 } 5005 pgeom_p->g_nsect = (unsigned short)nsect; 5006 pgeom_p->g_nhead = (unsigned short)nhead; 5007 pgeom_p->g_capacity = capacity; 5008 pgeom_p->g_acyl = 5009 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 5010 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 5011 } 5012 5013 pgeom_p->g_rpm = (unsigned short)rpm; 5014 pgeom_p->g_intrlv = (unsigned short)intrlv; 5015 ret = 0; 5016 5017 SD_INFO(SD_LOG_COMMON, un, 5018 "sd_get_physical_geometry: mode sense geometry:\n"); 5019 SD_INFO(SD_LOG_COMMON, un, 5020 " nsect: %d; sector size: %d; interlv: %d\n", 5021 nsect, sector_size, intrlv); 5022 SD_INFO(SD_LOG_COMMON, un, 5023 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 5024 nhead, ncyl, rpm, modesense_capacity); 5025 SD_INFO(SD_LOG_COMMON, un, 5026 "sd_get_physical_geometry: (cached)\n"); 5027 SD_INFO(SD_LOG_COMMON, un, 5028 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 5029 pgeom_p->g_ncyl, pgeom_p->g_acyl, 5030 pgeom_p->g_nhead, pgeom_p->g_nsect); 5031 SD_INFO(SD_LOG_COMMON, un, 5032 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 5033 pgeom_p->g_secsize, pgeom_p->g_capacity, 5034 pgeom_p->g_intrlv, pgeom_p->g_rpm); 5035 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 5036 5037 page4_exit: 5038 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 5039 5040 page3_exit: 5041 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 5042 5043 if (status != 0) { 5044 if (status == EIO) { 5045 /* 5046 * Some disks do not support mode sense(6), we 5047 * should ignore this kind of error(sense key is 5048 * 0x5 - illegal request). 5049 */ 5050 uint8_t *sensep; 5051 int senlen; 5052 5053 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 5054 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 5055 ssc->ssc_uscsi_cmd->uscsi_rqresid); 5056 5057 if (senlen > 0 && 5058 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 5059 sd_ssc_assessment(ssc, 5060 SD_FMT_IGNORE_COMPROMISE); 5061 } else { 5062 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 5063 } 5064 } else { 5065 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5066 } 5067 } 5068 sd_ssc_fini(ssc); 5069 return (ret); 5070 } 5071 5072 /* 5073 * Function: sd_get_virtual_geometry 5074 * 5075 * Description: Ask the controller to tell us about the target device. 5076 * 5077 * Arguments: un - pointer to softstate 5078 * capacity - disk capacity in #blocks 5079 * lbasize - disk block size in bytes 5080 * 5081 * Context: Kernel thread only 5082 */ 5083 5084 static int 5085 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 5086 diskaddr_t capacity, int lbasize) 5087 { 5088 uint_t geombuf; 5089 int spc; 5090 5091 ASSERT(un != NULL); 5092 5093 /* Set sector size, and total number of sectors */ 5094 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 5095 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 5096 5097 /* Let the HBA tell us its geometry */ 5098 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 5099 5100 /* A value of -1 indicates an undefined "geometry" property */ 5101 if (geombuf == (-1)) { 5102 return (EINVAL); 5103 } 5104 5105 /* Initialize the logical geometry cache. 
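 *
 * The "geometry" capability packs nhead into the upper 16 bits of
 * geombuf and nsect into the lower 16; e.g. a hypothetical geombuf
 * of 0x00100020 decodes to nhead = 16 and nsect = 32.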

/*
 * Function: sd_get_virtual_geometry
 *
 * Description: Ask the controller to tell us about the target device.
 *
 * Arguments: un - pointer to softstate
 *	      lgeom_p - pointer to the logical geometry structure to fill in
 *	      capacity - disk capacity in #blocks
 *	      lbasize - disk block size in bytes
 *
 * Context: Kernel thread only
 */

static int
sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p,
    diskaddr_t capacity, int lbasize)
{
	uint_t	geombuf;
	int	spc;

	ASSERT(un != NULL);

	/* Set sector size, and total number of sectors */
	(void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1);
	(void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1);

	/* Let the HBA tell us its geometry */
	geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1);

	/* A value of -1 indicates an undefined "geometry" property */
	if (geombuf == (-1)) {
		return (EINVAL);
	}

	/* Initialize the logical geometry cache. */
	lgeom_p->g_nhead   = (geombuf >> 16) & 0xffff;
	lgeom_p->g_nsect   = geombuf & 0xffff;
	lgeom_p->g_secsize = un->un_sys_blocksize;

	spc = lgeom_p->g_nhead * lgeom_p->g_nsect;

	/*
	 * Note: The driver originally converted the capacity value from
	 * target blocks to system blocks.  However, the capacity value passed
	 * to this routine is already in terms of system blocks (this scaling
	 * is done when the READ CAPACITY command is issued and processed).
	 * This 'error' may have gone undetected because the usage of g_ncyl
	 * (which is based upon g_capacity) is very limited within the driver.
	 */
	lgeom_p->g_capacity = capacity;

	/*
	 * Set ncyl to zero if the HBA returned a zero nhead or nsect value.
	 * The HBA may return zero values if the device has been removed.
	 */
	if (spc == 0) {
		lgeom_p->g_ncyl = 0;
	} else {
		lgeom_p->g_ncyl = lgeom_p->g_capacity / spc;
	}
	lgeom_p->g_acyl = 0;

	SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n");
	return (0);
}
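
/*
 * Illustrative sketch (not part of the driver): the "geometry" capability
 * word decoded above packs heads into the upper 16 bits and sectors/track
 * into the lower 16.  The helper below is a hypothetical restatement of
 * that unpacking plus the cylinder derivation.
 */
#ifdef	SD_EXAMPLE_SKETCHES
static void
sd_example_decode_geom_cap(uint_t geombuf, diskaddr_t capacity,
    uint_t *nheadp, uint_t *nsectp, diskaddr_t *ncylp)
{
	uint_t spc;

	*nheadp = (geombuf >> 16) & 0xffff;	/* heads: bits 31..16 */
	*nsectp = geombuf & 0xffff;		/* sectors/track: bits 15..0 */

	/* Guard against a zero geometry from a removed device. */
	spc = *nheadp * *nsectp;
	*ncylp = (spc == 0) ? 0 : capacity / spc;
}
#endif	/* SD_EXAMPLE_SKETCHES */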

/*
 * Function: sd_update_block_info
 *
 * Description: Save the new target sector size and capacity (i.e. the
 *		block count) in the soft state.
 *
 * Arguments: un: unit struct.
 *	      lbasize: new target sector size
 *	      capacity: new target capacity, i.e. block count
 *
 * Context: Kernel thread context
 */

static void
sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity)
{
	if (lbasize != 0) {
		un->un_tgt_blocksize = lbasize;
		un->un_f_tgt_blocksize_is_valid = TRUE;
		if (!un->un_f_has_removable_media) {
			un->un_sys_blocksize = lbasize;
		}
	}

	if (capacity != 0) {
		un->un_blockcount = capacity;
		un->un_f_blockcount_is_valid = TRUE;
	}
}


/*
 * Function: sd_register_devid
 *
 * Description: This routine will obtain the device id information from the
 *		target, obtain the serial number, and register the device
 *		id with the ddi framework.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		    structure
 *	      devi - the system's dev_info_t for the device.
 *	      reservation_flag - indicates if a reservation conflict
 *		    occurred during attach
 *
 * Context: Kernel Thread
 */
static void
sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag)
{
	int		rval = 0;
	uchar_t		*inq80 = NULL;
	size_t		inq80_len = MAX_INQUIRY_SIZE;
	size_t		inq80_resid = 0;
	uchar_t		*inq83 = NULL;
	size_t		inq83_len = MAX_INQUIRY_SIZE;
	size_t		inq83_resid = 0;
	int		dlen, len;
	char		*sn;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT((SD_DEVINFO(un)) == devi);

	/*
	 * We check the availability of the World Wide Name (0x83) and Unit
	 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and,
	 * using un_vpd_page_mask built from them, we decide which way to get
	 * the WWN.  If 0x83 is available, that is the best choice.  Our next
	 * choice is 0x80.  If neither are available, we munge the devid from
	 * the device vid/pid/serial # for Sun qualified disks, or use the
	 * ddi framework to fabricate a devid for non-Sun qualified disks.
	 */
	if (sd_check_vpd_page_support(ssc) == 0) {
		/* collect page 80 data if available */
		if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {

			mutex_exit(SD_MUTEX(un));
			inq80 = kmem_zalloc(inq80_len, KM_SLEEP);

			rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len,
			    0x01, 0x80, &inq80_resid);

			if (rval != 0) {
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				kmem_free(inq80, inq80_len);
				inq80 = NULL;
				inq80_len = 0;
			} else if (ddi_prop_exists(
			    DDI_DEV_T_NONE, SD_DEVINFO(un),
			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
			    INQUIRY_SERIAL_NO) == 0) {
				/*
				 * If we don't already have a serial number
				 * property, do a quick verify of the data
				 * returned and define the property.
				 */
				dlen = inq80_len - inq80_resid;
				len = (size_t)inq80[3];
				if ((dlen >= 4) && ((len + 4) <= dlen)) {
					/*
					 * Ensure sn termination, skip leading
					 * blanks, and create property
					 * 'inquiry-serial-no'.
					 */
					sn = (char *)&inq80[4];
					sn[len] = 0;
					while (*sn && (*sn == ' '))
						sn++;
					if (*sn) {
						(void) ddi_prop_update_string(
						    DDI_DEV_T_NONE,
						    SD_DEVINFO(un),
						    INQUIRY_SERIAL_NO, sn);
					}
				}
			}
			mutex_enter(SD_MUTEX(un));
		}

		/* collect page 83 data if available */
		if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
			mutex_exit(SD_MUTEX(un));
			inq83 = kmem_zalloc(inq83_len, KM_SLEEP);

			rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len,
			    0x01, 0x83, &inq83_resid);

			if (rval != 0) {
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				kmem_free(inq83, inq83_len);
				inq83 = NULL;
				inq83_len = 0;
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * If transport has already registered a devid for this target
	 * then that takes precedence over the driver's determination
	 * of the devid.
	 *
	 * NOTE: The reason this check is done here instead of at the
	 * beginning of the function is to allow the code above to create
	 * the 'inquiry-serial-no' property.
	 */
	if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) {
		ASSERT(un->un_devid);
		un->un_f_devid_transport_defined = TRUE;
		goto cleanup; /* use devid registered by the transport */
	}

	/*
	 * This is the case of antiquated Sun disk drives that have the
	 * FAB_DEVID property set in the disk_table.  These drives
	 * manage the devids by storing them in the last two available
	 * sectors on the drive and have them fabricated by the ddi layer
	 * by calling ddi_devid_init and passing the DEVID_FAB flag.
	 */
	if (un->un_f_opt_fab_devid == TRUE) {
		/*
		 * Depending on EINVAL isn't reliable, since a reserved disk
		 * may result in invalid geometry, so check to make sure a
		 * reservation conflict did not occur during attach.
		 */
		if ((sd_get_devid(ssc) == EINVAL) &&
		    (reservation_flag != SD_TARGET_IS_RESERVED)) {
			/*
			 * The devid is invalid AND there is no reservation
			 * conflict.  Fabricate a new devid.
			 */
			(void) sd_create_devid(ssc);
		}

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: Devid Fabricated\n");
		}
		goto cleanup;
	}

	/* encode best devid possible based on data available */
	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
	    (char *)ddi_driver_name(SD_DEVINFO(un)),
	    (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
	    inq80, inq80_len - inq80_resid, inq83, inq83_len -
	    inq83_resid, &un->un_devid) == DDI_SUCCESS) {

		/* devid successfully encoded, register devid */
		(void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);

	} else {
		/*
		 * Unable to encode a devid based on data available.
		 * This is not a Sun qualified disk.  Older Sun disk
		 * drives that have the SD_FAB_DEVID property
		 * set in the disk_table and non-Sun qualified
		 * disks are treated in the same manner.  These
		 * drives manage the devids by storing them in the
		 * last two available sectors on the drive and
		 * have them fabricated by the ddi layer by
		 * calling ddi_devid_init and passing the
		 * DEVID_FAB flag.
		 * Only create a fabricated devid if no fabricated
		 * devid already exists.
		 */
		if (sd_get_devid(ssc) == EINVAL) {
			(void) sd_create_devid(ssc);
		}
		un->un_f_opt_fab_devid = TRUE;

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: devid fabricated using "
			    "ddi framework\n");
		}
	}

cleanup:
	/* clean up resources */
	if (inq80 != NULL) {
		kmem_free(inq80, inq80_len);
	}
	if (inq83 != NULL) {
		kmem_free(inq83, inq83_len);
	}
}
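
/*
 * Illustrative note (not original to the driver): sd_register_devid() above
 * effectively applies this precedence when choosing a devid source:
 *
 *   1. a devid already registered by the transport (ddi_devid_get),
 *   2. a devid encoded from INQUIRY plus VPD pages 0x83/0x80
 *	(ddi_devid_scsi_encode),
 *   3. a devid read back from the disk's reserved sectors (sd_get_devid),
 *   4. a freshly fabricated devid (sd_create_devid, DEVID_FAB).
 */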


/*
 * Function: sd_get_devid
 *
 * Description: This routine will return 0 if a valid device id has been
 *		obtained from the target and stored in the soft state.  If a
 *		valid device id has not been previously read and stored, a
 *		read attempt will be made.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		    structure
 *
 * Return Code: 0 if we successfully get the device id
 *
 * Context: Kernel Thread
 */

static int
sd_get_devid(sd_ssc_t *ssc)
{
	struct dk_devid		*dkdevid;
	ddi_devid_t		tmpid;
	uint_t			*ip;
	size_t			sz;
	diskaddr_t		blk;
	int			status;
	int			chksum;
	int			i;
	size_t			buffer_size;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n",
	    un);

	if (un->un_devid != NULL) {
		return (0);
	}

	mutex_exit(SD_MUTEX(un));
	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
	    (void *)SD_PATH_DIRECT) != 0) {
		mutex_enter(SD_MUTEX(un));
		return (EINVAL);
	}

	/*
	 * Read and verify device id, stored in the reserved cylinders at the
	 * end of the disk.  The backup label is on the odd sectors of the
	 * last track of the last cylinder.  The device id is on the track of
	 * the next-to-last cylinder.
	 */
	mutex_enter(SD_MUTEX(un));
	buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid));
	mutex_exit(SD_MUTEX(un));
	dkdevid = kmem_alloc(buffer_size, KM_SLEEP);
	status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk,
	    SD_PATH_DIRECT);

	if (status != 0) {
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto error;
	}

	/* Validate the revision */
	if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
		status = EINVAL;
		goto error;
	}

	/* Calculate the checksum */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Compare the checksums */
	if (DKD_GETCHKSUM(dkdevid) != chksum) {
		status = EINVAL;
		goto error;
	}

	/* Validate the device id */
	if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
		status = EINVAL;
		goto error;
	}

	/*
	 * Store the device id in the driver soft state
	 */
	sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
	tmpid = kmem_alloc(sz, KM_SLEEP);

	mutex_enter(SD_MUTEX(un));

	un->un_devid = tmpid;
	bcopy(&dkdevid->dkd_devid, un->un_devid, sz);

	kmem_free(dkdevid, buffer_size);

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un);

	return (status);
error:
	mutex_enter(SD_MUTEX(un));
	kmem_free(dkdevid, buffer_size);
	return (status);
}
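
/*
 * Illustrative sketch (not part of the driver): the devid sector checksum
 * used above XORs the sector, viewed as 32-bit words, over the first
 * DEV_BSIZE - 4 bytes; the final word of the sector holds the stored
 * checksum.  The helper name and SD_EXAMPLE_SKETCHES guard are hypothetical;
 * the loop mirrors sd_get_devid()/sd_write_deviceid().
 */
#ifdef	SD_EXAMPLE_SKETCHES
static int
sd_example_devid_chksum_ok(struct dk_devid *dkdevid)
{
	uint_t	*ip = (uint_t *)dkdevid;
	uint_t	chksum = 0;
	int	i;

	/* XOR every word except the trailing checksum word itself. */
	for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int)); i++) {
		chksum ^= ip[i];
	}
	return (DKD_GETCHKSUM(dkdevid) == chksum);
}
#endif	/* SD_EXAMPLE_SKETCHES */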


/*
 * Function: sd_create_devid
 *
 * Description: This routine will fabricate the device id and write it
 *		to the disk.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		    structure
 *
 * Return Code: the fabricated device id, or NULL if fabrication or the
 *		write to disk fails
 *
 * Context: Kernel Thread
 */

static ddi_devid_t
sd_create_devid(sd_ssc_t *ssc)
{
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/* Fabricate the devid */
	if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
	    == DDI_FAILURE) {
		return (NULL);
	}

	/* Write the devid to disk */
	if (sd_write_deviceid(ssc) != 0) {
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	return (un->un_devid);
}


/*
 * Function: sd_write_deviceid
 *
 * Description: This routine will write the device id to the disk
 *		reserved sector.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		    structure
 *
 * Return Code: -1 if the devid block cannot be located, otherwise the
 *		value returned by sd_send_scsi_WRITE
 *
 * Context: Kernel Thread
 */

static int
sd_write_deviceid(sd_ssc_t *ssc)
{
	struct dk_devid		*dkdevid;
	uchar_t			*buf;
	diskaddr_t		blk;
	uint_t			*ip, chksum;
	int			status;
	int			i;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	mutex_exit(SD_MUTEX(un));
	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
	    (void *)SD_PATH_DIRECT) != 0) {
		mutex_enter(SD_MUTEX(un));
		return (-1);
	}

	/* Allocate the buffer */
	buf = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);
	dkdevid = (struct dk_devid *)buf;

	/* Fill in the revision */
	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	mutex_enter(SD_MUTEX(un));
	bcopy(un->un_devid, &dkdevid->dkd_devid,
	    ddi_devid_sizeof(un->un_devid));
	mutex_exit(SD_MUTEX(un));

	/* Calculate the checksum */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Fill in the checksum */
	DKD_FORMCHKSUM(chksum, dkdevid);

	/* Write the reserved sector */
	status = sd_send_scsi_WRITE(ssc, buf, un->un_sys_blocksize, blk,
	    SD_PATH_DIRECT);
	if (status != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	kmem_free(buf, un->un_sys_blocksize);

	mutex_enter(SD_MUTEX(un));
	return (status);
}


/*
 * Function: sd_check_vpd_page_support
 *
 * Description: This routine sends an inquiry command with the EVPD bit set
 *		and a page code of 0x00 to the device.  It is used to
 *		determine which vital product pages are available to find
 *		the devid.  We are looking for pages 0x83 or 0x80.  If we
 *		return -1, the device does not support that command.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		    structure
 *
 * Return Code: 0 - success
 *		-1 - the device does not implement VPD pages, or the
 *		     command failed
 *
 * Context: This routine can sleep.
 */

static int
sd_check_vpd_page_support(sd_ssc_t *ssc)
{
	uchar_t	*page_list	= NULL;
	uchar_t	page_length	= 0xff;	/* Use max possible length */
	uchar_t	evpd		= 0x01;	/* Set the EVPD bit */
	uchar_t	page_code	= 0x00;	/* Supported VPD Pages */
	int	rval		= 0;
	int	counter;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	mutex_exit(SD_MUTEX(un));

	/*
	 * We'll set the page length to the maximum to save figuring it out
	 * with an additional call.
	 */
	page_list = kmem_zalloc(page_length, KM_SLEEP);

	rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd,
	    page_code, NULL);

	if (rval != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Now we must validate that the device accepted the command, as some
	 * drives do not support it.  If the drive does support it, we will
	 * return 0, and the supported pages will be in un_vpd_page_mask.  If
	 * not, we return -1.
	 */
	if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) {
		/* Loop to find one of the 2 pages we need */
		counter = 4;  /* Supported pages start at byte 4, with 0x00 */

		/*
		 * Pages are returned in ascending order, and 0x83 is what we
		 * are hoping for.
		 */
		while ((page_list[counter] <= 0x86) &&
		    (counter <= (page_list[VPD_PAGE_LENGTH] +
		    VPD_HEAD_OFFSET))) {
			/*
			 * Add 3 because page_list[3] is the number of
			 * pages minus 3
			 */

			switch (page_list[counter]) {
			case 0x00:
				un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG;
				break;
			case 0x80:
				un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG;
				break;
			case 0x81:
				un->un_vpd_page_mask |= SD_VPD_OPERATING_PG;
				break;
			case 0x82:
				un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG;
				break;
			case 0x83:
				un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG;
				break;
			case 0x86:
				un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG;
				break;
			}
			counter++;
		}

	} else {
		rval = -1;

		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_check_vpd_page_support: This drive does not implement "
		    "VPD pages.\n");
	}

	kmem_free(page_list, page_length);

	return (rval);
}
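
/*
 * Illustrative sketch (not part of the driver): VPD page 0x00 lists the
 * supported page codes starting at byte 4, with byte 3 giving the length of
 * the list.  The helper below is a hypothetical, simplified restatement of
 * the scan above that just reports whether one page code is present.
 */
#ifdef	SD_EXAMPLE_SKETCHES
static int
sd_example_vpd_page_listed(const uchar_t *page_list, uchar_t page)
{
	int i;
	int nbytes = page_list[3];	/* length of the page-code list */

	for (i = 0; i < nbytes; i++) {
		if (page_list[4 + i] == page)
			return (1);
	}
	return (0);
}
#endif	/* SD_EXAMPLE_SKETCHES */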


/*
 * Function: sd_setup_pm
 *
 * Description: Initialize Power Management on the device
 *
 * Context: Kernel Thread
 */

static void
sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi)
{
	uint_t		log_page_size;
	uchar_t		*log_page_data;
	int		rval = 0;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/*
	 * Since we are called from attach, holding a mutex for
	 * un is unnecessary.  Because some of the routines called
	 * from here require SD_MUTEX to not be held, assert this
	 * right up front.
	 */
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	/*
	 * Since the sd device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * DOES need to be suspended and resumed.
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
	    "pm-hardware-state", "needs-suspend-resume");

	/*
	 * This complies with the new power management framework
	 * for certain desktop machines.  Create the pm_components
	 * property as a string array property.
	 */
	if (un->un_f_pm_supported) {
		/*
		 * Not all devices have a motor, so try it first; some
		 * devices may return ILLEGAL REQUEST while others may hang.
		 * The following START_STOP_UNIT is used to check whether the
		 * target device has a motor.
		 */
		un->un_f_start_stop_supported = TRUE;
		rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START,
		    SD_PATH_DIRECT);

		if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			un->un_f_start_stop_supported = FALSE;
		}

		/*
		 * Create the pm properties anyway, otherwise the parent
		 * can't go to sleep.
		 */
		(void) sd_create_pm_components(devi, un);
		un->un_f_pm_is_enabled = TRUE;
		return;
	}

	if (!un->un_f_log_sense_supported) {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
		return;
	}

	rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE);

#ifdef	SDDEBUG
	if (sd_force_pm_supported) {
		/* Force a successful result */
		rval = 1;
	}
#endif

	/*
	 * If the start-stop cycle counter log page is not supported
	 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0)
	 * then we should not create the pm_components property.
	 */
	if (rval == -1) {
		/*
		 * Error.
		 * Reading log sense failed, most likely this is
		 * an older drive that does not support log sense.
		 * If this fails auto-pm is not supported.
		 */
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;

	} else if (rval == 0) {
		/*
		 * Page not found.
		 * The start stop cycle counter is implemented as page
		 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks.  For
		 * newer disks it is implemented as START_STOP_CYCLE_PAGE
		 * (0xE).
		 */
		if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) {
			/*
			 * Page found, use this one.
			 */
			un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
			un->un_f_pm_is_enabled = TRUE;
		} else {
			/*
			 * Error or page not found.
			 * auto-pm is not supported for this device.
			 */
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}
	} else {
		/*
		 * Page found, use it.
		 */
		un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
		un->un_f_pm_is_enabled = TRUE;
	}

	if (un->un_f_pm_is_enabled == TRUE) {
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
		    log_page_size, un->un_start_stop_cycle_page,
		    0x01, 0, SD_PATH_DIRECT);

		if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif

		/*
		 * If the LOG SENSE for the start/stop cycle counter page
		 * succeeds, then power management is supported and we can
		 * enable auto-pm.
		 */
		if (rval == 0) {
			(void) sd_create_pm_components(devi, un);
		} else {
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}

		kmem_free(log_page_data, log_page_size);
	}
}
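
/*
 * Illustrative note (not original to the driver): for devices that do not
 * take the un_f_pm_supported fast path above, the net effect of
 * sd_setup_pm() is this decision ladder:
 *
 *   LOG SENSE unsupported                      -> auto-pm disabled
 *   page 0xE (START_STOP_CYCLE_PAGE) found     -> use page 0xE
 *   else page 0x31 (START_STOP_CYCLE_VU_PAGE)  -> use page 0x31
 *   neither page found / LOG SENSE error       -> auto-pm disabled
 *
 * Even when a page is found, a final LOG SENSE of that page must succeed
 * before pm-components is created and auto-pm is left enabled.
 */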


/*
 * Function: sd_create_pm_components
 *
 * Description: Initialize PM property.
 *
 * Context: Kernel thread context
 */

static void
sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
{
	char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL };

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
	    "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
		/*
		 * When components are initially created they are idle,
		 * power up any non-removables.
		 * Note: the return value of pm_raise_power can't be used
		 * for determining if PM should be enabled for this device.
		 * Even if you check the return values and remove this
		 * property created above, the PM framework will not honor the
		 * change after the first call to pm_raise_power.  Hence,
		 * removal of that property does not help if pm_raise_power
		 * fails.  In the case of removable media, the start/stop
		 * will fail if the media is not present.
		 */
		if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0,
		    SD_SPINDLE_ON) == DDI_SUCCESS)) {
			mutex_enter(SD_MUTEX(un));
			un->un_power_level = SD_SPINDLE_ON;
			mutex_enter(&un->un_pm_mutex);
			/* Set to on and not busy. */
			un->un_pm_count = 0;
		} else {
			mutex_enter(SD_MUTEX(un));
			un->un_power_level = SD_SPINDLE_OFF;
			mutex_enter(&un->un_pm_mutex);
			/* Set to off. */
			un->un_pm_count = -1;
		}
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
	} else {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
	}
}
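
/*
 * Illustrative note (not original to the driver): the "pm-components"
 * string array above follows the pm-components(9P) convention: the first
 * element names the component ("NAME=spindle-motor") and each following
 * element maps a power level to a label ("0=off", "1=on").  Those numeric
 * levels are what sdpower() later receives as its 'level' argument
 * (SD_SPINDLE_OFF/SD_SPINDLE_ON).
 */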


/*
 * Function: sd_ddi_suspend
 *
 * Description: Performs system power-down operations.  This includes
 *		setting the drive state to indicate that it is suspended so
 *		that no new commands will be accepted.  Also, wait for
 *		all commands that are in transport or queued to a timer
 *		for retry to complete.  All timeout threads are cancelled.
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_suspend(dev_info_t *devi)
{
	struct sd_lun	*un;
	clock_t		wait_cmds_complete;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");

	mutex_enter(SD_MUTEX(un));

	/* Return success if the device is already suspended. */
	if (un->un_state == SD_STATE_SUSPENDED) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device already suspended, exiting\n");
		return (DDI_SUCCESS);
	}

	/* Return failure if the device is being used by HA */
	if (un->un_resvd_status &
	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in use by HA, exiting\n");
		return (DDI_FAILURE);
	}

	/*
	 * Return failure if the device is in a resource wait
	 * or power changing state.
	 */
	if ((un->un_state == SD_STATE_RWAIT) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in resource wait state, exiting\n");
		return (DDI_FAILURE);
	}

	un->un_save_state = un->un_last_state;
	New_state(un, SD_STATE_SUSPENDED);

	/*
	 * Wait for all commands that are in transport or queued to a timer
	 * for retry to complete.
	 *
	 * While waiting, no new commands will be accepted or sent because of
	 * the new state we set above.
	 *
	 * Wait till current operation has completed.  If we are in the
	 * resource wait state (with an intr outstanding) then we need to
	 * wait till the intr completes and starts the next cmd.  We want to
	 * wait for SD_WAIT_CMDS_COMPLETE seconds before failing the
	 * DDI_SUSPEND.
	 */
	wait_cmds_complete = ddi_get_lbolt() +
	    (sd_wait_cmds_complete * drv_usectohz(1000000));

	while (un->un_ncmds_in_transport != 0) {
		/*
		 * Fail if commands do not finish in the specified time.
		 */
		if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
		    wait_cmds_complete) == -1) {
			/*
			 * Undo the state changes made above.  Everything
			 * must go back to its original value.
			 */
			Restore_state(un);
			un->un_last_state = un->un_save_state;
			/* Wake up any threads that might be waiting. */
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_ERROR(SD_LOG_IO_PM, un,
			    "sd_ddi_suspend: failed due to outstanding cmds\n");
			SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n");
			return (DDI_FAILURE);
		}
	}

	/*
	 * Cancel SCSI watch thread and timeouts, if any are active
	 */

	if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
		opaque_t temp_token = un->un_swr_token;
		mutex_exit(SD_MUTEX(un));
		scsi_watch_suspend(temp_token);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_reset_throttle_timeid != NULL) {
		timeout_id_t temp_id = un->un_reset_throttle_timeid;
		un->un_reset_throttle_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_dcvb_timeid != NULL) {
		timeout_id_t temp_id = un->un_dcvb_timeid;
		un->un_dcvb_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_enter(&un->un_pm_mutex);
	if (un->un_pm_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_timeid;
		un->un_pm_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	if (un->un_rmw_msg_timeid != NULL) {
		timeout_id_t temp_id = un->un_rmw_msg_timeid;
		un->un_rmw_msg_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_retry_timeid != NULL) {
		timeout_id_t temp_id = un->un_retry_timeid;
		un->un_retry_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));

		if (un->un_retry_bp != NULL) {
			un->un_retry_bp->av_forw = un->un_waitq_headp;
			un->un_waitq_headp = un->un_retry_bp;
			if (un->un_waitq_tailp == NULL) {
				un->un_waitq_tailp = un->un_retry_bp;
			}
			un->un_retry_bp = NULL;
			un->un_retry_statp = NULL;
		}
	}

	if (un->un_direct_priority_timeid != NULL) {
		timeout_id_t temp_id = un->un_direct_priority_timeid;
		un->un_direct_priority_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Remove callbacks for insert and remove events
		 */
		if (un->un_insert_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_insert_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_insert_event = NULL;
		}

		if (un->un_remove_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_remove_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_remove_event = NULL;
		}
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");

	return (DDI_SUCCESS);
}
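
/*
 * Illustrative sketch (not part of the driver): the drain loop in
 * sd_ddi_suspend() above uses the classic cv_timedwait(9F) pattern with an
 * absolute deadline in lbolt ticks, so repeated wakeups all race toward the
 * same cutoff.  The helper and its names are hypothetical.
 */
#ifdef	SD_EXAMPLE_SKETCHES
static int
sd_example_drain(kmutex_t *mp, kcondvar_t *cv, volatile int *countp,
    int timeout_secs)
{
	clock_t deadline = ddi_get_lbolt() +
	    (timeout_secs * drv_usectohz(1000000));

	ASSERT(mutex_owned(mp));
	while (*countp != 0) {
		/* cv_timedwait() returns -1 once the deadline has passed. */
		if (cv_timedwait(cv, mp, deadline) == -1)
			return (-1);	/* commands still outstanding */
	}
	return (0);			/* drained */
}
#endif	/* SD_EXAMPLE_SKETCHES */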


/*
 * Function: sd_ddi_pm_suspend
 *
 * Description: Set the drive state to low power.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_suspend(struct sd_lun *un)
{
	ASSERT(un != NULL);
	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n");

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	/*
	 * Exit if power management is not enabled for this device, or if
	 * the device is being used by HA.
	 */
	if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n");
		return (DDI_SUCCESS);
	}

	SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * See if the device is not busy, i.e.:
	 *    - we have no commands in the driver for this device
	 *    - not waiting for resources
	 */
	if ((un->un_ncmds_in_driver == 0) &&
	    (un->un_state != SD_STATE_RWAIT)) {
		/*
		 * The device is not busy, so it is OK to go to low power
		 * state.  Indicate low power, but rely on someone else to
		 * actually change it.
		 */
		mutex_enter(&un->un_pm_mutex);
		un->un_pm_count = -1;
		mutex_exit(&un->un_pm_mutex);
		un->un_power_level = SD_SPINDLE_OFF;
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_resume
 *
 * Description: Performs system power-up operations.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_resume(dev_info_t *devi)
{
	struct sd_lun	*un;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");

	mutex_enter(SD_MUTEX(un));
	Restore_state(un);

	/*
	 * Restore the state which was saved to give the
	 * right state in un_last_state.
	 */
	un->un_last_state = un->un_save_state;
	/*
	 * Note: throttle comes back at full.
	 * Also note: this MUST be done before calling pm_raise_power,
	 * otherwise the system can get hung in biowait.  The scenario where
	 * this'll happen is under cpr suspend.  Writing of the system
	 * state goes through sddump, which writes 0 to un_throttle.  If
	 * writing the system state then fails, for example if the partition
	 * is too small, then cpr attempts a resume.  If throttle isn't
	 * restored from the saved value until after calling pm_raise_power,
	 * then cmds sent in sdpower are not transported and sd_send_scsi_cmd
	 * hangs in biowait.
	 */
	un->un_throttle = un->un_saved_throttle;

	/*
	 * Failure here is very rare, as the only command issued in the power
	 * entry point is START when transitioning from 0->1 or unknown->1.
	 * Put the device in the SPINDLE ON state irrespective of the state
	 * at which suspend was done.  Ignore the return value, as the resume
	 * should not be failed.  In the case of removable media the media
	 * need not be inserted and hence there is a chance that raise power
	 * will fail with media not present.
	 */
	if (un->un_f_attach_spinup) {
		mutex_exit(SD_MUTEX(un));
		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * Don't broadcast to the suspend cv and therefore possibly
	 * start I/O until after power has been restored.
	 */
	cv_broadcast(&un->un_suspend_cv);
	cv_broadcast(&un->un_state_cv);

	/* restart thread */
	if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
		scsi_watch_resume(un->un_swr_token);
	}

#if (defined(__fibre))
	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Add callbacks for insert and remove events
		 */
		if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
			sd_init_event_callbacks(un);
		}
	}
#endif

	/*
	 * Transport any pending commands to the target.
	 *
	 * If this is a low-activity device commands in queue will have to
	 * wait until new commands come in, which may take awhile.  Also, we
	 * specifically don't check un_ncmds_in_transport because we know that
	 * there really are no commands in progress after the unit was
	 * suspended and we could have reached the throttle level, been
	 * suspended, and have no new commands coming in for awhile.  Highly
	 * unlikely, but so is the low-activity disk scenario.
	 */
	ddi_xbuf_dispatch(un->un_xbuf_attr);

	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_pm_resume
 *
 * Description: Set the drive state to powered on.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_resume(struct sd_lun *un)
{
	ASSERT(un != NULL);

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));
	un->un_power_level = SD_SPINDLE_ON;

	ASSERT(!mutex_owned(&un->un_pm_mutex));
	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		un->un_pm_count++;
		ASSERT(un->un_pm_count == 0);
		/*
		 * Note: no longer do the cv_broadcast on un_suspend_cv.  The
		 * un_suspend_cv is for a system resume, not a power management
		 * device resume. (4297749)
		 *	 cv_broadcast(&un->un_suspend_cv);
		 */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));

	return (DDI_SUCCESS);
}


/*
 * Function: sd_pm_idletimeout_handler
 *
 * Description: A timer routine that's active only while a device is busy.
 *		The purpose is to extend slightly the pm framework's busy
 *		view of the device to prevent busy/idle thrashing for
 *		back-to-back commands.  Do this by comparing the current time
 *		to the time at which the last command completed and when the
 *		difference is greater than sd_pm_idletime, call
 *		pm_idle_component.  In addition to indicating idle to the pm
 *		framework, update the chain type to again use the internal pm
 *		layers of the driver.
 *
 * Arguments: arg - driver soft state (unit) structure
 *
 * Context: Executes in a timeout(9F) thread context
 */

static void
sd_pm_idletimeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	time_t	now;

	mutex_enter(&sd_detach_mutex);
	if (un->un_detach_count != 0) {
		/* Abort if the instance is detaching */
		mutex_exit(&sd_detach_mutex);
		return;
	}
	mutex_exit(&sd_detach_mutex);

	now = ddi_get_time();
	/*
	 * Grab both mutexes, in the proper order, since we're accessing
	 * both PM and softstate variables.
	 */
	mutex_enter(SD_MUTEX(un));
	mutex_enter(&un->un_pm_mutex);
	if (((now - un->un_pm_idle_time) > sd_pm_idletime) &&
	    (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
		/*
		 * Update the chain types.
		 * This takes effect on the next new command received.
		 */
		if (un->un_f_non_devbsize_supported) {
			un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
		} else {
			un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
		}
		un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_idletimeout_handler: idling device\n");
		(void) pm_idle_component(SD_DEVINFO(un), 0);
		un->un_pm_idle_timeid = NULL;
	} else {
		un->un_pm_idle_timeid =
		    timeout(sd_pm_idletimeout_handler, un,
		    (drv_usectohz((clock_t)300000))); /* 300 ms. */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_pm_timeout_handler
 *
 * Description: Callback to tell framework we are idle.
 *
 * Context: timeout(9F) thread context.
 */

static void
sd_pm_timeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	(void) pm_idle_component(SD_DEVINFO(un), 0);
	mutex_enter(&un->un_pm_mutex);
	un->un_pm_timeid = NULL;
	mutex_exit(&un->un_pm_mutex);
}
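
/*
 * Illustrative sketch (not part of the driver): the self-rearming pattern
 * used by sd_pm_idletimeout_handler() above, reduced to its core.  Either
 * the idle condition holds and the timer stops, or the handler re-arms
 * itself for another 300 ms.  All names here are hypothetical.
 */
#ifdef	SD_EXAMPLE_SKETCHES
struct sd_example_idle {
	kmutex_t	lock;
	timeout_id_t	timeid;
	time_t		last_done;	/* when the last command completed */
	time_t		idle_secs;	/* required quiet interval */
};

static void
sd_example_idle_tick(void *arg)
{
	struct sd_example_idle *ip = arg;

	mutex_enter(&ip->lock);
	if ((ddi_get_time() - ip->last_done) > ip->idle_secs) {
		ip->timeid = NULL;	/* quiet long enough: stop */
	} else {
		/* Still warm: re-arm for another 300 ms. */
		ip->timeid = timeout(sd_example_idle_tick, ip,
		    drv_usectohz(300000));
	}
	mutex_exit(&ip->lock);
}
#endif	/* SD_EXAMPLE_SKETCHES */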


/*
 * Function: sdpower
 *
 * Description: PM entry point.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdpower(dev_info_t *devi, int component, int level)
{
	struct sd_lun	*un;
	int		instance;
	int		rval = DDI_SUCCESS;
	uint_t		i, log_page_size, maxcycles, ncycles;
	uchar_t		*log_page_data;
	int		log_sense_page;
	int		medium_present;
	time_t		intvlp;
	dev_t		dev;
	struct pm_trans_data	sd_pm_tran_data;
	uchar_t		save_state;
	int		sval;
	uchar_t		state_before_pm;
	int		got_semaphore_here;
	sd_ssc_t	*ssc;

	instance = ddi_get_instance(devi);

	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) ||
	    component != 0) {
		return (DDI_FAILURE);
	}

	dev = sd_make_device(SD_DEVINFO(un));
	ssc = sd_ssc_init(un);

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);

	/*
	 * Must synchronize power down with close.
	 * Attempt to decrement/acquire the open/close semaphore,
	 * but do NOT wait on it.  If it's not greater than zero,
	 * i.e. it can't be decremented without waiting, then
	 * someone else, either open or close, already has it
	 * and the try returns 0.  Use that knowledge here to determine
	 * if it's OK to change the device power level.
	 * Also, only increment it on exit if it was decremented, i.e.
	 * gotten, here.
	 */
	got_semaphore_here = sema_tryp(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * If un_ncmds_in_driver is non-zero it indicates commands are
	 * already being processed in the driver, or if the semaphore was
	 * not gotten here it indicates an open or close is being processed.
	 * At the same time somebody is requesting to go to low power, which
	 * can't happen; therefore we need to return failure.
	 */
	if ((level == SD_SPINDLE_OFF) &&
	    ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device has queued cmds.\n");

		goto sdpower_failed;
	}

	/*
	 * If the state is OFFLINE the disk is completely dead; we would
	 * have to power the disk on or off by sending it commands, and of
	 * course those would fail anyway, so just return here.
	 *
	 * Power changes to a device that's OFFLINE or SUSPENDED
	 * are not allowed.
	 */
	if ((un->un_state == SD_STATE_OFFLINE) ||
	    (un->un_state == SD_STATE_SUSPENDED)) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device is off-line.\n");

		goto sdpower_failed;
	}

	/*
	 * Change the device's state to indicate its power level
	 * is being changed.  Do this to prevent a power off in the
	 * middle of commands, which is especially bad on devices
	 * that are really powered off instead of just spun down.
	 */
	state_before_pm = un->un_state;
	un->un_state = SD_STATE_PM_CHANGING;

	mutex_exit(SD_MUTEX(un));

	/*
	 * If the "pm-capable" property is set to TRUE by HBA drivers,
	 * bypass the following checking; otherwise, check the log
	 * sense information for this device.
	 */
	if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) {
		/*
		 * Get the log sense information to understand whether the
		 * power cycle counts have gone beyond the threshold.
		 */
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		mutex_enter(SD_MUTEX(un));
		log_sense_page = un->un_start_stop_cycle_page;
		mutex_exit(SD_MUTEX(un));

		rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
		    log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);

		if (rval != 0) {
			if (rval == EIO)
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			else
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif
		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Log Sense Failed\n");

			kmem_free(log_page_data, log_page_size);
			/* Cannot support power management on those drives */

			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, Log Sense Failed.\n");

			goto sdpower_failed;
		}

		/*
		 * From the page data, convert the essential information to
		 * pm_trans_data.
		 */
		maxcycles =
		    (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
		    (log_page_data[0x1E] << 8)  | log_page_data[0x1F];

		sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;

		ncycles =
		    (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
		    (log_page_data[0x26] << 8)  | log_page_data[0x27];

		sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;

		for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
			sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
			    log_page_data[8+i];
		}

		kmem_free(log_page_data, log_page_size);

		/*
		 * Call the pm_trans_check routine to get the Ok from
		 * the global policy.
		 */
		sd_pm_tran_data.format = DC_SCSI_FORMAT;
		sd_pm_tran_data.un.scsi_cycles.flag = 0;

		rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 1;
		}
#endif
		switch (rval) {
		case 0:
			/*
			 * Not OK to power cycle, or an error occurred in the
			 * parameters passed; pm_trans_check has returned the
			 * advised time to wait before considering a power
			 * cycle.  Based on the new intvlp parameter we are
			 * supposed to pretend we are busy so that the pm
			 * framework will never call our power entry point.
			 * Because of that, install a timeout handler and
			 * wait for the recommended time to elapse so that
			 * power management can be effective again.
			 *
			 * To effect this behavior, call pm_busy_component to
			 * indicate to the framework this device is busy.
			 * By not adjusting un_pm_count the rest of PM in
			 * the driver will function normally, and independent
			 * of this, but because the framework is told the
			 * device is busy it won't attempt powering down
			 * until it gets a matching idle.  The timeout handler
			 * sends this.
			 * Note: sd_pm_entry can't be called here to do this
			 * because sdpower may have been called as a result
			 * of a call to pm_raise_power from within sd_pm_entry.
			 *
			 * If a timeout handler is already active then
			 * don't install another.
			 */
			mutex_enter(&un->un_pm_mutex);
			if (un->un_pm_timeid == NULL) {
				un->un_pm_timeid =
				    timeout(sd_pm_timeout_handler,
				    un, intvlp * drv_usectohz(1000000));
				mutex_exit(&un->un_pm_mutex);
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			} else {
				mutex_exit(&un->un_pm_mutex);
			}
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));

			SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
			    "trans check Failed, not ok to power cycle.\n");

			goto sdpower_failed;
		case -1:
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, trans check command Failed.\n");

			goto sdpower_failed;
		}
	}

	if (level == SD_SPINDLE_OFF) {
		/*
		 * Save the last state... if the STOP FAILS we need it
		 * for restoring
		 */
		mutex_enter(SD_MUTEX(un));
		save_state = un->un_last_state;
		/*
		 * There must not be any commands being processed
		 * in the driver when we get here.  Power to the
		 * device is potentially going off.
		 */
		ASSERT(un->un_ncmds_in_driver == 0);
		mutex_exit(SD_MUTEX(un));

		/*
		 * For now suspend the device completely before the spindle
		 * is turned off.
		 */
		if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, PM suspend Failed.\n");

			goto sdpower_failed;
		}
	}

	/*
	 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
	 * close, or strategy.  Dump no longer uses this routine, it uses its
	 * own code so it can be done in polled mode.
	 */

	medium_present = TRUE;

	/*
	 * When powering up, issue a TUR in case the device is at unit
	 * attention.  Don't do retries.  Bypass the PM layer, otherwise
	 * a deadlock on un_pm_busy_cv will occur.
	 */
	if (level == SD_SPINDLE_ON) {
		sval = sd_send_scsi_TEST_UNIT_READY(ssc,
		    SD_DONT_RETRY_TUR | SD_BYPASS_PM);
		if (sval != 0)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
	    ((level == SD_SPINDLE_ON) ? "START" : "STOP"));

	sval = sd_send_scsi_START_STOP_UNIT(ssc,
	    ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP),
	    SD_PATH_DIRECT);
	if (sval != 0) {
		if (sval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	/* Command failed, check for media present. */
	if ((sval == ENXIO) && un->un_f_has_removable_media) {
		medium_present = FALSE;
	}

	/*
	 * The conditions of interest here are:
	 *   if a spindle off with media present fails,
	 *	then restore the state and return an error.
	 *   else if a spindle on fails,
	 *	then return an error (there's no state to restore).
	 * In all other cases we setup for the new state
	 * and return success.
	 */
	switch (level) {
	case SD_SPINDLE_OFF:
		if ((medium_present == TRUE) && (sval != 0)) {
			/* The stop command from above failed */
			rval = DDI_FAILURE;
			/*
			 * The stop command failed, and we have media
			 * present.  Put the level back by calling
			 * sd_ddi_pm_resume() and set the state back to
			 * its previous value.
			 */
			(void) sd_ddi_pm_resume(un);
			mutex_enter(SD_MUTEX(un));
			un->un_last_state = save_state;
			mutex_exit(SD_MUTEX(un));
			break;
		}
		/*
		 * The stop command from above succeeded.
		 */
		if (un->un_f_monitor_media_state) {
			/*
			 * Terminate watch thread in case of removable media
			 * devices going into low power state.  This is as per
			 * the requirements of pm framework, otherwise commands
			 * will be generated for the device (through watch
			 * thread), even when the device is in low power state.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_f_watcht_stopped = FALSE;
			if (un->un_swr_token != NULL) {
				opaque_t temp_token = un->un_swr_token;
				un->un_f_watcht_stopped = TRUE;
				un->un_swr_token = NULL;
				mutex_exit(SD_MUTEX(un));
				(void) scsi_watch_request_terminate(temp_token,
				    SCSI_WATCH_TERMINATE_ALL_WAIT);
			} else {
				mutex_exit(SD_MUTEX(un));
			}
		}
		break;

	default:	/* The level requested is spindle on... */
		/*
		 * Legacy behavior: return success on a failed spinup
		 * if there is no media in the drive.
		 * Do this by looking at medium_present here.
		 */
		if ((sval != 0) && medium_present) {
			/* The start command from above failed */
			rval = DDI_FAILURE;
			break;
		}
		/*
		 * The start command from above succeeded.
		 * Resume the devices now that we have
		 * started the disks.
		 */
		(void) sd_ddi_pm_resume(un);

		/*
		 * Resume the watch thread since it was suspended
		 * when the device went into low power mode.
		 */
		if (un->un_f_monitor_media_state) {
			mutex_enter(SD_MUTEX(un));
			if (un->un_f_watcht_stopped == TRUE) {
				opaque_t temp_token;

				un->un_f_watcht_stopped = FALSE;
				mutex_exit(SD_MUTEX(un));
				temp_token = scsi_watch_request_submit(
				    SD_SCSI_DEVP(un),
				    sd_check_media_time,
				    SENSE_LENGTH, sd_media_watch_cb,
				    (caddr_t)dev);
				mutex_enter(SD_MUTEX(un));
				un->un_swr_token = temp_token;
			}
			mutex_exit(SD_MUTEX(un));
		}
	}
	if (got_semaphore_here != 0) {
		sema_v(&un->un_semoclose);
	}
	/*
	 * On exit put the state back to its original value
	 * and broadcast to anyone waiting for the power
	 * change completion.
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_state = state_before_pm;
	cv_broadcast(&un->un_suspend_cv);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);

	sd_ssc_fini(ssc);
	return (rval);

sdpower_failed:

	sd_ssc_fini(ssc);
	return (DDI_FAILURE);
}
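
/*
 * Illustrative sketch (not part of the driver): the cycle counters that
 * sdpower() pulls from the start/stop cycle counter log page are big-endian
 * 32-bit values assembled byte-by-byte (lifetime maximum at offsets
 * 0x1c-0x1f, accumulated cycles at 0x24-0x27).  A hypothetical helper makes
 * the shift-and-or explicit.
 */
#ifdef	SD_EXAMPLE_SKETCHES
static uint_t
sd_example_be32(const uchar_t *p)
{
	/* Assemble four big-endian bytes into a host-order 32-bit value. */
	return (((uint_t)p[0] << 24) | ((uint_t)p[1] << 16) |
	    ((uint_t)p[2] << 8) | (uint_t)p[3]);
}

/* Usage mirroring sdpower():			*/
/*	maxcycles = sd_example_be32(&log_page_data[0x1c]);	*/
/*	ncycles   = sd_example_be32(&log_page_data[0x24]);	*/
#endif	/* SD_EXAMPLE_SKETCHES */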


/*
 * Function: sdattach
 *
 * Description: Driver's attach(9e) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *	      cmd  - attach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_ATTACH:
		return (sd_unit_attach(devi));
	case DDI_RESUME:
		return (sd_ddi_resume(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}


/*
 * Function: sddetach
 *
 * Description: Driver's detach(9E) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *	      cmd  - detach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_DETACH:
		return (sd_unit_detach(devi));
	case DDI_SUSPEND:
		return (sd_ddi_suspend(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}


/*
 * Function: sd_sync_with_callback
 *
 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the
 *		soft state while the callback routine is active.
 *
 * Arguments: un: softstate structure for the instance
 *
 * Context: Kernel thread context
 */

static void
sd_sync_with_callback(struct sd_lun *un)
{
	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	ASSERT(un->un_in_callback >= 0);

	while (un->un_in_callback > 0) {
		mutex_exit(SD_MUTEX(un));
		delay(2);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_exit(SD_MUTEX(un));
}

/*
 * Function: sd_unit_attach
 *
 * Description: Performs DDI_ATTACH processing for sdattach().  Allocates
 *		the soft state structure for the device and performs
 *		all necessary structure and device initializations.
 *
 * Arguments: devi: the system's dev_info_t for the device.
 *
 * Return Code: DDI_SUCCESS if attach is successful.
 *		DDI_FAILURE if any part of the attach fails.
 *
 * Context: Called at attach(9e) time for the DDI_ATTACH flag.
 *	    Kernel thread context only.  Can sleep.
 */

static int
sd_unit_attach(dev_info_t *devi)
{
	struct	scsi_device	*devp;
	struct	sd_lun		*un;
	char			*variantp;
	char			name_str[48];
	int	reservation_flag = SD_TARGET_IS_UNRESERVED;
	int	instance;
	int	rval;
	int	wc_enabled;
	int	tgt;
	uint64_t	capacity;
	uint_t		lbasize = 0;
	dev_info_t	*pdip = ddi_get_parent(devi);
	int		offbyone = 0;
	int		geom_label_valid = 0;
	sd_ssc_t	*ssc;
	int		status;
	struct sd_fm_internal	*sfip = NULL;
	int		max_xfer_size;

	/*
	 * Retrieve the target driver's private data area.  This was set
	 * up by the HBA.
	 */
	devp = ddi_get_driver_private(devi);

	/*
	 * Retrieve the target ID of the device.
	 */
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_TARGET, -1);

	/*
	 * Since we have no idea what state things were left in by the last
	 * user of the device, set up some 'default' settings, i.e. turn 'em
	 * off.  The scsi_ifsetcap calls force re-negotiations with the drive.
	 * Do this before the scsi_probe, which sends an inquiry.
	 * This is a fix for bug (4430280).
	 * Of special importance is wide-xfer.  The drive could have been left
	 * in wide transfer mode by the last driver to communicate with it,
	 * this includes us.  If that's the case, and if the following is not
	 * setup properly or we don't re-negotiate with the drive prior to
	 * transferring data to/from the drive, it causes bus parity errors,
	 * data overruns, and unexpected interrupts.  This first occurred when
	 * the fix for bug (4378686) was made.
	 */
	(void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1);
	(void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
	(void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1);

	/*
	 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs
	 * on a target.  Setting it per lun instance actually sets the
	 * capability of this target, which affects those luns already
	 * attached on the same target.  So during attach, we can only disable
	 * this capability when no other lun has been attached on this
	 * target.  By doing this, we assume a target has the same tagged-qing
	 * capability for every lun.  The condition can be removed when HBA
	 * is changed to support per-lun based tagged-qing capability.
	 */
	if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
		(void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1);
	}

	/*
	 * Use scsi_probe() to issue an INQUIRY command to the device.
	 * This call will allocate and fill in the scsi_inquiry structure
	 * and point the sd_inq member of the scsi_device structure to it.
	 * If the attach succeeds, then this memory will not be de-allocated
	 * (via scsi_unprobe()) until the instance is detached.
	 */
	if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
		goto probe_failed;
	}

	/*
	 * Check the device type as specified in the inquiry data and
	 * claim it if it is of a type that we support.
	 */
	switch (devp->sd_inq->inq_dtype) {
	case DTYPE_DIRECT:
		break;
	case DTYPE_RODIRECT:
		break;
	case DTYPE_OPTICAL:
		break;
	case DTYPE_NOTPRESENT:
	default:
		/* Unsupported device type; fail the attach. */
		goto probe_failed;
	}
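
	/*
	 * Illustrative note (not original to the driver): of the SCSI
	 * peripheral device types reported in INQUIRY byte 0 (bits 4-0),
	 * the switch above accepts only direct access (DTYPE_DIRECT, 0x0),
	 * CD/DVD (DTYPE_RODIRECT, 0x5), and optical memory (DTYPE_OPTICAL,
	 * 0x7); anything else, including DTYPE_NOTPRESENT, fails the attach.
	 */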
7101 * 7102 * We rely upon this memory being set to all zeroes by 7103 * ddi_soft_state_zalloc(). We assume that any member of the 7104 * soft state structure that is not explicitly initialized by 7105 * this routine will have a value of zero. 7106 */ 7107 instance = ddi_get_instance(devp->sd_dev); 7108 #ifndef XPV_HVM_DRIVER 7109 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 7110 goto probe_failed; 7111 } 7112 #endif /* !XPV_HVM_DRIVER */ 7113 7114 /* 7115 * Retrieve a pointer to the newly-allocated soft state. 7116 * 7117 * This should NEVER fail if the ddi_soft_state_zalloc() call above 7118 * was successful, unless something has gone horribly wrong and the 7119 * ddi's soft state internals are corrupt (in which case it is 7120 * probably better to halt here than just fail the attach....) 7121 */ 7122 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 7123 panic("sd_unit_attach: NULL soft state on instance:0x%x", 7124 instance); 7125 /*NOTREACHED*/ 7126 } 7127 7128 /* 7129 * Link the back ptr of the driver soft state to the scsi_device 7130 * struct for this lun. 7131 * Save a pointer to the softstate in the driver-private area of 7132 * the scsi_device struct. 7133 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 7134 * we first set un->un_sd below. 7135 */ 7136 un->un_sd = devp; 7137 devp->sd_private = (opaque_t)un; 7138 7139 /* 7140 * The following must be after devp is stored in the soft state struct. 7141 */ 7142 #ifdef SDDEBUG 7143 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7144 "%s_unit_attach: un:0x%p instance:%d\n", 7145 ddi_driver_name(devi), un, instance); 7146 #endif 7147 7148 /* 7149 * Set up the device type and node type (for the minor nodes). 7150 * By default we assume that the device can at least support the 7151 * Common Command Set. Call it a CD-ROM if it reports itself 7152 * as a RODIRECT device. 7153 */ 7154 switch (devp->sd_inq->inq_dtype) { 7155 case DTYPE_RODIRECT: 7156 un->un_node_type = DDI_NT_CD_CHAN; 7157 un->un_ctype = CTYPE_CDROM; 7158 break; 7159 case DTYPE_OPTICAL: 7160 un->un_node_type = DDI_NT_BLOCK_CHAN; 7161 un->un_ctype = CTYPE_ROD; 7162 break; 7163 default: 7164 un->un_node_type = DDI_NT_BLOCK_CHAN; 7165 un->un_ctype = CTYPE_CCS; 7166 break; 7167 } 7168 7169 /* 7170 * Try to read the interconnect type from the HBA. 7171 * 7172 * Note: This driver is currently compiled as two binaries, a parallel 7173 * scsi version (sd) and a fibre channel version (ssd). All functional 7174 * differences are determined at compile time. In the future a single 7175 * binary will be provided and the interconnect type will be used to 7176 * differentiate between fibre and parallel scsi behaviors. At that time 7177 * it will be necessary for all fibre channel HBAs to support this 7178 * property. 
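 * For illustration: an FC HBA that implements the property causes the
 * scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1) call below to
 * return INTERCONNECT_FIBRE, selecting the SD_INTERCONNECT_FIBRE arm
 * of the switch; an HBA that does not support the capability returns
 * -1, which lands in the default arm and picks the platform default.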
7179 * 7180 * Set un_f_is_fibre to TRUE (default fibre). 7181 */ 7182 un->un_f_is_fibre = TRUE; 7183 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 7184 case INTERCONNECT_SSA: 7185 un->un_interconnect_type = SD_INTERCONNECT_SSA; 7186 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7187 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 7188 break; 7189 case INTERCONNECT_PARALLEL: 7190 un->un_f_is_fibre = FALSE; 7191 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7192 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7193 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 7194 break; 7195 case INTERCONNECT_SAS: 7196 un->un_f_is_fibre = FALSE; 7197 un->un_interconnect_type = SD_INTERCONNECT_SAS; 7198 un->un_node_type = DDI_NT_BLOCK_SAS; 7199 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7200 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un); 7201 break; 7202 case INTERCONNECT_SATA: 7203 un->un_f_is_fibre = FALSE; 7204 un->un_interconnect_type = SD_INTERCONNECT_SATA; 7205 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7206 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 7207 break; 7208 case INTERCONNECT_FIBRE: 7209 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 7210 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7211 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 7212 break; 7213 case INTERCONNECT_FABRIC: 7214 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 7215 un->un_node_type = DDI_NT_BLOCK_FABRIC; 7216 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7217 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 7218 break; 7219 default: 7220 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 7221 /* 7222 * The HBA does not support the "interconnect-type" property 7223 * (or did not provide a recognized type). 7224 * 7225 * Note: This will be obsoleted when a single fibre channel 7226 * and parallel scsi driver is delivered. In the meantime the 7227 * interconnect type will be set to the platform default. If that 7228 * type is not parallel SCSI, it means that we should be 7229 * assuming "ssd" semantics. However, here this also means that 7230 * the FC HBA is not supporting the "interconnect-type" property 7231 * like we expect it to, so log this occurrence. 7232 */ 7233 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 7234 if (!SD_IS_PARALLEL_SCSI(un)) { 7235 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7236 "sd_unit_attach: un:0x%p Assuming " 7237 "INTERCONNECT_FIBRE\n", un); 7238 } else { 7239 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7240 "sd_unit_attach: un:0x%p Assuming " 7241 "INTERCONNECT_PARALLEL\n", un); 7242 un->un_f_is_fibre = FALSE; 7243 } 7244 #else 7245 /* 7246 * Note: This source will be implemented when a single fibre 7247 * channel and parallel scsi driver is delivered. The default 7248 * will be to assume that if a device does not support the 7249 * "interconnect-type" property it is a parallel SCSI HBA and 7250 * we will set the interconnect type for parallel scsi.
7251 */ 7252 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7253 un->un_f_is_fibre = FALSE; 7254 #endif 7255 break; 7256 } 7257 7258 if (un->un_f_is_fibre == TRUE) { 7259 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 7260 SCSI_VERSION_3) { 7261 switch (un->un_interconnect_type) { 7262 case SD_INTERCONNECT_FIBRE: 7263 case SD_INTERCONNECT_SSA: 7264 un->un_node_type = DDI_NT_BLOCK_WWN; 7265 break; 7266 default: 7267 break; 7268 } 7269 } 7270 } 7271 7272 /* 7273 * Initialize the Request Sense command for the target 7274 */ 7275 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 7276 goto alloc_rqs_failed; 7277 } 7278 7279 /* 7280 * Set un_retry_count with SD_RETRY_COUNT; this is OK for SPARC 7281 * with separate binaries for sd and ssd. 7282 * 7283 * x86 has 1 binary; un_retry_count is set based on connection type. 7284 * The hardcoded values will go away when SPARC uses 1 binary 7285 * for sd and ssd. These hardcoded values need to match 7286 * SD_RETRY_COUNT in sddef.h. 7287 * The value used is based on interconnect type: 7288 * fibre = 3, parallel = 5. 7289 */ 7290 #if defined(__i386) || defined(__amd64) 7291 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 7292 #else 7293 un->un_retry_count = SD_RETRY_COUNT; 7294 #endif 7295 7296 /* 7297 * Set the per disk retry count to the default number of retries 7298 * for disks and CDROMs. This value can be overridden by the 7299 * disk property list or an entry in sd.conf. 7300 */ 7301 un->un_notready_retry_count = 7302 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 7303 : DISK_NOT_READY_RETRY_COUNT(un); 7304 7305 /* 7306 * Set the busy retry count to the default value of un_retry_count. 7307 * This can be overridden by entries in sd.conf or the device 7308 * config table. 7309 */ 7310 un->un_busy_retry_count = un->un_retry_count; 7311 7312 /* 7313 * Init the reset threshold for retries. This number determines 7314 * how many retries must be performed before a reset can be issued 7315 * (for certain error conditions). This can be overridden by entries 7316 * in sd.conf or the device config table. 7317 */ 7318 un->un_reset_retry_count = (un->un_retry_count / 2); 7319 7320 /* 7321 * Set the victim_retry_count to twice the default un_retry_count. 7322 */ 7323 un->un_victim_retry_count = (2 * un->un_retry_count); 7324 7325 /* 7326 * Set the reservation release timeout to the default value of 7327 * 5 seconds. This can be overridden by entries in ssd.conf or the 7328 * device config table. 7329 */ 7330 un->un_reserve_release_time = 5; 7331 7332 /* 7333 * Set up the default maximum transfer size. Note that this may 7334 * get updated later in the attach, when setting up default wide 7335 * operations for disks. 7336 */ 7337 #if defined(__i386) || defined(__amd64) 7338 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 7339 un->un_partial_dma_supported = 1; 7340 #else 7341 un->un_max_xfer_size = (uint_t)maxphys; 7342 #endif 7343 7344 /* 7345 * Get "allow bus device reset" property (defaults to "enabled" if 7346 * the property was not defined). This is to disable bus resets for 7347 * certain kinds of error recovery. Note: In the future when a run-time 7348 * fibre check is available the soft state flag should default to 7349 * enabled.
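 * For example, a driver.conf entry of the form (illustrative only):
 *	allow-bus-device-reset=0;
 * would disable bus device resets for a non-fibre target, since the
 * ddi_getprop() call below defaults the property to 1 (enabled).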
7350 */ 7351 if (un->un_f_is_fibre == TRUE) { 7352 un->un_f_allow_bus_device_reset = TRUE; 7353 } else { 7354 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7355 "allow-bus-device-reset", 1) != 0) { 7356 un->un_f_allow_bus_device_reset = TRUE; 7357 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7358 "sd_unit_attach: un:0x%p Bus device reset " 7359 "enabled\n", un); 7360 } else { 7361 un->un_f_allow_bus_device_reset = FALSE; 7362 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7363 "sd_unit_attach: un:0x%p Bus device reset " 7364 "disabled\n", un); 7365 } 7366 } 7367 7368 /* 7369 * Check if this is an ATAPI device. ATAPI devices use Group 1 7370 * Read/Write commands and Group 2 Mode Sense/Select commands. 7371 * 7372 * Note: The "obsolete" way of doing this is to check for the "atapi" 7373 * property. The new "variant" property with a value of "atapi" has been 7374 * introduced so that future 'variants' of standard SCSI behavior (like 7375 * atapi) could be specified by the underlying HBA drivers by supplying 7376 * a new value for the "variant" property, instead of having to define a 7377 * new property. 7378 */ 7379 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7380 un->un_f_cfg_is_atapi = TRUE; 7381 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7382 "sd_unit_attach: un:0x%p Atapi device\n", un); 7383 } 7384 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7385 &variantp) == DDI_PROP_SUCCESS) { 7386 if (strcmp(variantp, "atapi") == 0) { 7387 un->un_f_cfg_is_atapi = TRUE; 7388 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7389 "sd_unit_attach: un:0x%p Atapi device\n", un); 7390 } 7391 ddi_prop_free(variantp); 7392 } 7393 7394 un->un_cmd_timeout = SD_IO_TIME; 7395 7396 un->un_busy_timeout = SD_BSY_TIMEOUT; 7397 7398 /* Info on current states, statuses, etc. (Updated frequently) */ 7399 un->un_state = SD_STATE_NORMAL; 7400 un->un_last_state = SD_STATE_NORMAL; 7401 7402 /* Control & status info for command throttling */ 7403 un->un_throttle = sd_max_throttle; 7404 un->un_saved_throttle = sd_max_throttle; 7405 un->un_min_throttle = sd_min_throttle; 7406 7407 if (un->un_f_is_fibre == TRUE) { 7408 un->un_f_use_adaptive_throttle = TRUE; 7409 } else { 7410 un->un_f_use_adaptive_throttle = FALSE; 7411 } 7412 7413 /* Removable media support. */ 7414 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7415 un->un_mediastate = DKIO_NONE; 7416 un->un_specified_mediastate = DKIO_NONE; 7417 7418 /* CVs for suspend/resume (PM or DR) */ 7419 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7420 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7421 7422 /* Power management support. */ 7423 un->un_power_level = SD_SPINDLE_UNINIT; 7424 7425 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 7426 un->un_f_wcc_inprog = 0; 7427 7428 /* 7429 * The open/close semaphore is used to serialize threads executing 7430 * in the driver's open & close entry point routines for a given 7431 * instance. 7432 */ 7433 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7434 7435 /* 7436 * The conf file entry and softstate variable are a forceful override, 7437 * meaning a non-zero value must be entered to change the default. 7438 */ 7439 un->un_f_disksort_disabled = FALSE; 7440 un->un_f_rmw_type = SD_RMW_TYPE_DEFAULT; 7441 7442 /* 7443 * Retrieve the properties from the static driver table or the driver 7444 * configuration file (.conf) for this unit and update the soft state 7445 * for the device as needed for the indicated properties.
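 * (As a sketch only: a hypothetical sd.conf entry such as
 *	sd-config-list = "VENDOR PRODUCT", "tuning-data-property";
 * where both strings are made-up placeholders, is the kind of
 * per-device override consumed by this step.)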
7446 * Note: the property configuration needs to occur here as some of the 7447 * following routines may have dependencies on soft state flags set 7448 * as part of the driver property configuration. 7449 */ 7450 sd_read_unit_properties(un); 7451 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7452 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 7453 7454 /* 7455 * A device is treated as hotpluggable only if it has the 7456 * "hotpluggable" property; otherwise it is regarded as 7457 * non-hotpluggable. 7458 */ 7459 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 7460 -1) != -1) { 7461 un->un_f_is_hotpluggable = TRUE; 7462 } 7463 7464 /* 7465 * Set the unit's attributes (flags) according to "hotpluggable" and 7466 * the RMB bit in the INQUIRY data. 7467 */ 7468 sd_set_unit_attributes(un, devi); 7469 7470 /* 7471 * By default, we mark the capacity, lbasize, and geometry 7472 * as invalid. Only if we successfully read a valid capacity 7473 * will we update the un_blockcount and un_tgt_blocksize with the 7474 * valid values (the geometry will be validated later). 7475 */ 7476 un->un_f_blockcount_is_valid = FALSE; 7477 un->un_f_tgt_blocksize_is_valid = FALSE; 7478 7479 /* 7480 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7481 * otherwise. 7482 */ 7483 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7484 un->un_blockcount = 0; 7485 7486 /* 7487 * Set up the per-instance info needed to determine the correct 7488 * CDBs and other info for issuing commands to the target. 7489 */ 7490 sd_init_cdb_limits(un); 7491 7492 /* 7493 * Set up the IO chains to use, based upon the target type. 7494 */ 7495 if (un->un_f_non_devbsize_supported) { 7496 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7497 } else { 7498 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7499 } 7500 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7501 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7502 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7503 7504 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7505 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7506 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7507 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7508 7509 7510 if (ISCD(un)) { 7511 un->un_additional_codes = sd_additional_codes; 7512 } else { 7513 un->un_additional_codes = NULL; 7514 } 7515 7516 /* 7517 * Create the kstats here so they can be available for attach-time 7518 * routines that send commands to the unit (either polled or via 7519 * sd_send_scsi_cmd). 7520 * 7521 * Note: This is a critical sequence that needs to be maintained: 7522 * 1) Instantiate the kstats here, before any routines using the 7523 * iopath (i.e. sd_send_scsi_cmd). 7524 * 2) Instantiate and initialize the partition stats 7525 * (sd_set_pstats). 7526 * 3) Initialize the error stats (sd_set_errstats), following 7527 * sd_validate_geometry(), sd_register_devid(), 7528 * and sd_cache_control().
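 * (These kstats back the statistics reported by utilities such as
 * iostat(1M): the "disk" I/O kstat created below feeds the per-device
 * throughput columns, and the error kstats feed 'iostat -E'.)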
7529 */ 7530 7531 un->un_stats = kstat_create(sd_label, instance, 7532 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7533 if (un->un_stats != NULL) { 7534 un->un_stats->ks_lock = SD_MUTEX(un); 7535 kstat_install(un->un_stats); 7536 } 7537 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7538 "sd_unit_attach: un:0x%p un_stats created\n", un); 7539 7540 sd_create_errstats(un, instance); 7541 if (un->un_errstats == NULL) { 7542 goto create_errstats_failed; 7543 } 7544 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7545 "sd_unit_attach: un:0x%p errstats created\n", un); 7546 7547 /* 7548 * The following if/else code was relocated here from below as part 7549 * of the fix for bug (4430280). However with the default setup added 7550 * on entry to this routine, it's no longer absolutely necessary for 7551 * this to be before the call to sd_spin_up_unit. 7552 */ 7553 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7554 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7555 (devp->sd_inq->inq_ansi == 5)) && 7556 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7557 7558 /* 7559 * If tagged queueing is supported by the target 7560 * and by the host adapter, then we will enable it. 7561 */ 7562 un->un_tagflags = 0; 7563 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7564 (un->un_f_arq_enabled == TRUE)) { 7565 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7566 1, 1) == 1) { 7567 un->un_tagflags = FLAG_STAG; 7568 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7569 "sd_unit_attach: un:0x%p tag queueing " 7570 "enabled\n", un); 7571 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7572 "untagged-qing", 0) == 1) { 7573 un->un_f_opt_queueing = TRUE; 7574 un->un_saved_throttle = un->un_throttle = 7575 min(un->un_throttle, 3); 7576 } else { 7577 un->un_f_opt_queueing = FALSE; 7578 un->un_saved_throttle = un->un_throttle = 1; 7579 } 7580 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7581 == 1) && (un->un_f_arq_enabled == TRUE)) { 7582 /* The Host Adapter supports internal queueing. */ 7583 un->un_f_opt_queueing = TRUE; 7584 un->un_saved_throttle = un->un_throttle = 7585 min(un->un_throttle, 3); 7586 } else { 7587 un->un_f_opt_queueing = FALSE; 7588 un->un_saved_throttle = un->un_throttle = 1; 7589 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7590 "sd_unit_attach: un:0x%p no tag queueing\n", un); 7591 } 7592 7593 /* 7594 * Enable large transfers for SATA/SAS drives 7595 */ 7596 if (SD_IS_SERIAL(un)) { 7597 un->un_max_xfer_size = 7598 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7599 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7600 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7601 "sd_unit_attach: un:0x%p max transfer " 7602 "size=0x%x\n", un, un->un_max_xfer_size); 7603 7604 } 7605 7606 /* Set up or tear down default wide operations for disks */ 7607 7608 /* 7609 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 7610 * and "ssd_max_xfer_size" to exist simultaneously on the same 7611 * system and be set to different values. In the future this 7612 * code may need to be updated when the ssd module is 7613 * obsoleted and removed from the system.
(4299588) 7614 */ 7615 if (SD_IS_PARALLEL_SCSI(un) && 7616 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 7617 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 7618 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7619 1, 1) == 1) { 7620 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7621 "sd_unit_attach: un:0x%p Wide Transfer " 7622 "enabled\n", un); 7623 } 7624 7625 /* 7626 * If tagged queuing has also been enabled, then 7627 * enable large xfers 7628 */ 7629 if (un->un_saved_throttle == sd_max_throttle) { 7630 un->un_max_xfer_size = 7631 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7632 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7633 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7634 "sd_unit_attach: un:0x%p max transfer " 7635 "size=0x%x\n", un, un->un_max_xfer_size); 7636 } 7637 } else { 7638 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7639 0, 1) == 1) { 7640 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7641 "sd_unit_attach: un:0x%p " 7642 "Wide Transfer disabled\n", un); 7643 } 7644 } 7645 } else { 7646 un->un_tagflags = FLAG_STAG; 7647 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7648 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7649 } 7650 7651 /* 7652 * If this target supports LUN reset, try to enable it. 7653 */ 7654 if (un->un_f_lun_reset_enabled) { 7655 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7656 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7657 "un:0x%p lun_reset capability set\n", un); 7658 } else { 7659 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7660 "un:0x%p lun-reset capability not set\n", un); 7661 } 7662 } 7663 7664 /* 7665 * Adjust the maximum transfer size. This is to fix 7666 * the problem of partial DMA support on SPARC. Some 7667 * HBA drivers, such as aac, have a very small dma_attr_maxxfer 7668 * size, which requires partial DMA support on SPARC. 7669 * In the future the SPARC pci nexus driver may solve 7670 * the problem instead of this fix. 7671 */ 7672 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7673 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7674 /* We need DMA partial even on sparc to ensure sddump() works */ 7675 un->un_max_xfer_size = max_xfer_size; 7676 if (un->un_partial_dma_supported == 0) 7677 un->un_partial_dma_supported = 1; 7678 } 7679 if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 7680 DDI_PROP_DONTPASS, "buf_break", 0) == 1) { 7681 if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr, 7682 un->un_max_xfer_size) == 1) { 7683 un->un_buf_breakup_supported = 1; 7684 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7685 "un:0x%p Buf breakup enabled\n", un); 7686 } 7687 } 7688 7689 /* 7690 * Set PKT_DMA_PARTIAL flag. 7691 */ 7692 if (un->un_partial_dma_supported == 1) { 7693 un->un_pkt_flags = PKT_DMA_PARTIAL; 7694 } else { 7695 un->un_pkt_flags = 0; 7696 } 7697 7698 /* Initialize sd_ssc_t for internal uscsi commands */ 7699 ssc = sd_ssc_init(un); 7700 scsi_fm_init(devp); 7701 7702 /* 7703 * Allocate memory for the SCSI FMA structures. 7704 */ 7705 un->un_fm_private = 7706 kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP); 7707 sfip = (struct sd_fm_internal *)un->un_fm_private; 7708 sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd; 7709 sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo; 7710 sfip->fm_ssc.ssc_un = un; 7711 7712 if (ISCD(un) || 7713 un->un_f_has_removable_media || 7714 devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) { 7715 /* 7716 * We don't touch CDROMs or DDI_FM_NOT_CAPABLE devices; 7717 * their logging is unchanged.
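 * (That is, they continue to report through scsi_log() to
 * /var/adm/messages rather than through FMA telemetry.)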
7718 */ 7719 sfip->fm_log_level = SD_FM_LOG_NSUP; 7720 } else { 7721 /* 7722 * If we get here, this is a non-CDROM, FM-capable 7723 * device, and it will not keep the old scsi_log output 7724 * as before in /var/adm/messages. However, the property 7725 * "fm-scsi-log" will control whether the FM telemetry will 7726 * be logged in /var/adm/messages. 7727 */ 7728 int fm_scsi_log; 7729 fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 7730 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0); 7731 7732 if (fm_scsi_log) 7733 sfip->fm_log_level = SD_FM_LOG_EREPORT; 7734 else 7735 sfip->fm_log_level = SD_FM_LOG_SILENT; 7736 } 7737 7738 /* 7739 * At this point in the attach, we have enough info in the 7740 * soft state to be able to issue commands to the target. 7741 * 7742 * All command paths used below MUST issue their commands as 7743 * SD_PATH_DIRECT. This is important as intermediate layers 7744 * are not all initialized yet (such as PM). 7745 */ 7746 7747 /* 7748 * Send a TEST UNIT READY command to the device. This should clear 7749 * any outstanding UNIT ATTENTION that may be present. 7750 * 7751 * Note: Don't check for success, just track if there is a reservation, 7752 * this is a throw away command to clear any unit attentions. 7753 * 7754 * Note: This MUST be the first command issued to the target during 7755 * attach to ensure power on UNIT ATTENTIONS are cleared. 7756 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7757 * with attempts at spinning up a device with no media. 7758 */ 7759 status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 7760 if (status != 0) { 7761 if (status == EACCES) 7762 reservation_flag = SD_TARGET_IS_RESERVED; 7763 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7764 } 7765 7766 /* 7767 * If the device is NOT a removable media device, attempt to spin 7768 * it up (using the START_STOP_UNIT command) and read its capacity 7769 * (using the READ CAPACITY command). Note, however, that either 7770 * of these could fail and in some cases we would continue with 7771 * the attach despite the failure (see below). 7772 */ 7773 if (un->un_f_descr_format_supported) { 7774 7775 switch (sd_spin_up_unit(ssc)) { 7776 case 0: 7777 /* 7778 * Spin-up was successful; now try to read the 7779 * capacity. If successful then save the results 7780 * and mark the capacity & lbasize as valid. 7781 */ 7782 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7783 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7784 7785 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 7786 &lbasize, SD_PATH_DIRECT); 7787 7788 switch (status) { 7789 case 0: { 7790 if (capacity > DK_MAX_BLOCKS) { 7791 #ifdef _LP64 7792 if ((capacity + 1) > 7793 SD_GROUP1_MAX_ADDRESS) { 7794 /* 7795 * Enable descriptor format 7796 * sense data so that we can 7797 * get 64 bit sense data 7798 * fields. 7799 */ 7800 sd_enable_descr_sense(ssc); 7801 } 7802 #else 7803 /* 32-bit kernels can't handle this */ 7804 scsi_log(SD_DEVINFO(un), 7805 sd_label, CE_WARN, 7806 "disk has %llu blocks, which " 7807 "is too large for a 32-bit " 7808 "kernel", capacity); 7809 7810 #if defined(__i386) || defined(__amd64) 7811 /* 7812 * A 1TB disk was treated as (1TB - 512)B 7813 * in the past, so it might have a 7814 * valid VTOC and Solaris partitions; 7815 * we have to allow it to continue to 7816 * work.
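 * (Worked example, assuming DK_MAX_BLOCKS is 2^31 - 1: a 1TB disk
 * with 512-byte blocks holds 2^31 = 2,147,483,648 blocks, one more
 * than a signed 32-bit daddr_t can count; trimming one block, i.e.
 * (1TB - 512)B, leaves exactly 2^31 - 1 blocks, which is why
 * 'capacity - 1' is compared against DK_MAX_BLOCKS below.)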
7817 */ 7818 if (capacity -1 > DK_MAX_BLOCKS) 7819 #endif 7820 goto spinup_failed; 7821 #endif 7822 } 7823 7824 /* 7825 * Here it is not necessary to check whether the 7826 * device capacity is bigger than what the max 7827 * HBA CDB can support, because 7828 * sd_send_scsi_READ_CAPACITY retrieves the 7829 * capacity by sending a USCSI command, which 7830 * is itself constrained by the max HBA CDB. 7831 * sd_send_scsi_READ_CAPACITY returns EINVAL 7832 * when the required CDB length is bigger than 7833 * what the HBA supports; that case is handled 7834 * in "case EINVAL" below. 7835 */ 7836 7837 /* 7838 * The following relies on 7839 * sd_send_scsi_READ_CAPACITY never 7840 * returning 0 for capacity and/or lbasize. 7841 */ 7842 sd_update_block_info(un, lbasize, capacity); 7843 7844 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7845 "sd_unit_attach: un:0x%p capacity = %ld " 7846 "blocks; lbasize= %ld.\n", un, 7847 un->un_blockcount, un->un_tgt_blocksize); 7848 7849 break; 7850 } 7851 case EINVAL: 7852 /* 7853 * In the case where the max-cdb-length property 7854 * is smaller than the required CDB length for 7855 * a SCSI device, a target driver can fail to 7856 * attach to that device. 7857 */ 7858 scsi_log(SD_DEVINFO(un), 7859 sd_label, CE_WARN, 7860 "disk capacity is too large " 7861 "for current cdb length"); 7862 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7863 7864 goto spinup_failed; 7865 case EACCES: 7866 /* 7867 * Should never get here if the spin-up 7868 * succeeded, but code it in anyway. 7869 * From here, just continue with the attach... 7870 */ 7871 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7872 "sd_unit_attach: un:0x%p " 7873 "sd_send_scsi_READ_CAPACITY " 7874 "returned reservation conflict\n", un); 7875 reservation_flag = SD_TARGET_IS_RESERVED; 7876 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7877 break; 7878 default: 7879 /* 7880 * Likewise, should never get here if the 7881 * spin-up succeeded. Just continue with 7882 * the attach... 7883 */ 7884 if (status == EIO) 7885 sd_ssc_assessment(ssc, 7886 SD_FMT_STATUS_CHECK); 7887 else 7888 sd_ssc_assessment(ssc, 7889 SD_FMT_IGNORE); 7890 break; 7891 } 7892 break; 7893 case EACCES: 7894 /* 7895 * Device is reserved by another host. In this case 7896 * we could not spin it up or read the capacity, but 7897 * we continue with the attach anyway. 7898 */ 7899 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7900 "sd_unit_attach: un:0x%p spin-up reservation " 7901 "conflict.\n", un); 7902 reservation_flag = SD_TARGET_IS_RESERVED; 7903 break; 7904 default: 7905 /* Fail the attach if the spin-up failed.
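 * (For instance, the device never became ready in response to the
 * TEST UNIT READY / START STOP UNIT sequence issued by
 * sd_spin_up_unit().)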
*/ 7906 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7907 "sd_unit_attach: un:0x%p spin-up failed.", un); 7908 goto spinup_failed; 7909 } 7910 7911 } 7912 7913 /* 7914 * Check to see if this is an MMC drive 7915 */ 7916 if (ISCD(un)) { 7917 sd_set_mmc_caps(ssc); 7918 } 7919 7920 7921 /* 7922 * Add a zero-length attribute to tell the world we support 7923 * kernel ioctls (for layered drivers) 7924 */ 7925 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7926 DDI_KERNEL_IOCTL, NULL, 0); 7927 7928 /* 7929 * Add a boolean property to tell the world we support 7930 * the B_FAILFAST flag (for layered drivers) 7931 */ 7932 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7933 "ddi-failfast-supported", NULL, 0); 7934 7935 /* 7936 * Initialize power management 7937 */ 7938 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7939 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7940 sd_setup_pm(ssc, devi); 7941 if (un->un_f_pm_is_enabled == FALSE) { 7942 /* 7943 * For performance, point to a jump table that does 7944 * not include pm. 7945 * The direct and priority chains don't change with PM. 7946 * 7947 * Note: this is currently done based on individual device 7948 * capabilities. When an interface for determining system 7949 * power enabled state becomes available, or when additional 7950 * layers are added to the command chain, these values will 7951 * have to be re-evaluated for correctness. 7952 */ 7953 if (un->un_f_non_devbsize_supported) { 7954 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7955 } else { 7956 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7957 } 7958 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7959 } 7960 7961 /* 7962 * This property is set to 0 by HA software to avoid retries 7963 * on a reserved disk. (The preferred property name is 7964 * "retry-on-reservation-conflict") (1189689) 7965 * 7966 * Note: The use of a global here can have unintended consequences. A 7967 * per-instance variable is preferable to match the capabilities of 7968 * different underlying HBAs. (4402600) 7969 */ 7970 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7971 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7972 sd_retry_on_reservation_conflict); 7973 if (sd_retry_on_reservation_conflict != 0) { 7974 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7975 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7976 sd_retry_on_reservation_conflict); 7977 } 7978 7979 /* Set up options for QFULL handling. */ 7980 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7981 "qfull-retries", -1)) != -1) { 7982 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7983 rval, 1); 7984 } 7985 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7986 "qfull-retry-interval", -1)) != -1) { 7987 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7988 rval, 1); 7989 } 7990 7991 /* 7992 * This just prints a message that announces the existence of the 7993 * device. The message is always printed in the system logfile, but 7994 * only appears on the console if the system is booted with the 7995 * -v (verbose) argument.
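 * (A typical announcement is of the form "sd0 is /pci@0,0/.../sd@0,0";
 * the name and path shown are illustrative.)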
7996 */ 7997 ddi_report_dev(devi); 7998 7999 un->un_mediastate = DKIO_NONE; 8000 8001 cmlb_alloc_handle(&un->un_cmlbhandle); 8002 8003 #if defined(__i386) || defined(__amd64) 8004 /* 8005 * On x86, compensate for off-by-1 legacy error 8006 */ 8007 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 8008 (lbasize == un->un_sys_blocksize)) 8009 offbyone = CMLB_OFF_BY_ONE; 8010 #endif 8011 8012 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 8013 VOID2BOOLEAN(un->un_f_has_removable_media != 0), 8014 VOID2BOOLEAN(un->un_f_is_hotpluggable != 0), 8015 un->un_node_type, offbyone, un->un_cmlbhandle, 8016 (void *)SD_PATH_DIRECT) != 0) { 8017 goto cmlb_attach_failed; 8018 } 8019 8020 8021 /* 8022 * Read and validate the device's geometry (i.e., disk label). 8023 * A new unformatted drive will not have a valid geometry, but 8024 * the driver needs to successfully attach to this device so 8025 * the drive can be formatted via ioctls. 8026 */ 8027 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 8028 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 8029 8030 mutex_enter(SD_MUTEX(un)); 8031 8032 /* 8033 * Read and initialize the devid for the unit. 8034 */ 8035 if (un->un_f_devid_supported) { 8036 sd_register_devid(ssc, devi, reservation_flag); 8037 } 8038 mutex_exit(SD_MUTEX(un)); 8039 8040 #if (defined(__fibre)) 8041 /* 8042 * Register callbacks for fibre only. You can't do this solely 8043 * on the basis of the devid_type because this is HBA-specific. 8044 * We need to query our HBA capabilities to find out whether to 8045 * register or not. 8046 */ 8047 if (un->un_f_is_fibre) { 8048 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 8049 sd_init_event_callbacks(un); 8050 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8051 "sd_unit_attach: un:0x%p event callbacks inserted", 8052 un); 8053 } 8054 } 8055 #endif 8056 8057 if (un->un_f_opt_disable_cache == TRUE) { 8058 /* 8059 * Disable both read cache and write cache. This is 8060 * the historic behavior of the keywords in the config file. 8061 */ 8062 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 8063 0) { 8064 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8065 "sd_unit_attach: un:0x%p Could not disable " 8066 "caching", un); 8067 goto devid_failed; 8068 } 8069 } 8070 8071 /* 8072 * Check the value of the WCE bit now and 8073 * set un_f_write_cache_enabled accordingly. 8074 */ 8075 (void) sd_get_write_cache_enabled(ssc, &wc_enabled); 8076 mutex_enter(SD_MUTEX(un)); 8077 un->un_f_write_cache_enabled = (wc_enabled != 0); 8078 mutex_exit(SD_MUTEX(un)); 8079 8080 if (un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR && 8081 un->un_tgt_blocksize != DEV_BSIZE) { 8082 if (!(un->un_wm_cache)) { 8083 (void) snprintf(name_str, sizeof (name_str), 8084 "%s%d_cache", 8085 ddi_driver_name(SD_DEVINFO(un)), 8086 ddi_get_instance(SD_DEVINFO(un))); 8087 un->un_wm_cache = kmem_cache_create( 8088 name_str, sizeof (struct sd_w_map), 8089 8, sd_wm_cache_constructor, 8090 sd_wm_cache_destructor, NULL, 8091 (void *)un, NULL, 0); 8092 if (!(un->un_wm_cache)) { 8093 goto wm_cache_failed; 8094 } 8095 } 8096 } 8097 8098 /* 8099 * Check the value of the NV_SUP bit and set 8100 * un_f_suppress_cache_flush accordingly. 8101 */ 8102 sd_get_nv_sup(ssc); 8103 8104 /* 8105 * Find out what type of reservation this disk supports. 8106 */ 8107 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL); 8108 8109 switch (status) { 8110 case 0: 8111 /* 8112 * SCSI-3 reservations are supported.
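 * (The device accepted PERSISTENT RESERVE IN with the READ KEYS
 * service action, so SCSI-3 persistent reservations are assumed from
 * here on.)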
8113 */ 8114 un->un_reservation_type = SD_SCSI3_RESERVATION; 8115 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8116 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 8117 break; 8118 case ENOTSUP: 8119 /* 8120 * The PERSISTENT RESERVE IN command would not be recognized by 8121 * a SCSI-2 device, so assume the reservation type is SCSI-2. 8122 */ 8123 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8124 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 8125 un->un_reservation_type = SD_SCSI2_RESERVATION; 8126 8127 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8128 break; 8129 default: 8130 /* 8131 * default to SCSI-3 reservations 8132 */ 8133 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8134 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 8135 un->un_reservation_type = SD_SCSI3_RESERVATION; 8136 8137 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8138 break; 8139 } 8140 8141 /* 8142 * Set the pstat and error stat values here, so data obtained during the 8143 * previous attach-time routines is available. 8144 * 8145 * Note: This is a critical sequence that needs to be maintained: 8146 * 1) Instantiate the kstats before any routines using the iopath 8147 * (i.e. sd_send_scsi_cmd). 8148 * 2) Initialize the error stats (sd_set_errstats) and partition 8149 * stats (sd_set_pstats) here, following 8150 * cmlb_validate_geometry(), sd_register_devid(), and 8151 * sd_cache_control(). 8152 */ 8153 8154 if (un->un_f_pkstats_enabled && geom_label_valid) { 8155 sd_set_pstats(un); 8156 SD_TRACE(SD_LOG_IO_PARTITION, un, 8157 "sd_unit_attach: un:0x%p pstats created and set\n", un); 8158 } 8159 8160 sd_set_errstats(un); 8161 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8162 "sd_unit_attach: un:0x%p errstats set\n", un); 8163 8164 8165 /* 8166 * After successfully attaching an instance, we record the information 8167 * of how many luns have been attached on the corresponding target and 8168 * controller for parallel SCSI. This information is used when sd tries 8169 * to set the tagged queuing capability in the HBA. 8170 */ 8171 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8172 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 8173 } 8174 8175 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8176 "sd_unit_attach: un:0x%p exit success\n", un); 8177 8178 /* Uninitialize sd_ssc_t pointer */ 8179 sd_ssc_fini(ssc); 8180 8181 return (DDI_SUCCESS); 8182 8183 /* 8184 * An error occurred during the attach; clean up & return failure. 8185 */ 8186 wm_cache_failed: 8187 devid_failed: 8188 8189 setup_pm_failed: 8190 ddi_remove_minor_node(devi, NULL); 8191 8192 cmlb_attach_failed: 8193 /* 8194 * Cleanup from the scsi_ifsetcap() calls (437868) 8195 */ 8196 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8197 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8198 8199 /* 8200 * Refer to the comments of setting tagged-qing in the beginning of 8201 * sd_unit_attach. We can only disable tagged queuing when there is 8202 * no lun attached on the target. 8203 */ 8204 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 8205 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8206 } 8207 8208 if (un->un_f_is_fibre == FALSE) { 8209 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8210 } 8211 8212 spinup_failed: 8213 8214 /* Uninitialize sd_ssc_t pointer */ 8215 sd_ssc_fini(ssc); 8216 8217 mutex_enter(SD_MUTEX(un)); 8218 8219 /* Deallocate SCSI FMA memory spaces */ 8220 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8221 8222 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd.
restart */ 8223 if (un->un_direct_priority_timeid != NULL) { 8224 timeout_id_t temp_id = un->un_direct_priority_timeid; 8225 un->un_direct_priority_timeid = NULL; 8226 mutex_exit(SD_MUTEX(un)); 8227 (void) untimeout(temp_id); 8228 mutex_enter(SD_MUTEX(un)); 8229 } 8230 8231 /* Cancel any pending start/stop timeouts */ 8232 if (un->un_startstop_timeid != NULL) { 8233 timeout_id_t temp_id = un->un_startstop_timeid; 8234 un->un_startstop_timeid = NULL; 8235 mutex_exit(SD_MUTEX(un)); 8236 (void) untimeout(temp_id); 8237 mutex_enter(SD_MUTEX(un)); 8238 } 8239 8240 /* Cancel any pending reset-throttle timeouts */ 8241 if (un->un_reset_throttle_timeid != NULL) { 8242 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8243 un->un_reset_throttle_timeid = NULL; 8244 mutex_exit(SD_MUTEX(un)); 8245 (void) untimeout(temp_id); 8246 mutex_enter(SD_MUTEX(un)); 8247 } 8248 8249 /* Cancel rmw warning message timeouts */ 8250 if (un->un_rmw_msg_timeid != NULL) { 8251 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8252 un->un_rmw_msg_timeid = NULL; 8253 mutex_exit(SD_MUTEX(un)); 8254 (void) untimeout(temp_id); 8255 mutex_enter(SD_MUTEX(un)); 8256 } 8257 8258 /* Cancel any pending retry timeouts */ 8259 if (un->un_retry_timeid != NULL) { 8260 timeout_id_t temp_id = un->un_retry_timeid; 8261 un->un_retry_timeid = NULL; 8262 mutex_exit(SD_MUTEX(un)); 8263 (void) untimeout(temp_id); 8264 mutex_enter(SD_MUTEX(un)); 8265 } 8266 8267 /* Cancel any pending delayed cv broadcast timeouts */ 8268 if (un->un_dcvb_timeid != NULL) { 8269 timeout_id_t temp_id = un->un_dcvb_timeid; 8270 un->un_dcvb_timeid = NULL; 8271 mutex_exit(SD_MUTEX(un)); 8272 (void) untimeout(temp_id); 8273 mutex_enter(SD_MUTEX(un)); 8274 } 8275 8276 mutex_exit(SD_MUTEX(un)); 8277 8278 /* There should not be any in-progress I/O so ASSERT this check */ 8279 ASSERT(un->un_ncmds_in_transport == 0); 8280 ASSERT(un->un_ncmds_in_driver == 0); 8281 8282 /* Do not free the softstate if the callback routine is active */ 8283 sd_sync_with_callback(un); 8284 8285 /* 8286 * Partition stats apparently are not used with removables. These would 8287 * not have been created during attach, so no need to clean them up... 8288 */ 8289 if (un->un_errstats != NULL) { 8290 kstat_delete(un->un_errstats); 8291 un->un_errstats = NULL; 8292 } 8293 8294 create_errstats_failed: 8295 8296 if (un->un_stats != NULL) { 8297 kstat_delete(un->un_stats); 8298 un->un_stats = NULL; 8299 } 8300 8301 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8302 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8303 8304 ddi_prop_remove_all(devi); 8305 sema_destroy(&un->un_semoclose); 8306 cv_destroy(&un->un_state_cv); 8307 8308 getrbuf_failed: 8309 8310 sd_free_rqs(un); 8311 8312 alloc_rqs_failed: 8313 8314 devp->sd_private = NULL; 8315 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8316 8317 get_softstate_failed: 8318 /* 8319 * Note: the man pages are unclear as to whether or not doing a 8320 * ddi_soft_state_free(sd_state, instance) is the right way to 8321 * clean up after the ddi_soft_state_zalloc() if the subsequent 8322 * ddi_get_soft_state() fails. The implication seems to be 8323 * that the get_soft_state cannot fail if the zalloc succeeds. 8324 */ 8325 #ifndef XPV_HVM_DRIVER 8326 ddi_soft_state_free(sd_state, instance); 8327 #endif /* !XPV_HVM_DRIVER */ 8328 8329 probe_failed: 8330 scsi_unprobe(devp); 8331 8332 return (DDI_FAILURE); 8333 } 8334 8335 8336 /* 8337 * Function: sd_unit_detach 8338 * 8339 * Description: Performs DDI_DETACH processing for sddetach(). 
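 *
 * Arguments: devi - opaque device info handle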
8340 * 8341 * Return Code: DDI_SUCCESS 8342 * DDI_FAILURE 8343 * 8344 * Context: Kernel thread context 8345 */ 8346 8347 static int 8348 sd_unit_detach(dev_info_t *devi) 8349 { 8350 struct scsi_device *devp; 8351 struct sd_lun *un; 8352 int i; 8353 int tgt; 8354 dev_t dev; 8355 dev_info_t *pdip = ddi_get_parent(devi); 8356 #ifndef XPV_HVM_DRIVER 8357 int instance = ddi_get_instance(devi); 8358 #endif /* !XPV_HVM_DRIVER */ 8359 8360 mutex_enter(&sd_detach_mutex); 8361 8362 /* 8363 * Fail the detach for any of the following: 8364 * - Unable to get the sd_lun struct for the instance 8365 * - A layered driver has an outstanding open on the instance 8366 * - Another thread is already detaching this instance 8367 * - Another thread is currently performing an open 8368 */ 8369 devp = ddi_get_driver_private(devi); 8370 if ((devp == NULL) || 8371 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8372 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8373 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8374 mutex_exit(&sd_detach_mutex); 8375 return (DDI_FAILURE); 8376 } 8377 8378 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8379 8380 /* 8381 * Mark this instance as currently in a detach, to inhibit any 8382 * opens from a layered driver. 8383 */ 8384 un->un_detach_count++; 8385 mutex_exit(&sd_detach_mutex); 8386 8387 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 8388 SCSI_ADDR_PROP_TARGET, -1); 8389 8390 dev = sd_make_device(SD_DEVINFO(un)); 8391 8392 #ifndef lint 8393 _NOTE(COMPETING_THREADS_NOW); 8394 #endif 8395 8396 mutex_enter(SD_MUTEX(un)); 8397 8398 /* 8399 * Fail the detach if there are any outstanding layered 8400 * opens on this device. 8401 */ 8402 for (i = 0; i < NDKMAP; i++) { 8403 if (un->un_ocmap.lyropen[i] != 0) { 8404 goto err_notclosed; 8405 } 8406 } 8407 8408 /* 8409 * Verify there are NO outstanding commands issued to this device. 8410 * i.e., un_ncmds_in_transport == 0. 8411 * It's possible to have outstanding commands through the physio 8412 * code path, even though everything's closed. 8413 */ 8414 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8415 (un->un_direct_priority_timeid != NULL) || 8416 (un->un_state == SD_STATE_RWAIT)) { 8417 mutex_exit(SD_MUTEX(un)); 8418 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8419 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8420 goto err_stillbusy; 8421 } 8422 8423 /* 8424 * If we have the device reserved, release the reservation. 8425 */ 8426 if ((un->un_resvd_status & SD_RESERVE) && 8427 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8428 mutex_exit(SD_MUTEX(un)); 8429 /* 8430 * Note: sd_reserve_release sends a command to the device 8431 * via the sd_ioctlcmd() path, and can sleep. 8432 */ 8433 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8434 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8435 "sd_dr_detach: Cannot release reservation \n"); 8436 } 8437 } else { 8438 mutex_exit(SD_MUTEX(un)); 8439 } 8440 8441 /* 8442 * Untimeout any reserve recover, throttle reset, restart unit 8443 * and delayed broadcast timeout threads. Protect the timeout pointer 8444 * from getting nulled by their callback functions.
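 *
 * The idiom used for each id below is, as a sketch (un_foo_timeid is a
 * placeholder for the actual field names):
 *
 *	timeout_id_t temp_id = un->un_foo_timeid;
 *	un->un_foo_timeid = NULL;
 *	mutex_exit(SD_MUTEX(un));
 *	(void) untimeout(temp_id);
 *	mutex_enter(SD_MUTEX(un));
 *
 * The id is captured and cleared while SD_MUTEX is held, so a firing
 * callback cannot race untimeout() into using a stale pointer.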
8445 */ 8446 mutex_enter(SD_MUTEX(un)); 8447 if (un->un_resvd_timeid != NULL) { 8448 timeout_id_t temp_id = un->un_resvd_timeid; 8449 un->un_resvd_timeid = NULL; 8450 mutex_exit(SD_MUTEX(un)); 8451 (void) untimeout(temp_id); 8452 mutex_enter(SD_MUTEX(un)); 8453 } 8454 8455 if (un->un_reset_throttle_timeid != NULL) { 8456 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8457 un->un_reset_throttle_timeid = NULL; 8458 mutex_exit(SD_MUTEX(un)); 8459 (void) untimeout(temp_id); 8460 mutex_enter(SD_MUTEX(un)); 8461 } 8462 8463 if (un->un_startstop_timeid != NULL) { 8464 timeout_id_t temp_id = un->un_startstop_timeid; 8465 un->un_startstop_timeid = NULL; 8466 mutex_exit(SD_MUTEX(un)); 8467 (void) untimeout(temp_id); 8468 mutex_enter(SD_MUTEX(un)); 8469 } 8470 8471 if (un->un_rmw_msg_timeid != NULL) { 8472 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8473 un->un_rmw_msg_timeid = NULL; 8474 mutex_exit(SD_MUTEX(un)); 8475 (void) untimeout(temp_id); 8476 mutex_enter(SD_MUTEX(un)); 8477 } 8478 8479 if (un->un_dcvb_timeid != NULL) { 8480 timeout_id_t temp_id = un->un_dcvb_timeid; 8481 un->un_dcvb_timeid = NULL; 8482 mutex_exit(SD_MUTEX(un)); 8483 (void) untimeout(temp_id); 8484 } else { 8485 mutex_exit(SD_MUTEX(un)); 8486 } 8487 8488 /* Remove any pending reservation reclaim requests for this device */ 8489 sd_rmv_resv_reclaim_req(dev); 8490 8491 mutex_enter(SD_MUTEX(un)); 8492 8493 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8494 if (un->un_direct_priority_timeid != NULL) { 8495 timeout_id_t temp_id = un->un_direct_priority_timeid; 8496 un->un_direct_priority_timeid = NULL; 8497 mutex_exit(SD_MUTEX(un)); 8498 (void) untimeout(temp_id); 8499 mutex_enter(SD_MUTEX(un)); 8500 } 8501 8502 /* Cancel any active multi-host disk watch thread requests */ 8503 if (un->un_mhd_token != NULL) { 8504 mutex_exit(SD_MUTEX(un)); 8505 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8506 if (scsi_watch_request_terminate(un->un_mhd_token, 8507 SCSI_WATCH_TERMINATE_NOWAIT)) { 8508 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8509 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8510 /* 8511 * Note: We are returning here after having removed 8512 * some driver timeouts above. This is consistent with 8513 * the legacy implementation but perhaps the watch 8514 * terminate call should be made with the wait flag set. 8515 */ 8516 goto err_stillbusy; 8517 } 8518 mutex_enter(SD_MUTEX(un)); 8519 un->un_mhd_token = NULL; 8520 } 8521 8522 if (un->un_swr_token != NULL) { 8523 mutex_exit(SD_MUTEX(un)); 8524 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8525 if (scsi_watch_request_terminate(un->un_swr_token, 8526 SCSI_WATCH_TERMINATE_NOWAIT)) { 8527 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8528 "sd_dr_detach: Cannot cancel swr watch request\n"); 8529 /* 8530 * Note: We are returning here after having removed 8531 * some driver timeouts above. This is consistent with 8532 * the legacy implementation but perhaps the watch 8533 * terminate call should be made with the wait flag set. 8534 */ 8535 goto err_stillbusy; 8536 } 8537 mutex_enter(SD_MUTEX(un)); 8538 un->un_swr_token = NULL; 8539 } 8540 8541 mutex_exit(SD_MUTEX(un)); 8542 8543 /* 8544 * Clear any scsi_reset_notifies. We clear the reset notifies 8545 * if we have not registered one. 8546 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 
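 * (For that reason the SCSI_RESET_CANCEL call below is made without
 * SD_MUTEX held.)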
8547 */ 8548 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 8549 sd_mhd_reset_notify_cb, (caddr_t)un); 8550 8551 /* 8552 * Protect the timeout pointers from getting nulled by 8553 * their callback functions during the cancellation process. 8554 * In such a scenario untimeout can be invoked with a null value. 8555 */ 8556 _NOTE(NO_COMPETING_THREADS_NOW); 8557 8558 mutex_enter(&un->un_pm_mutex); 8559 if (un->un_pm_idle_timeid != NULL) { 8560 timeout_id_t temp_id = un->un_pm_idle_timeid; 8561 un->un_pm_idle_timeid = NULL; 8562 mutex_exit(&un->un_pm_mutex); 8563 8564 /* 8565 * Timeout is active; cancel it. 8566 * Note that it'll never be active on a device 8567 * that does not support PM therefore we don't 8568 * have to check before calling pm_idle_component. 8569 */ 8570 (void) untimeout(temp_id); 8571 (void) pm_idle_component(SD_DEVINFO(un), 0); 8572 mutex_enter(&un->un_pm_mutex); 8573 } 8574 8575 /* 8576 * Check whether there is already a timeout scheduled for power 8577 * management. If yes, then don't lower the power here; that's 8578 * the timeout handler's job. 8579 */ 8580 if (un->un_pm_timeid != NULL) { 8581 timeout_id_t temp_id = un->un_pm_timeid; 8582 un->un_pm_timeid = NULL; 8583 mutex_exit(&un->un_pm_mutex); 8584 /* 8585 * Timeout is active; cancel it. 8586 * Note that it'll never be active on a device 8587 * that does not support PM therefore we don't 8588 * have to check before calling pm_idle_component. 8589 */ 8590 (void) untimeout(temp_id); 8591 (void) pm_idle_component(SD_DEVINFO(un), 0); 8592 8593 } else { 8594 mutex_exit(&un->un_pm_mutex); 8595 if ((un->un_f_pm_is_enabled == TRUE) && 8596 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 8597 DDI_SUCCESS)) { 8598 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8599 "sd_dr_detach: Lower power request failed, ignoring.\n"); 8600 /* 8601 * Fix for bug: 4297749, item # 13 8602 * The above test now includes a check to see if PM is 8603 * supported by this device before calling 8604 * pm_lower_power(). 8605 * Note, the following is not dead code. The call to 8606 * pm_lower_power above will generate a call back into 8607 * our sdpower routine which might result in a timeout 8608 * handler getting activated. Therefore the following 8609 * code is valid and necessary. 8610 */ 8611 mutex_enter(&un->un_pm_mutex); 8612 if (un->un_pm_timeid != NULL) { 8613 timeout_id_t temp_id = un->un_pm_timeid; 8614 un->un_pm_timeid = NULL; 8615 mutex_exit(&un->un_pm_mutex); 8616 (void) untimeout(temp_id); 8617 (void) pm_idle_component(SD_DEVINFO(un), 0); 8618 } else { 8619 mutex_exit(&un->un_pm_mutex); 8620 } 8621 } 8622 } 8623 8624 /* 8625 * Cleanup from the scsi_ifsetcap() calls (437868) 8626 * Relocated here from above to be after the call to 8627 * pm_lower_power, which was getting errors. 8628 */ 8629 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8630 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8631 8632 /* 8633 * Currently, tagged queuing is supported per target by the HBA. 8634 * Setting this per lun instance actually sets the capability of this 8635 * target in the HBA, which affects those luns already attached on the 8636 * same target. So during detach, we can disable this capability 8637 * only when this is the only lun left on this target. By doing 8638 * this, we assume a target has the same tagged queuing capability 8639 * for every lun. The condition can be removed when the HBA is changed 8640 * to support per-lun tagged queuing capability.
8641 */ 8642 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 8643 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8644 } 8645 8646 if (un->un_f_is_fibre == FALSE) { 8647 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8648 } 8649 8650 /* 8651 * Remove any event callbacks, fibre only 8652 */ 8653 if (un->un_f_is_fibre == TRUE) { 8654 if ((un->un_insert_event != NULL) && 8655 (ddi_remove_event_handler(un->un_insert_cb_id) != 8656 DDI_SUCCESS)) { 8657 /* 8658 * Note: We are returning here after having done 8659 * substantial cleanup above. This is consistent 8660 * with the legacy implementation but this may not 8661 * be the right thing to do. 8662 */ 8663 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8664 "sd_dr_detach: Cannot cancel insert event\n"); 8665 goto err_remove_event; 8666 } 8667 un->un_insert_event = NULL; 8668 8669 if ((un->un_remove_event != NULL) && 8670 (ddi_remove_event_handler(un->un_remove_cb_id) != 8671 DDI_SUCCESS)) { 8672 /* 8673 * Note: We are returning here after having done 8674 * substantial cleanup above. This is consistent 8675 * with the legacy implementation but this may not 8676 * be the right thing to do. 8677 */ 8678 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8679 "sd_dr_detach: Cannot cancel remove event\n"); 8680 goto err_remove_event; 8681 } 8682 un->un_remove_event = NULL; 8683 } 8684 8685 /* Do not free the softstate if the callback routine is active */ 8686 sd_sync_with_callback(un); 8687 8688 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 8689 cmlb_free_handle(&un->un_cmlbhandle); 8690 8691 /* 8692 * Hold the detach mutex here, to make sure that no other threads ever 8693 * can access a (partially) freed soft state structure. 8694 */ 8695 mutex_enter(&sd_detach_mutex); 8696 8697 /* 8698 * Clean up the soft state struct. 8699 * Cleanup is done in reverse order of allocs/inits. 8700 * At this point there should be no competing threads anymore. 8701 */ 8702 8703 scsi_fm_fini(devp); 8704 8705 /* 8706 * Deallocate memory for SCSI FMA. 8707 */ 8708 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8709 8710 /* 8711 * Unregister and free device id if it was not registered 8712 * by the transport. 8713 */ 8714 if (un->un_f_devid_transport_defined == FALSE) 8715 ddi_devid_unregister(devi); 8716 8717 /* 8718 * Free the devid structure if allocated before (by ddi_devid_init() 8719 * or ddi_devid_get()). 8720 */ 8721 if (un->un_devid) { 8722 ddi_devid_free(un->un_devid); 8723 un->un_devid = NULL; 8724 } 8725 8726 /* 8727 * Destroy wmap cache if it exists. 8728 */ 8729 if (un->un_wm_cache != NULL) { 8730 kmem_cache_destroy(un->un_wm_cache); 8731 un->un_wm_cache = NULL; 8732 } 8733 8734 /* 8735 * kstat cleanup is done in detach for all device types (4363169). 8736 * We do not want to fail detach if the device kstats are not deleted 8737 * since there is confusion about the devo_refcnt for the device. 8738 * We just delete the kstats and let detach complete successfully.
8739 */ 8740 if (un->un_stats != NULL) { 8741 kstat_delete(un->un_stats); 8742 un->un_stats = NULL; 8743 } 8744 if (un->un_errstats != NULL) { 8745 kstat_delete(un->un_errstats); 8746 un->un_errstats = NULL; 8747 } 8748 8749 /* Remove partition stats */ 8750 if (un->un_f_pkstats_enabled) { 8751 for (i = 0; i < NSDMAP; i++) { 8752 if (un->un_pstats[i] != NULL) { 8753 kstat_delete(un->un_pstats[i]); 8754 un->un_pstats[i] = NULL; 8755 } 8756 } 8757 } 8758 8759 /* Remove xbuf registration */ 8760 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8761 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8762 8763 /* Remove driver properties */ 8764 ddi_prop_remove_all(devi); 8765 8766 mutex_destroy(&un->un_pm_mutex); 8767 cv_destroy(&un->un_pm_busy_cv); 8768 8769 cv_destroy(&un->un_wcc_cv); 8770 8771 /* Open/close semaphore */ 8772 sema_destroy(&un->un_semoclose); 8773 8774 /* Removable media condvar. */ 8775 cv_destroy(&un->un_state_cv); 8776 8777 /* Suspend/resume condvar. */ 8778 cv_destroy(&un->un_suspend_cv); 8779 cv_destroy(&un->un_disk_busy_cv); 8780 8781 sd_free_rqs(un); 8782 8783 /* Free up soft state */ 8784 devp->sd_private = NULL; 8785 8786 bzero(un, sizeof (struct sd_lun)); 8787 #ifndef XPV_HVM_DRIVER 8788 ddi_soft_state_free(sd_state, instance); 8789 #endif /* !XPV_HVM_DRIVER */ 8790 8791 mutex_exit(&sd_detach_mutex); 8792 8793 /* This frees up the INQUIRY data associated with the device. */ 8794 scsi_unprobe(devp); 8795 8796 /* 8797 * After successfully detaching an instance, we update the information 8798 * of how many luns have been attached in the corresponding target and 8799 * controller for parallel SCSI. This information is used when sd tries 8800 * to set the tagged queuing capability in the HBA. 8801 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8802 * check if the device is parallel SCSI. However, we don't need to 8803 * check here because we've already checked during attach. No device 8804 * that is not parallel SCSI is in the chain. 8805 */ 8806 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8807 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8808 } 8809 8810 return (DDI_SUCCESS); 8811 8812 err_notclosed: 8813 mutex_exit(SD_MUTEX(un)); 8814 8815 err_stillbusy: 8816 _NOTE(NO_COMPETING_THREADS_NOW); 8817 8818 err_remove_event: 8819 mutex_enter(&sd_detach_mutex); 8820 un->un_detach_count--; 8821 mutex_exit(&sd_detach_mutex); 8822 8823 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 8824 return (DDI_FAILURE); 8825 } 8826 8827 8828 /* 8829 * Function: sd_create_errstats 8830 * 8831 * Description: This routine instantiates the device error stats. 8832 * 8833 * Note: During attach the stats are instantiated first so they are 8834 * available for attach-time routines that utilize the driver 8835 * iopath to send commands to the device. The stats are initialized 8836 * separately so data obtained during some attach-time routines is 8837 * available.
(4362483) 8838 * 8839 * Arguments: un - driver soft state (unit) structure 8840 * instance - driver instance 8841 * 8842 * Context: Kernel thread context 8843 */ 8844 8845 static void 8846 sd_create_errstats(struct sd_lun *un, int instance) 8847 { 8848 struct sd_errstats *stp; 8849 char kstatmodule_err[KSTAT_STRLEN]; 8850 char kstatname[KSTAT_STRLEN]; 8851 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8852 8853 ASSERT(un != NULL); 8854 8855 if (un->un_errstats != NULL) { 8856 return; 8857 } 8858 8859 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8860 "%serr", sd_label); 8861 (void) snprintf(kstatname, sizeof (kstatname), 8862 "%s%d,err", sd_label, instance); 8863 8864 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8865 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8866 8867 if (un->un_errstats == NULL) { 8868 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8869 "sd_create_errstats: Failed kstat_create\n"); 8870 return; 8871 } 8872 8873 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8874 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8875 KSTAT_DATA_UINT32); 8876 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8877 KSTAT_DATA_UINT32); 8878 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8879 KSTAT_DATA_UINT32); 8880 kstat_named_init(&stp->sd_vid, "Vendor", 8881 KSTAT_DATA_CHAR); 8882 kstat_named_init(&stp->sd_pid, "Product", 8883 KSTAT_DATA_CHAR); 8884 kstat_named_init(&stp->sd_revision, "Revision", 8885 KSTAT_DATA_CHAR); 8886 kstat_named_init(&stp->sd_serial, "Serial No", 8887 KSTAT_DATA_CHAR); 8888 kstat_named_init(&stp->sd_capacity, "Size", 8889 KSTAT_DATA_ULONGLONG); 8890 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8891 KSTAT_DATA_UINT32); 8892 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8893 KSTAT_DATA_UINT32); 8894 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8895 KSTAT_DATA_UINT32); 8896 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8897 KSTAT_DATA_UINT32); 8898 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8899 KSTAT_DATA_UINT32); 8900 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8901 KSTAT_DATA_UINT32); 8902 8903 un->un_errstats->ks_private = un; 8904 un->un_errstats->ks_update = nulldev; 8905 8906 kstat_install(un->un_errstats); 8907 } 8908 8909 8910 /* 8911 * Function: sd_set_errstats 8912 * 8913 * Description: This routine sets the value of the vendor id, product id, 8914 * revision, serial number, and capacity device error stats. 8915 * 8916 * Note: During attach the stats are instantiated first so they are 8917 * available for attach-time routines that utilize the driver 8918 * iopath to send commands to the device. The stats are initialized 8919 * separately so data obtained during some attach-time routines is 8920 * available. 
(4362483) 8921 * 8922 * Arguments: un - driver soft state (unit) structure 8923 * 8924 * Context: Kernel thread context 8925 */ 8926 8927 static void 8928 sd_set_errstats(struct sd_lun *un) 8929 { 8930 struct sd_errstats *stp; 8931 8932 ASSERT(un != NULL); 8933 ASSERT(un->un_errstats != NULL); 8934 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8935 ASSERT(stp != NULL); 8936 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 8937 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 8938 (void) strncpy(stp->sd_revision.value.c, 8939 un->un_sd->sd_inq->inq_revision, 4); 8940 8941 /* 8942 * All the errstats are persistent across detach/attach, 8943 * so reset all the errstats here in case of the hot 8944 * replacement of disk drives, except for not changed 8945 * Sun qualified drives. 8946 */ 8947 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 8948 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8949 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 8950 stp->sd_softerrs.value.ui32 = 0; 8951 stp->sd_harderrs.value.ui32 = 0; 8952 stp->sd_transerrs.value.ui32 = 0; 8953 stp->sd_rq_media_err.value.ui32 = 0; 8954 stp->sd_rq_ntrdy_err.value.ui32 = 0; 8955 stp->sd_rq_nodev_err.value.ui32 = 0; 8956 stp->sd_rq_recov_err.value.ui32 = 0; 8957 stp->sd_rq_illrq_err.value.ui32 = 0; 8958 stp->sd_rq_pfa_err.value.ui32 = 0; 8959 } 8960 8961 /* 8962 * Set the "Serial No" kstat for Sun qualified drives (indicated by 8963 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 8964 * (4376302)) 8965 */ 8966 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 8967 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8968 sizeof (SD_INQUIRY(un)->inq_serial)); 8969 } 8970 8971 if (un->un_f_blockcount_is_valid != TRUE) { 8972 /* 8973 * Set capacity error stat to 0 for no media. This ensures 8974 * a valid capacity is displayed in response to 'iostat -E' 8975 * when no media is present in the device. 8976 */ 8977 stp->sd_capacity.value.ui64 = 0; 8978 } else { 8979 /* 8980 * Multiply un_blockcount by un->un_sys_blocksize to get 8981 * capacity. 8982 * 8983 * Note: for non-512 blocksize devices "un_blockcount" has been 8984 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 8985 * (un_tgt_blocksize / un->un_sys_blocksize). 8986 */ 8987 stp->sd_capacity.value.ui64 = (uint64_t) 8988 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 8989 } 8990 } 8991 8992 8993 /* 8994 * Function: sd_set_pstats 8995 * 8996 * Description: This routine instantiates and initializes the partition 8997 * stats for each partition with more than zero blocks. 8998 * (4363169) 8999 * 9000 * Arguments: un - driver soft state (unit) structure 9001 * 9002 * Context: Kernel thread context 9003 */ 9004 9005 static void 9006 sd_set_pstats(struct sd_lun *un) 9007 { 9008 char kstatname[KSTAT_STRLEN]; 9009 int instance; 9010 int i; 9011 diskaddr_t nblks = 0; 9012 char *partname = NULL; 9013 9014 ASSERT(un != NULL); 9015 9016 instance = ddi_get_instance(SD_DEVINFO(un)); 9017 9018 /* Note:x86: is this a VTOC8/VTOC16 difference? 
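Either way, the loop below creates one IO kstat per partition that has a nonzero block count, named "<label><instance>,<partname>" (e.g. "sd0,a").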
*/
9019 for (i = 0; i < NSDMAP; i++) {
9020
9021 if (cmlb_partinfo(un->un_cmlbhandle, i,
9022 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
9023 continue;
9024 mutex_enter(SD_MUTEX(un));
9025
9026 if ((un->un_pstats[i] == NULL) &&
9027 (nblks != 0)) {
9028
9029 (void) snprintf(kstatname, sizeof (kstatname),
9030 "%s%d,%s", sd_label, instance,
9031 partname);
9032
9033 un->un_pstats[i] = kstat_create(sd_label,
9034 instance, kstatname, "partition", KSTAT_TYPE_IO,
9035 1, KSTAT_FLAG_PERSISTENT);
9036 if (un->un_pstats[i] != NULL) {
9037 un->un_pstats[i]->ks_lock = SD_MUTEX(un);
9038 kstat_install(un->un_pstats[i]);
9039 }
9040 }
9041 mutex_exit(SD_MUTEX(un));
9042 }
9043 }
9044
9045
9046 #if (defined(__fibre))
9047 /*
9048 * Function: sd_init_event_callbacks
9049 *
9050 * Description: This routine initializes the insertion and removal event
9051 * callbacks. (fibre only)
9052 *
9053 * Arguments: un - driver soft state (unit) structure
9054 *
9055 * Context: Kernel thread context
9056 */
9057
9058 static void
9059 sd_init_event_callbacks(struct sd_lun *un)
9060 {
9061 ASSERT(un != NULL);
9062
9063 if ((un->un_insert_event == NULL) &&
9064 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
9065 &un->un_insert_event) == DDI_SUCCESS)) {
9066 /*
9067 * Add the callback for an insertion event
9068 */
9069 (void) ddi_add_event_handler(SD_DEVINFO(un),
9070 un->un_insert_event, sd_event_callback, (void *)un,
9071 &(un->un_insert_cb_id));
9072 }
9073
9074 if ((un->un_remove_event == NULL) &&
9075 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
9076 &un->un_remove_event) == DDI_SUCCESS)) {
9077 /*
9078 * Add the callback for a removal event
9079 */
9080 (void) ddi_add_event_handler(SD_DEVINFO(un),
9081 un->un_remove_event, sd_event_callback, (void *)un,
9082 &(un->un_remove_cb_id));
9083 }
9084 }
9085
9086
9087 /*
9088 * Function: sd_event_callback
9089 *
9090 * Description: This routine handles insert/remove events (photon). The
9091 * state is changed to OFFLINE, which can be used to suppress
9092 * error msgs. (fibre only)
9093 *
9094 * Arguments: un - driver soft state (unit) structure
9095 *
9096 * Context: Callout thread context
9097 */
9098 /* ARGSUSED */
9099 static void
9100 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
9101 void *bus_impldata)
9102 {
9103 struct sd_lun *un = (struct sd_lun *)arg;
9104
9105 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
9106 if (event == un->un_insert_event) {
9107 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
9108 mutex_enter(SD_MUTEX(un));
9109 if (un->un_state == SD_STATE_OFFLINE) {
9110 if (un->un_last_state != SD_STATE_SUSPENDED) {
9111 un->un_state = un->un_last_state;
9112 } else {
9113 /*
9114 * We have gone through SUSPEND/RESUME while
9115 * we were offline. Restore the last state.
9116 */
9117 un->un_state = un->un_save_state;
9118 }
9119 }
9120 mutex_exit(SD_MUTEX(un));
9121
9122 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
9123 } else if (event == un->un_remove_event) {
9124 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
9125 mutex_enter(SD_MUTEX(un));
9126 /*
9127 * We need to handle an event callback that occurs during
9128 * the suspend operation, since we don't prevent it.
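* If the remove event arrives while the unit is SUSPENDED, only
* un_last_state is updated below, so that the resume path restores
* the unit directly to the OFFLINE state.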
9129 */
9130 if (un->un_state != SD_STATE_OFFLINE) {
9131 if (un->un_state != SD_STATE_SUSPENDED) {
9132 New_state(un, SD_STATE_OFFLINE);
9133 } else {
9134 un->un_last_state = SD_STATE_OFFLINE;
9135 }
9136 }
9137 mutex_exit(SD_MUTEX(un));
9138 } else {
9139 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
9140 "!Unknown event\n");
9141 }
9142
9143 }
9144 #endif
9145
9146 /*
9147 * Function: sd_cache_control()
9148 *
9149 * Description: This routine is the driver entry point for setting
9150 * read and write caching by modifying the WCE (write cache
9151 * enable) and RCD (read cache disable) bits of mode
9152 * page 8 (MODEPAGE_CACHING).
9153 *
9154 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
9155 * structure for this target.
9156 * rcd_flag - flag for controlling the read cache
9157 * wce_flag - flag for controlling the write cache
9158 *
9159 * Return Code: EIO
9160 * code returned by sd_send_scsi_MODE_SENSE and
9161 * sd_send_scsi_MODE_SELECT
9162 *
9163 * Context: Kernel Thread
9164 */
9165
9166 static int
9167 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag)
9168 {
9169 struct mode_caching *mode_caching_page;
9170 uchar_t *header;
9171 size_t buflen;
9172 int hdrlen;
9173 int bd_len;
9174 int rval = 0;
9175 struct mode_header_grp2 *mhp;
9176 struct sd_lun *un;
9177 int status;
9178
9179 ASSERT(ssc != NULL);
9180 un = ssc->ssc_un;
9181 ASSERT(un != NULL);
9182
9183 /*
9184 * Do a test unit ready; otherwise, a mode sense may not work if this
9185 * is the first command sent to the device after boot.
9186 */
9187 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
9188 if (status != 0)
9189 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9190
9191 if (un->un_f_cfg_is_atapi == TRUE) {
9192 hdrlen = MODE_HEADER_LENGTH_GRP2;
9193 } else {
9194 hdrlen = MODE_HEADER_LENGTH;
9195 }
9196
9197 /*
9198 * Allocate memory for the retrieved mode page and its headers. Set
9199 * a pointer to the page itself. Use mode_cache_scsi3 to ensure
9200 * we get all of the mode sense data; otherwise, the mode select
9201 * will fail. mode_cache_scsi3 is a superset of mode_caching.
9202 */
9203 buflen = hdrlen + MODE_BLK_DESC_LENGTH +
9204 sizeof (struct mode_cache_scsi3);
9205
9206 header = kmem_zalloc(buflen, KM_SLEEP);
9207
9208 /* Get the information from the device. */
9209 if (un->un_f_cfg_is_atapi == TRUE) {
9210 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen,
9211 MODEPAGE_CACHING, SD_PATH_DIRECT);
9212 } else {
9213 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
9214 MODEPAGE_CACHING, SD_PATH_DIRECT);
9215 }
9216
9217 if (rval != 0) {
9218 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
9219 "sd_cache_control: Mode Sense Failed\n");
9220 goto mode_sense_failed;
9221 }
9222
9223 /*
9224 * Determine size of Block Descriptors in order to locate
9225 * the mode page data. ATAPI devices return 0, SCSI devices
9226 * should return MODE_BLK_DESC_LENGTH.
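*
* For reference, the MODE SENSE data retrieved above is laid out as:
*   mode header (hdrlen bytes) | block descriptor(s) (bd_len bytes) |
*   caching mode page
* so once bd_len is known, the caching page begins at
* (header + hdrlen + bd_len).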
9227 */ 9228 if (un->un_f_cfg_is_atapi == TRUE) { 9229 mhp = (struct mode_header_grp2 *)header; 9230 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9231 } else { 9232 bd_len = ((struct mode_header *)header)->bdesc_length; 9233 } 9234 9235 if (bd_len > MODE_BLK_DESC_LENGTH) { 9236 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0, 9237 "sd_cache_control: Mode Sense returned invalid block " 9238 "descriptor length\n"); 9239 rval = EIO; 9240 goto mode_sense_failed; 9241 } 9242 9243 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9244 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9245 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 9246 "sd_cache_control: Mode Sense caching page code mismatch " 9247 "%d\n", mode_caching_page->mode_page.code); 9248 rval = EIO; 9249 goto mode_sense_failed; 9250 } 9251 9252 /* Check the relevant bits on successful mode sense. */ 9253 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 9254 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 9255 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 9256 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 9257 9258 size_t sbuflen; 9259 uchar_t save_pg; 9260 9261 /* 9262 * Construct select buffer length based on the 9263 * length of the sense data returned. 9264 */ 9265 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 9266 sizeof (struct mode_page) + 9267 (int)mode_caching_page->mode_page.length; 9268 9269 /* 9270 * Set the caching bits as requested. 9271 */ 9272 if (rcd_flag == SD_CACHE_ENABLE) 9273 mode_caching_page->rcd = 0; 9274 else if (rcd_flag == SD_CACHE_DISABLE) 9275 mode_caching_page->rcd = 1; 9276 9277 if (wce_flag == SD_CACHE_ENABLE) 9278 mode_caching_page->wce = 1; 9279 else if (wce_flag == SD_CACHE_DISABLE) 9280 mode_caching_page->wce = 0; 9281 9282 /* 9283 * Save the page if the mode sense says the 9284 * drive supports it. 9285 */ 9286 save_pg = mode_caching_page->mode_page.ps ? 9287 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 9288 9289 /* Clear reserved bits before mode select. */ 9290 mode_caching_page->mode_page.ps = 0; 9291 9292 /* 9293 * Clear out mode header for mode select. 9294 * The rest of the retrieved page will be reused. 9295 */ 9296 bzero(header, hdrlen); 9297 9298 if (un->un_f_cfg_is_atapi == TRUE) { 9299 mhp = (struct mode_header_grp2 *)header; 9300 mhp->bdesc_length_hi = bd_len >> 8; 9301 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 9302 } else { 9303 ((struct mode_header *)header)->bdesc_length = bd_len; 9304 } 9305 9306 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9307 9308 /* Issue mode select to change the cache settings */ 9309 if (un->un_f_cfg_is_atapi == TRUE) { 9310 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, header, 9311 sbuflen, save_pg, SD_PATH_DIRECT); 9312 } else { 9313 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 9314 sbuflen, save_pg, SD_PATH_DIRECT); 9315 } 9316 9317 } 9318 9319 9320 mode_sense_failed: 9321 9322 kmem_free(header, buflen); 9323 9324 if (rval != 0) { 9325 if (rval == EIO) 9326 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9327 else 9328 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9329 } 9330 return (rval); 9331 } 9332 9333 9334 /* 9335 * Function: sd_get_write_cache_enabled() 9336 * 9337 * Description: This routine is the driver entry point for determining if 9338 * write caching is enabled. It examines the WCE (write cache 9339 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
9340 * 9341 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 9342 * structure for this target. 9343 * is_enabled - pointer to int where write cache enabled state 9344 * is returned (non-zero -> write cache enabled) 9345 * 9346 * 9347 * Return Code: EIO 9348 * code returned by sd_send_scsi_MODE_SENSE 9349 * 9350 * Context: Kernel Thread 9351 * 9352 * NOTE: If ioctl is added to disable write cache, this sequence should 9353 * be followed so that no locking is required for accesses to 9354 * un->un_f_write_cache_enabled: 9355 * do mode select to clear wce 9356 * do synchronize cache to flush cache 9357 * set un->un_f_write_cache_enabled = FALSE 9358 * 9359 * Conversely, an ioctl to enable the write cache should be done 9360 * in this order: 9361 * set un->un_f_write_cache_enabled = TRUE 9362 * do mode select to set wce 9363 */ 9364 9365 static int 9366 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled) 9367 { 9368 struct mode_caching *mode_caching_page; 9369 uchar_t *header; 9370 size_t buflen; 9371 int hdrlen; 9372 int bd_len; 9373 int rval = 0; 9374 struct sd_lun *un; 9375 int status; 9376 9377 ASSERT(ssc != NULL); 9378 un = ssc->ssc_un; 9379 ASSERT(un != NULL); 9380 ASSERT(is_enabled != NULL); 9381 9382 /* in case of error, flag as enabled */ 9383 *is_enabled = TRUE; 9384 9385 /* 9386 * Do a test unit ready, otherwise a mode sense may not work if this 9387 * is the first command sent to the device after boot. 9388 */ 9389 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 9390 9391 if (status != 0) 9392 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9393 9394 if (un->un_f_cfg_is_atapi == TRUE) { 9395 hdrlen = MODE_HEADER_LENGTH_GRP2; 9396 } else { 9397 hdrlen = MODE_HEADER_LENGTH; 9398 } 9399 9400 /* 9401 * Allocate memory for the retrieved mode page and its headers. Set 9402 * a pointer to the page itself. 9403 */ 9404 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 9405 header = kmem_zalloc(buflen, KM_SLEEP); 9406 9407 /* Get the information from the device. */ 9408 if (un->un_f_cfg_is_atapi == TRUE) { 9409 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 9410 MODEPAGE_CACHING, SD_PATH_DIRECT); 9411 } else { 9412 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 9413 MODEPAGE_CACHING, SD_PATH_DIRECT); 9414 } 9415 9416 if (rval != 0) { 9417 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9418 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 9419 goto mode_sense_failed; 9420 } 9421 9422 /* 9423 * Determine size of Block Descriptors in order to locate 9424 * the mode page data. ATAPI devices return 0, SCSI devices 9425 * should return MODE_BLK_DESC_LENGTH. 
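* The layout is the same as in sd_cache_control(): once bd_len is
* known, the caching page begins at (header + hdrlen + bd_len).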
9426 */ 9427 if (un->un_f_cfg_is_atapi == TRUE) { 9428 struct mode_header_grp2 *mhp; 9429 mhp = (struct mode_header_grp2 *)header; 9430 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9431 } else { 9432 bd_len = ((struct mode_header *)header)->bdesc_length; 9433 } 9434 9435 if (bd_len > MODE_BLK_DESC_LENGTH) { 9436 /* FMA should make upset complain here */ 9437 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0, 9438 "sd_get_write_cache_enabled: Mode Sense returned invalid " 9439 "block descriptor length\n"); 9440 rval = EIO; 9441 goto mode_sense_failed; 9442 } 9443 9444 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9445 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9446 /* FMA could make upset complain here */ 9447 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 9448 "sd_get_write_cache_enabled: Mode Sense caching page " 9449 "code mismatch %d\n", mode_caching_page->mode_page.code); 9450 rval = EIO; 9451 goto mode_sense_failed; 9452 } 9453 *is_enabled = mode_caching_page->wce; 9454 9455 mode_sense_failed: 9456 if (rval == 0) { 9457 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 9458 } else if (rval == EIO) { 9459 /* 9460 * Some disks do not support mode sense(6), we 9461 * should ignore this kind of error(sense key is 9462 * 0x5 - illegal request). 9463 */ 9464 uint8_t *sensep; 9465 int senlen; 9466 9467 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 9468 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 9469 ssc->ssc_uscsi_cmd->uscsi_rqresid); 9470 9471 if (senlen > 0 && 9472 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 9473 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 9474 } else { 9475 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9476 } 9477 } else { 9478 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9479 } 9480 kmem_free(header, buflen); 9481 return (rval); 9482 } 9483 9484 /* 9485 * Function: sd_get_nv_sup() 9486 * 9487 * Description: This routine is the driver entry point for 9488 * determining whether non-volatile cache is supported. This 9489 * determination process works as follows: 9490 * 9491 * 1. sd first queries sd.conf on whether 9492 * suppress_cache_flush bit is set for this device. 9493 * 9494 * 2. if not there, then queries the internal disk table. 9495 * 9496 * 3. if either sd.conf or internal disk table specifies 9497 * cache flush be suppressed, we don't bother checking 9498 * NV_SUP bit. 9499 * 9500 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 9501 * the optional INQUIRY VPD page 0x86. If the device 9502 * supports VPD page 0x86, sd examines the NV_SUP 9503 * (non-volatile cache support) bit in the INQUIRY VPD page 9504 * 0x86: 9505 * o If NV_SUP bit is set, sd assumes the device has a 9506 * non-volatile cache and set the 9507 * un_f_sync_nv_supported to TRUE. 9508 * o Otherwise cache is not non-volatile, 9509 * un_f_sync_nv_supported is set to FALSE. 9510 * 9511 * Arguments: un - driver soft state (unit) structure 9512 * 9513 * Return Code: 9514 * 9515 * Context: Kernel Thread 9516 */ 9517 9518 static void 9519 sd_get_nv_sup(sd_ssc_t *ssc) 9520 { 9521 int rval = 0; 9522 uchar_t *inq86 = NULL; 9523 size_t inq86_len = MAX_INQUIRY_SIZE; 9524 size_t inq86_resid = 0; 9525 struct dk_callback *dkc; 9526 struct sd_lun *un; 9527 9528 ASSERT(ssc != NULL); 9529 un = ssc->ssc_un; 9530 ASSERT(un != NULL); 9531 9532 mutex_enter(SD_MUTEX(un)); 9533 9534 /* 9535 * Be conservative on the device's support of 9536 * SYNC_NV bit: un_f_sync_nv_supported is 9537 * initialized to be false. 
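* (NV_SUP is reported in byte 6 of the Extended INQUIRY Data VPD
* page 0x86; SD_VPD_NV_SUP is the bit mask tested against that
* byte below.)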
9538 */
9539 un->un_f_sync_nv_supported = FALSE;
9540
9541 /*
9542 * If either sd.conf or internal disk table
9543 * specifies cache flush be suppressed, then
9544 * we don't bother checking NV_SUP bit.
9545 */
9546 if (un->un_f_suppress_cache_flush == TRUE) {
9547 mutex_exit(SD_MUTEX(un));
9548 return;
9549 }
9550
9551 if (sd_check_vpd_page_support(ssc) == 0 &&
9552 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
9553 mutex_exit(SD_MUTEX(un));
9554 /* collect page 86 data if available */
9555 inq86 = kmem_zalloc(inq86_len, KM_SLEEP);
9556
9557 rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len,
9558 0x01, 0x86, &inq86_resid);
9559
9560 if (rval == 0 && (inq86_len - inq86_resid > 6)) {
9561 SD_TRACE(SD_LOG_COMMON, un,
9562 "sd_get_nv_sup: \
9563 successfully get VPD page: %x \
9564 PAGE LENGTH: %x BYTE 6: %x\n",
9565 inq86[1], inq86[3], inq86[6]);
9566
9567 mutex_enter(SD_MUTEX(un));
9568 /*
9569 * check the value of NV_SUP bit: only if the device
9570 * reports NV_SUP bit to be 1, the
9571 * un_f_sync_nv_supported bit will be set to true.
9572 */
9573 if (inq86[6] & SD_VPD_NV_SUP) {
9574 un->un_f_sync_nv_supported = TRUE;
9575 }
9576 mutex_exit(SD_MUTEX(un));
9577 } else if (rval != 0) {
9578 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9579 }
9580
9581 kmem_free(inq86, inq86_len);
9582 } else {
9583 mutex_exit(SD_MUTEX(un));
9584 }
9585
9586 /*
9587 * Send a SYNC CACHE command to check whether
9588 * SYNC_NV bit is supported. This command should have
9589 * un_f_sync_nv_supported set to the correct value.
9590 */
9591 mutex_enter(SD_MUTEX(un));
9592 if (un->un_f_sync_nv_supported) {
9593 mutex_exit(SD_MUTEX(un));
9594 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
9595 dkc->dkc_flag = FLUSH_VOLATILE;
9596 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
9597
9598 /*
9599 * Send a TEST UNIT READY command to the device. This should
9600 * clear any outstanding UNIT ATTENTION that may be present.
9601 */
9602 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
9603 if (rval != 0)
9604 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9605
9606 kmem_free(dkc, sizeof (struct dk_callback));
9607 } else {
9608 mutex_exit(SD_MUTEX(un));
9609 }
9610
9611 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \
9612 un_f_suppress_cache_flush is set to %d\n",
9613 un->un_f_suppress_cache_flush);
9614 }
9615
9616 /*
9617 * Function: sd_make_device
9618 *
9619 * Description: Utility routine to return the Solaris device number from
9620 * the data in the device's dev_info structure.
9621 *
9622 * Return Code: The Solaris device number
9623 *
9624 * Context: Any
9625 */
9626
9627 static dev_t
9628 sd_make_device(dev_info_t *devi)
9629 {
9630 return (makedevice(ddi_driver_major(devi),
9631 ddi_get_instance(devi) << SDUNIT_SHIFT));
9632 }
9633
9634
9635 /*
9636 * Function: sd_pm_entry
9637 *
9638 * Description: Called at the start of a new command to manage power
9639 * and busy status of a device. This includes determining whether
9640 * the current power state of the device is sufficient for
9641 * performing the command or whether it must be changed.
9642 * The PM framework is notified appropriately.
9643 * Only with a return status of DDI_SUCCESS will the
9644 * component be marked busy to the framework.
9645 *
9646 * All callers of sd_pm_entry must check the return status
9647 * and only call sd_pm_exit if it was DDI_SUCCESS. A status
9648 * of DDI_FAILURE indicates the device failed to power up.
9649 * In this case un_pm_count has been adjusted so the result
9650 * on exit is still powered down, i.e.
count is less than 0. 9651 * Calling sd_pm_exit with this count value hits an ASSERT. 9652 * 9653 * Return Code: DDI_SUCCESS or DDI_FAILURE 9654 * 9655 * Context: Kernel thread context. 9656 */ 9657 9658 static int 9659 sd_pm_entry(struct sd_lun *un) 9660 { 9661 int return_status = DDI_SUCCESS; 9662 9663 ASSERT(!mutex_owned(SD_MUTEX(un))); 9664 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9665 9666 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 9667 9668 if (un->un_f_pm_is_enabled == FALSE) { 9669 SD_TRACE(SD_LOG_IO_PM, un, 9670 "sd_pm_entry: exiting, PM not enabled\n"); 9671 return (return_status); 9672 } 9673 9674 /* 9675 * Just increment a counter if PM is enabled. On the transition from 9676 * 0 ==> 1, mark the device as busy. The iodone side will decrement 9677 * the count with each IO and mark the device as idle when the count 9678 * hits 0. 9679 * 9680 * If the count is less than 0 the device is powered down. If a powered 9681 * down device is successfully powered up then the count must be 9682 * incremented to reflect the power up. Note that it'll get incremented 9683 * a second time to become busy. 9684 * 9685 * Because the following has the potential to change the device state 9686 * and must release the un_pm_mutex to do so, only one thread can be 9687 * allowed through at a time. 9688 */ 9689 9690 mutex_enter(&un->un_pm_mutex); 9691 while (un->un_pm_busy == TRUE) { 9692 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 9693 } 9694 un->un_pm_busy = TRUE; 9695 9696 if (un->un_pm_count < 1) { 9697 9698 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 9699 9700 /* 9701 * Indicate we are now busy so the framework won't attempt to 9702 * power down the device. This call will only fail if either 9703 * we passed a bad component number or the device has no 9704 * components. Neither of these should ever happen. 9705 */ 9706 mutex_exit(&un->un_pm_mutex); 9707 return_status = pm_busy_component(SD_DEVINFO(un), 0); 9708 ASSERT(return_status == DDI_SUCCESS); 9709 9710 mutex_enter(&un->un_pm_mutex); 9711 9712 if (un->un_pm_count < 0) { 9713 mutex_exit(&un->un_pm_mutex); 9714 9715 SD_TRACE(SD_LOG_IO_PM, un, 9716 "sd_pm_entry: power up component\n"); 9717 9718 /* 9719 * pm_raise_power will cause sdpower to be called 9720 * which brings the device power level to the 9721 * desired state, ON in this case. If successful, 9722 * un_pm_count and un_power_level will be updated 9723 * appropriately. 9724 */ 9725 return_status = pm_raise_power(SD_DEVINFO(un), 0, 9726 SD_SPINDLE_ON); 9727 9728 mutex_enter(&un->un_pm_mutex); 9729 9730 if (return_status != DDI_SUCCESS) { 9731 /* 9732 * Power up failed. 9733 * Idle the device and adjust the count 9734 * so the result on exit is that we're 9735 * still powered down, ie. count is less than 0. 9736 */ 9737 SD_TRACE(SD_LOG_IO_PM, un, 9738 "sd_pm_entry: power up failed," 9739 " idle the component\n"); 9740 9741 (void) pm_idle_component(SD_DEVINFO(un), 0); 9742 un->un_pm_count--; 9743 } else { 9744 /* 9745 * Device is powered up, verify the 9746 * count is non-negative. 9747 * This is debug only. 9748 */ 9749 ASSERT(un->un_pm_count == 0); 9750 } 9751 } 9752 9753 if (return_status == DDI_SUCCESS) { 9754 /* 9755 * For performance, now that the device has been tagged 9756 * as busy, and it's known to be powered up, update the 9757 * chain types to use jump tables that do not include 9758 * pm. This significantly lowers the overhead and 9759 * therefore improves performance. 
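* While on the no-PM chains each command skips the per-I/O
* sd_pm_entry()/sd_pm_exit() processing; the idle timeout armed
* below gives the PM framework the matching idle call once I/O
* quiesces.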
9760 */ 9761 9762 mutex_exit(&un->un_pm_mutex); 9763 mutex_enter(SD_MUTEX(un)); 9764 SD_TRACE(SD_LOG_IO_PM, un, 9765 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9766 un->un_uscsi_chain_type); 9767 9768 if (un->un_f_non_devbsize_supported) { 9769 un->un_buf_chain_type = 9770 SD_CHAIN_INFO_RMMEDIA_NO_PM; 9771 } else { 9772 un->un_buf_chain_type = 9773 SD_CHAIN_INFO_DISK_NO_PM; 9774 } 9775 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 9776 9777 SD_TRACE(SD_LOG_IO_PM, un, 9778 " changed uscsi_chain_type to %d\n", 9779 un->un_uscsi_chain_type); 9780 mutex_exit(SD_MUTEX(un)); 9781 mutex_enter(&un->un_pm_mutex); 9782 9783 if (un->un_pm_idle_timeid == NULL) { 9784 /* 300 ms. */ 9785 un->un_pm_idle_timeid = 9786 timeout(sd_pm_idletimeout_handler, un, 9787 (drv_usectohz((clock_t)300000))); 9788 /* 9789 * Include an extra call to busy which keeps the 9790 * device busy with-respect-to the PM layer 9791 * until the timer fires, at which time it'll 9792 * get the extra idle call. 9793 */ 9794 (void) pm_busy_component(SD_DEVINFO(un), 0); 9795 } 9796 } 9797 } 9798 un->un_pm_busy = FALSE; 9799 /* Next... */ 9800 cv_signal(&un->un_pm_busy_cv); 9801 9802 un->un_pm_count++; 9803 9804 SD_TRACE(SD_LOG_IO_PM, un, 9805 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 9806 9807 mutex_exit(&un->un_pm_mutex); 9808 9809 return (return_status); 9810 } 9811 9812 9813 /* 9814 * Function: sd_pm_exit 9815 * 9816 * Description: Called at the completion of a command to manage busy 9817 * status for the device. If the device becomes idle the 9818 * PM framework is notified. 9819 * 9820 * Context: Kernel thread context 9821 */ 9822 9823 static void 9824 sd_pm_exit(struct sd_lun *un) 9825 { 9826 ASSERT(!mutex_owned(SD_MUTEX(un))); 9827 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9828 9829 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 9830 9831 /* 9832 * After attach the following flag is only read, so don't 9833 * take the penalty of acquiring a mutex for it. 9834 */ 9835 if (un->un_f_pm_is_enabled == TRUE) { 9836 9837 mutex_enter(&un->un_pm_mutex); 9838 un->un_pm_count--; 9839 9840 SD_TRACE(SD_LOG_IO_PM, un, 9841 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 9842 9843 ASSERT(un->un_pm_count >= 0); 9844 if (un->un_pm_count == 0) { 9845 mutex_exit(&un->un_pm_mutex); 9846 9847 SD_TRACE(SD_LOG_IO_PM, un, 9848 "sd_pm_exit: idle component\n"); 9849 9850 (void) pm_idle_component(SD_DEVINFO(un), 0); 9851 9852 } else { 9853 mutex_exit(&un->un_pm_mutex); 9854 } 9855 } 9856 9857 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9858 } 9859 9860 9861 /* 9862 * Function: sdopen 9863 * 9864 * Description: Driver's open(9e) entry point function. 
9865 *
9866 * Arguments: dev_p - pointer to device number
9867 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
9868 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
9869 * cred_p - user credential pointer
9870 *
9871 * Return Code: EINVAL
9872 * ENXIO
9873 * EIO
9874 * EROFS
9875 * EBUSY
9876 *
9877 * Context: Kernel thread context
9878 */
9879 /* ARGSUSED */
9880 static int
9881 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
9882 {
9883 struct sd_lun *un;
9884 int nodelay;
9885 int part;
9886 uint64_t partmask;
9887 int instance;
9888 dev_t dev;
9889 int rval = EIO;
9890 diskaddr_t nblks = 0;
9891 diskaddr_t label_cap;
9892
9893 /* Validate the open type */
9894 if (otyp >= OTYPCNT) {
9895 return (EINVAL);
9896 }
9897
9898 dev = *dev_p;
9899 instance = SDUNIT(dev);
9900 mutex_enter(&sd_detach_mutex);
9901
9902 /*
9903 * Fail the open if there is no softstate for the instance, or
9904 * if another thread somewhere is trying to detach the instance.
9905 */
9906 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
9907 (un->un_detach_count != 0)) {
9908 mutex_exit(&sd_detach_mutex);
9909 /*
9910 * The probe cache only needs to be cleared when open (9e) fails
9911 * with ENXIO (4238046).
9912 */
9913 /*
9914 * Unconditionally clearing the probe cache is ok with
9915 * separate sd/ssd binaries; the x86 platform can be an
9916 * issue with both parallel and fibre supported in one
9917 * binary.
9918 */
9919 sd_scsi_clear_probe_cache();
9920 return (ENXIO);
9921 }
9922
9923 /*
9924 * The un_layer_count is to prevent another thread in specfs from
9925 * trying to detach the instance, which can happen when we are
9926 * called from a higher-layer driver instead of thru specfs.
9927 * This will not be needed when DDI provides a layered driver
9928 * interface that allows specfs to know that an instance is in
9929 * use by a layered driver & should not be detached.
9930 *
9931 * Note: the semantics for layered driver opens are exactly one
9932 * close for every open.
9933 */
9934 if (otyp == OTYP_LYR) {
9935 un->un_layer_count++;
9936 }
9937
9938 /*
9939 * Keep a count of the current # of opens in progress. This is because
9940 * some layered drivers try to call us as a regular open. This can
9941 * cause problems that we cannot prevent, however, by keeping this count
9942 * we can at least keep our open and detach routines from racing against
9943 * each other under such conditions.
9944 */
9945 un->un_opens_in_progress++;
9946 mutex_exit(&sd_detach_mutex);
9947
9948 nodelay = (flag & (FNDELAY | FNONBLOCK));
9949 part = SDPART(dev);
9950 partmask = 1 << part;
9951
9952 /*
9953 * We use a semaphore here in order to serialize
9954 * open and close requests on the device.
9955 */
9956 sema_p(&un->un_semoclose);
9957
9958 mutex_enter(SD_MUTEX(un));
9959
9960 /*
9961 * All device accesses go thru sdstrategy() where we check
9962 * on suspend status but there could be a scsi_poll command,
9963 * which bypasses sdstrategy(), so we need to check pm
9964 * status.
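* The !nodelay path below therefore waits out the SUSPENDED and
* PM_CHANGING states and then powers the device up via
* sd_pm_entry() before any further checks are made.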
9965 */
9966
9967 if (!nodelay) {
9968 while ((un->un_state == SD_STATE_SUSPENDED) ||
9969 (un->un_state == SD_STATE_PM_CHANGING)) {
9970 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
9971 }
9972
9973 mutex_exit(SD_MUTEX(un));
9974 if (sd_pm_entry(un) != DDI_SUCCESS) {
9975 rval = EIO;
9976 SD_ERROR(SD_LOG_OPEN_CLOSE, un,
9977 "sdopen: sd_pm_entry failed\n");
9978 goto open_failed_with_pm;
9979 }
9980 mutex_enter(SD_MUTEX(un));
9981 }
9982
9983 /* check for previous exclusive open */
9984 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
9985 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
9986 "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
9987 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
9988
9989 if (un->un_exclopen & (partmask)) {
9990 goto excl_open_fail;
9991 }
9992
9993 if (flag & FEXCL) {
9994 int i;
9995 if (un->un_ocmap.lyropen[part]) {
9996 goto excl_open_fail;
9997 }
9998 for (i = 0; i < (OTYPCNT - 1); i++) {
9999 if (un->un_ocmap.regopen[i] & (partmask)) {
10000 goto excl_open_fail;
10001 }
10002 }
10003 }
10004
10005 /*
10006 * Check the write permission if this is a removable media device,
10007 * NDELAY has not been set, and writable permission is requested.
10008 *
10009 * Note: If NDELAY was set and this is write-protected media the WRITE
10010 * attempt will fail with EIO as part of the I/O processing. This is a
10011 * more permissive implementation that allows the open to succeed and
10012 * WRITE attempts to fail when appropriate.
10013 */
10014 if (un->un_f_chk_wp_open) {
10015 if ((flag & FWRITE) && (!nodelay)) {
10016 mutex_exit(SD_MUTEX(un));
10017 /*
10018 * For a writable DVD drive, defer the check for write
10019 * permission till sdstrategy; do not fail the open even
10020 * if FWRITE is set, since the device may be writable
10021 * depending upon the media, and the media can change
10022 * after the call to open().
10023 */
10024 if (un->un_f_dvdram_writable_device == FALSE) {
10025 if (ISCD(un) || sr_check_wp(dev)) {
10026 rval = EROFS;
10027 mutex_enter(SD_MUTEX(un));
10028 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10029 "write to cd or write protected media\n");
10030 goto open_fail;
10031 }
10032 }
10033 mutex_enter(SD_MUTEX(un));
10034 }
10035 }
10036
10037 /*
10038 * If opening in NDELAY/NONBLOCK mode, just return.
10039 * Check if disk is ready and has a valid geometry later.
10040 */
10041 if (!nodelay) {
10042 sd_ssc_t *ssc;
10043
10044 mutex_exit(SD_MUTEX(un));
10045 ssc = sd_ssc_init(un);
10046 rval = sd_ready_and_valid(ssc, part);
10047 sd_ssc_fini(ssc);
10048 mutex_enter(SD_MUTEX(un));
10049 /*
10050 * Fail if device is not ready or if the number of disk
10051 * blocks is zero or negative for non CD devices.
10052 */
10053
10054 nblks = 0;
10055
10056 if (rval == SD_READY_VALID && (!ISCD(un))) {
10057 /* if cmlb_partinfo fails, nblks remains 0 */
10058 mutex_exit(SD_MUTEX(un));
10059 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
10060 NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
10061 mutex_enter(SD_MUTEX(un));
10062 }
10063
10064 if ((rval != SD_READY_VALID) ||
10065 (!ISCD(un) && nblks <= 0)) {
10066 rval = un->un_f_has_removable_media ? ENXIO : EIO;
10067 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10068 "device not ready or invalid disk block value\n");
10069 goto open_fail;
10070 }
10071 #if defined(__i386) || defined(__amd64)
10072 } else {
10073 uchar_t *cp;
10074 /*
10075 * x86 requires special nodelay handling, so that p0 is
10076 * always defined and accessible.
10077 * Invalidate geometry only if device is not already open.
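* The scan below walks the open-count map (un_ocmap.chkd[]); only
* if every byte is zero (no partition open via any open type) is
* the geometry invalidated.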
10078 */ 10079 cp = &un->un_ocmap.chkd[0]; 10080 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10081 if (*cp != (uchar_t)0) { 10082 break; 10083 } 10084 cp++; 10085 } 10086 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10087 mutex_exit(SD_MUTEX(un)); 10088 cmlb_invalidate(un->un_cmlbhandle, 10089 (void *)SD_PATH_DIRECT); 10090 mutex_enter(SD_MUTEX(un)); 10091 } 10092 10093 #endif 10094 } 10095 10096 if (otyp == OTYP_LYR) { 10097 un->un_ocmap.lyropen[part]++; 10098 } else { 10099 un->un_ocmap.regopen[otyp] |= partmask; 10100 } 10101 10102 /* Set up open and exclusive open flags */ 10103 if (flag & FEXCL) { 10104 un->un_exclopen |= (partmask); 10105 } 10106 10107 /* 10108 * If the lun is EFI labeled and lun capacity is greater than the 10109 * capacity contained in the label, log a sys-event to notify the 10110 * interested module. 10111 * To avoid an infinite loop of logging sys-event, we only log the 10112 * event when the lun is not opened in NDELAY mode. The event handler 10113 * should open the lun in NDELAY mode. 10114 */ 10115 if (!(flag & FNDELAY)) { 10116 mutex_exit(SD_MUTEX(un)); 10117 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 10118 (void*)SD_PATH_DIRECT) == 0) { 10119 mutex_enter(SD_MUTEX(un)); 10120 if (un->un_f_blockcount_is_valid && 10121 un->un_blockcount > label_cap) { 10122 mutex_exit(SD_MUTEX(un)); 10123 sd_log_lun_expansion_event(un, 10124 (nodelay ? KM_NOSLEEP : KM_SLEEP)); 10125 mutex_enter(SD_MUTEX(un)); 10126 } 10127 } else { 10128 mutex_enter(SD_MUTEX(un)); 10129 } 10130 } 10131 10132 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10133 "open of part %d type %d\n", part, otyp); 10134 10135 mutex_exit(SD_MUTEX(un)); 10136 if (!nodelay) { 10137 sd_pm_exit(un); 10138 } 10139 10140 sema_v(&un->un_semoclose); 10141 10142 mutex_enter(&sd_detach_mutex); 10143 un->un_opens_in_progress--; 10144 mutex_exit(&sd_detach_mutex); 10145 10146 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 10147 return (DDI_SUCCESS); 10148 10149 excl_open_fail: 10150 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 10151 rval = EBUSY; 10152 10153 open_fail: 10154 mutex_exit(SD_MUTEX(un)); 10155 10156 /* 10157 * On a failed open we must exit the pm management. 10158 */ 10159 if (!nodelay) { 10160 sd_pm_exit(un); 10161 } 10162 open_failed_with_pm: 10163 sema_v(&un->un_semoclose); 10164 10165 mutex_enter(&sd_detach_mutex); 10166 un->un_opens_in_progress--; 10167 if (otyp == OTYP_LYR) { 10168 un->un_layer_count--; 10169 } 10170 mutex_exit(&sd_detach_mutex); 10171 10172 return (rval); 10173 } 10174 10175 10176 /* 10177 * Function: sdclose 10178 * 10179 * Description: Driver's close(9e) entry point function. 
10180 *
10181 * Arguments: dev - device number
10182 * flag - file status flag, informational only
10183 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10184 * cred_p - user credential pointer
10185 *
10186 * Return Code: ENXIO
10187 *
10188 * Context: Kernel thread context
10189 */
10190 /* ARGSUSED */
10191 static int
10192 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
10193 {
10194 struct sd_lun *un;
10195 uchar_t *cp;
10196 int part;
10197 int nodelay;
10198 int rval = 0;
10199
10200 /* Validate the open type */
10201 if (otyp >= OTYPCNT) {
10202 return (ENXIO);
10203 }
10204
10205 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10206 return (ENXIO);
10207 }
10208
10209 part = SDPART(dev);
10210 nodelay = flag & (FNDELAY | FNONBLOCK);
10211
10212 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10213 "sdclose: close of part %d type %d\n", part, otyp);
10214
10215 /*
10216 * We use a semaphore here in order to serialize
10217 * open and close requests on the device.
10218 */
10219 sema_p(&un->un_semoclose);
10220
10221 mutex_enter(SD_MUTEX(un));
10222
10223 /* Don't proceed if power is being changed. */
10224 while (un->un_state == SD_STATE_PM_CHANGING) {
10225 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10226 }
10227
10228 if (un->un_exclopen & (1 << part)) {
10229 un->un_exclopen &= ~(1 << part);
10230 }
10231
10232 /* Update the open partition map */
10233 if (otyp == OTYP_LYR) {
10234 un->un_ocmap.lyropen[part] -= 1;
10235 } else {
10236 un->un_ocmap.regopen[otyp] &= ~(1 << part);
10237 }
10238
10239 cp = &un->un_ocmap.chkd[0];
10240 while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10241 if (*cp != (uchar_t)0) {
10242 break;
10243 }
10244 cp++;
10245 }
10246
10247 if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10248 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
10249
10250 /*
10251 * We avoid persistence upon the last close, and set
10252 * the throttle back to the maximum.
10253 */
10254 un->un_throttle = un->un_saved_throttle;
10255
10256 if (un->un_state == SD_STATE_OFFLINE) {
10257 if (un->un_f_is_fibre == FALSE) {
10258 scsi_log(SD_DEVINFO(un), sd_label,
10259 CE_WARN, "offline\n");
10260 }
10261 mutex_exit(SD_MUTEX(un));
10262 cmlb_invalidate(un->un_cmlbhandle,
10263 (void *)SD_PATH_DIRECT);
10264 mutex_enter(SD_MUTEX(un));
10265
10266 } else {
10267 /*
10268 * Flush any outstanding writes in NVRAM cache.
10269 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
10270 * cmd, so it may not work for non-Pluto devices.
10271 * SYNCHRONIZE CACHE is not required for removables,
10272 * except DVD-RAM drives.
10273 *
10274 * Also note: because SYNCHRONIZE CACHE is currently
10275 * the only command issued here that requires the
10276 * drive be powered up, only do the power up before
10277 * sending the Sync Cache command. If additional
10278 * commands are added which require a powered up
10279 * drive, the following sequence may have to change.
10280 *
10281 * And finally, note that parallel SCSI on SPARC
10282 * only issues a Sync Cache to DVD-RAM, a newly
10283 * supported device.
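* (On x86 the flush below is also issued when the device reports
* a volatile write cache that must be flushed, per the
* un_f_sync_cache_supported and un_f_sync_cache_required flags.)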
10284 */
10285 #if defined(__i386) || defined(__amd64)
10286 if ((un->un_f_sync_cache_supported &&
10287 un->un_f_sync_cache_required) ||
10288 un->un_f_dvdram_writable_device == TRUE) {
10289 #else
10290 if (un->un_f_dvdram_writable_device == TRUE) {
10291 #endif
10292 mutex_exit(SD_MUTEX(un));
10293 if (sd_pm_entry(un) == DDI_SUCCESS) {
10294 rval =
10295 sd_send_scsi_SYNCHRONIZE_CACHE(un,
10296 NULL);
10297 /* ignore error if not supported */
10298 if (rval == ENOTSUP) {
10299 rval = 0;
10300 } else if (rval != 0) {
10301 rval = EIO;
10302 }
10303 sd_pm_exit(un);
10304 } else {
10305 rval = EIO;
10306 }
10307 mutex_enter(SD_MUTEX(un));
10308 }
10309
10310 /*
10311 * For devices which support DOOR_LOCK, send an ALLOW
10312 * MEDIA REMOVAL command, but don't get upset if it
10313 * fails. We need to raise the power of the drive before
10314 * we can call sd_send_scsi_DOORLOCK().
10315 */
10316 if (un->un_f_doorlock_supported) {
10317 mutex_exit(SD_MUTEX(un));
10318 if (sd_pm_entry(un) == DDI_SUCCESS) {
10319 sd_ssc_t *ssc;
10320
10321 ssc = sd_ssc_init(un);
10322 rval = sd_send_scsi_DOORLOCK(ssc,
10323 SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
10324 if (rval != 0)
10325 sd_ssc_assessment(ssc,
10326 SD_FMT_IGNORE);
10327 sd_ssc_fini(ssc);
10328
10329 sd_pm_exit(un);
10330 if (ISCD(un) && (rval != 0) &&
10331 (nodelay != 0)) {
10332 rval = ENXIO;
10333 }
10334 } else {
10335 rval = EIO;
10336 }
10337 mutex_enter(SD_MUTEX(un));
10338 }
10339
10340 /*
10341 * If a device has removable media, invalidate all
10342 * parameters related to media, such as geometry,
10343 * blocksize, and blockcount.
10344 */
10345 if (un->un_f_has_removable_media) {
10346 sr_ejected(un);
10347 }
10348
10349 /*
10350 * Destroy the cache (if it exists) which was
10351 * allocated for the write maps since this is
10352 * the last close for this media.
10353 */
10354 if (un->un_wm_cache) {
10355 /*
10356 * Check if there are pending commands,
10357 * and if there are, give a warning and
10358 * do not destroy the cache.
10359 */
10360 if (un->un_ncmds_in_driver > 0) {
10361 scsi_log(SD_DEVINFO(un),
10362 sd_label, CE_WARN,
10363 "Unable to clean up memory "
10364 "because of pending I/O\n");
10365 } else {
10366 kmem_cache_destroy(
10367 un->un_wm_cache);
10368 un->un_wm_cache = NULL;
10369 }
10370 }
10371 }
10372 }
10373
10374 mutex_exit(SD_MUTEX(un));
10375 sema_v(&un->un_semoclose);
10376
10377 if (otyp == OTYP_LYR) {
10378 mutex_enter(&sd_detach_mutex);
10379 /*
10380 * The detach routine may run when the layer count
10381 * drops to zero.
10382 */
10383 un->un_layer_count--;
10384 mutex_exit(&sd_detach_mutex);
10385 }
10386
10387 return (rval);
10388 }
10389
10390
10391 /*
10392 * Function: sd_ready_and_valid
10393 *
10394 * Description: Test if device is ready and has a valid geometry.
10395 *
10396 * Arguments: ssc - sd_ssc_t containing the driver soft state (un)
10397 * part - partition number to be validated
10398 *
10399 * Return Code: SD_READY_VALID ready and valid label
10400 * SD_NOT_READY_VALID not ready, no label
10401 * SD_RESERVED_BY_OTHERS reservation conflict
10402 *
10403 * Context: Never called at interrupt context.
10404 */
10405
10406 static int
10407 sd_ready_and_valid(sd_ssc_t *ssc, int part)
10408 {
10409 struct sd_errstats *stp;
10410 uint64_t capacity;
10411 uint_t lbasize;
10412 int rval = SD_READY_VALID;
10413 char name_str[48];
10414 boolean_t is_valid;
10415 struct sd_lun *un;
10416 int status;
10417
10418 ASSERT(ssc != NULL);
10419 un = ssc->ssc_un;
10420 ASSERT(un != NULL);
10421 ASSERT(!mutex_owned(SD_MUTEX(un)));
10422
10423 mutex_enter(SD_MUTEX(un));
10424 /*
10425 * If a device has removable media, we must check if media is
10426 * ready when checking if this device is ready and valid.
10427 */
10428 if (un->un_f_has_removable_media) {
10429 mutex_exit(SD_MUTEX(un));
10430 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10431
10432 if (status != 0) {
10433 rval = SD_NOT_READY_VALID;
10434 mutex_enter(SD_MUTEX(un));
10435
10436 /* Ignore all failed status for removable media */
10437 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10438
10439 goto done;
10440 }
10441
10442 is_valid = SD_IS_VALID_LABEL(un);
10443 mutex_enter(SD_MUTEX(un));
10444 if (!is_valid ||
10445 (un->un_f_blockcount_is_valid == FALSE) ||
10446 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
10447
10448 /* capacity has to be read every open. */
10449 mutex_exit(SD_MUTEX(un));
10450 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
10451 &lbasize, SD_PATH_DIRECT);
10452
10453 if (status != 0) {
10454 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10455
10456 cmlb_invalidate(un->un_cmlbhandle,
10457 (void *)SD_PATH_DIRECT);
10458 mutex_enter(SD_MUTEX(un));
10459 rval = SD_NOT_READY_VALID;
10460
10461 goto done;
10462 } else {
10463 mutex_enter(SD_MUTEX(un));
10464 sd_update_block_info(un, lbasize, capacity);
10465 }
10466 }
10467
10468 /*
10469 * Check if the media in the device is writable or not.
10470 */
10471 if (!is_valid && ISCD(un)) {
10472 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
10473 }
10474
10475 } else {
10476 /*
10477 * Do a test unit ready to clear any unit attention from non-cd
10478 * devices.
10479 */
10480 mutex_exit(SD_MUTEX(un));
10481
10482 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10483 if (status != 0) {
10484 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10485 }
10486
10487 mutex_enter(SD_MUTEX(un));
10488 }
10489
10490
10491 /*
10492 * If this is a non-512 block device, allocate space for
10493 * the wmap cache. This is being done here since every time
10494 * a media is changed this routine will be called and the
10495 * block size is a function of media rather than device.
10496 */
10497 if ((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR ||
10498 un->un_f_non_devbsize_supported) &&
10499 un->un_tgt_blocksize != DEV_BSIZE) {
10500 if (!(un->un_wm_cache)) {
10501 (void) snprintf(name_str, sizeof (name_str),
10502 "%s%d_cache",
10503 ddi_driver_name(SD_DEVINFO(un)),
10504 ddi_get_instance(SD_DEVINFO(un)));
10505 un->un_wm_cache = kmem_cache_create(
10506 name_str, sizeof (struct sd_w_map),
10507 8, sd_wm_cache_constructor,
10508 sd_wm_cache_destructor, NULL,
10509 (void *)un, NULL, 0);
10510 if (!(un->un_wm_cache)) {
10511 rval = ENOMEM;
10512 goto done;
10513 }
10514 }
10515 }
10516
10517 if (un->un_state == SD_STATE_NORMAL) {
10518 /*
10519 * If the target is not yet ready here (defined by a TUR
10520 * failure), invalidate the geometry and print an 'offline'
10521 * message. This is a legacy message, as the state of the
10522 * target is not actually changed to SD_STATE_OFFLINE.
10523 * 10524 * If the TUR fails for EACCES (Reservation Conflict), 10525 * SD_RESERVED_BY_OTHERS will be returned to indicate 10526 * reservation conflict. If the TUR fails for other 10527 * reasons, SD_NOT_READY_VALID will be returned. 10528 */ 10529 int err; 10530 10531 mutex_exit(SD_MUTEX(un)); 10532 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10533 mutex_enter(SD_MUTEX(un)); 10534 10535 if (err != 0) { 10536 mutex_exit(SD_MUTEX(un)); 10537 cmlb_invalidate(un->un_cmlbhandle, 10538 (void *)SD_PATH_DIRECT); 10539 mutex_enter(SD_MUTEX(un)); 10540 if (err == EACCES) { 10541 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10542 "reservation conflict\n"); 10543 rval = SD_RESERVED_BY_OTHERS; 10544 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10545 } else { 10546 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10547 "drive offline\n"); 10548 rval = SD_NOT_READY_VALID; 10549 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 10550 } 10551 goto done; 10552 } 10553 } 10554 10555 if (un->un_f_format_in_progress == FALSE) { 10556 mutex_exit(SD_MUTEX(un)); 10557 10558 (void) cmlb_validate(un->un_cmlbhandle, 0, 10559 (void *)SD_PATH_DIRECT); 10560 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL, 10561 NULL, (void *) SD_PATH_DIRECT) != 0) { 10562 rval = SD_NOT_READY_VALID; 10563 mutex_enter(SD_MUTEX(un)); 10564 10565 goto done; 10566 } 10567 if (un->un_f_pkstats_enabled) { 10568 sd_set_pstats(un); 10569 SD_TRACE(SD_LOG_IO_PARTITION, un, 10570 "sd_ready_and_valid: un:0x%p pstats created and " 10571 "set\n", un); 10572 } 10573 mutex_enter(SD_MUTEX(un)); 10574 } 10575 10576 /* 10577 * If this device supports DOOR_LOCK command, try and send 10578 * this command to PREVENT MEDIA REMOVAL, but don't get upset 10579 * if it fails. For a CD, however, it is an error 10580 */ 10581 if (un->un_f_doorlock_supported) { 10582 mutex_exit(SD_MUTEX(un)); 10583 status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 10584 SD_PATH_DIRECT); 10585 10586 if ((status != 0) && ISCD(un)) { 10587 rval = SD_NOT_READY_VALID; 10588 mutex_enter(SD_MUTEX(un)); 10589 10590 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10591 10592 goto done; 10593 } else if (status != 0) 10594 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10595 mutex_enter(SD_MUTEX(un)); 10596 } 10597 10598 /* The state has changed, inform the media watch routines */ 10599 un->un_mediastate = DKIO_INSERTED; 10600 cv_broadcast(&un->un_state_cv); 10601 rval = SD_READY_VALID; 10602 10603 done: 10604 10605 /* 10606 * Initialize the capacity kstat value, if no media previously 10607 * (capacity kstat is 0) and a media has been inserted 10608 * (un_blockcount > 0). 10609 */ 10610 if (un->un_errstats != NULL) { 10611 stp = (struct sd_errstats *)un->un_errstats->ks_data; 10612 if ((stp->sd_capacity.value.ui64 == 0) && 10613 (un->un_f_blockcount_is_valid == TRUE)) { 10614 stp->sd_capacity.value.ui64 = 10615 (uint64_t)((uint64_t)un->un_blockcount * 10616 un->un_sys_blocksize); 10617 } 10618 } 10619 10620 mutex_exit(SD_MUTEX(un)); 10621 return (rval); 10622 } 10623 10624 10625 /* 10626 * Function: sdmin 10627 * 10628 * Description: Routine to limit the size of a data transfer. Used in 10629 * conjunction with physio(9F). 10630 * 10631 * Arguments: bp - pointer to the indicated buf(9S) struct. 10632 * 10633 * Context: Kernel thread context. 
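* (sdmin is passed to physio(9F) and aphysio(9F) as the minphys
* routine; it is invoked to clamp b_bcount before each transfer
* is issued.)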
10634 */
10635
10636 static void
10637 sdmin(struct buf *bp)
10638 {
10639 struct sd_lun *un;
10640 int instance;
10641
10642 instance = SDUNIT(bp->b_edev);
10643
10644 un = ddi_get_soft_state(sd_state, instance);
10645 ASSERT(un != NULL);
10646
10647 /*
10648 * We depend on DMA partial or buf breakup to restrict
10649 * IO size if either of them is enabled.
10650 */
10651 if (un->un_partial_dma_supported ||
10652 un->un_buf_breakup_supported) {
10653 return;
10654 }
10655
10656 if (bp->b_bcount > un->un_max_xfer_size) {
10657 bp->b_bcount = un->un_max_xfer_size;
10658 }
10659 }
10660
10661
10662 /*
10663 * Function: sdread
10664 *
10665 * Description: Driver's read(9e) entry point function.
10666 *
10667 * Arguments: dev - device number
10668 * uio - structure pointer describing where data is to be stored
10669 * in user's space
10670 * cred_p - user credential pointer
10671 *
10672 * Return Code: ENXIO
10673 * EIO
10674 * EINVAL
10675 * value returned by physio
10676 *
10677 * Context: Kernel thread context.
10678 */
10679 /* ARGSUSED */
10680 static int
10681 sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
10682 {
10683 struct sd_lun *un = NULL;
10684 int secmask;
10685 int err = 0;
10686 sd_ssc_t *ssc;
10687
10688 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10689 return (ENXIO);
10690 }
10691
10692 ASSERT(!mutex_owned(SD_MUTEX(un)));
10693
10694
10695 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10696 mutex_enter(SD_MUTEX(un));
10697 /*
10698 * Because the call to sd_ready_and_valid will issue I/O we
10699 * must wait here if either the device is suspended or
10700 * if its power level is changing.
10701 */
10702 while ((un->un_state == SD_STATE_SUSPENDED) ||
10703 (un->un_state == SD_STATE_PM_CHANGING)) {
10704 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10705 }
10706 un->un_ncmds_in_driver++;
10707 mutex_exit(SD_MUTEX(un));
10708
10709 /* Initialize sd_ssc_t for internal uscsi commands */
10710 ssc = sd_ssc_init(un);
10711 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10712 err = EIO;
10713 } else {
10714 err = 0;
10715 }
10716 sd_ssc_fini(ssc);
10717
10718 mutex_enter(SD_MUTEX(un));
10719 un->un_ncmds_in_driver--;
10720 ASSERT(un->un_ncmds_in_driver >= 0);
10721 mutex_exit(SD_MUTEX(un));
10722 if (err != 0)
10723 return (err);
10724 }
10725
10726 /*
10727 * Read requests are restricted to multiples of the system block size.
10728 */
10729 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR)
10730 secmask = un->un_tgt_blocksize - 1;
10731 else
10732 secmask = DEV_BSIZE - 1;
10733
10734 if (uio->uio_loffset & ((offset_t)(secmask))) {
10735 SD_ERROR(SD_LOG_READ_WRITE, un,
10736 "sdread: file offset not modulo %d\n",
10737 secmask + 1);
10738 err = EINVAL;
10739 } else if (uio->uio_iov->iov_len & (secmask)) {
10740 SD_ERROR(SD_LOG_READ_WRITE, un,
10741 "sdread: transfer length not modulo %d\n",
10742 secmask + 1);
10743 err = EINVAL;
10744 } else {
10745 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
10746 }
10747
10748 return (err);
10749 }
10750
10751
10752 /*
10753 * Function: sdwrite
10754 *
10755 * Description: Driver's write(9e) entry point function.
10756 *
10757 * Arguments: dev - device number
10758 * uio - structure pointer describing where data is stored in
10759 * user's space
10760 * cred_p - user credential pointer
10761 *
10762 * Return Code: ENXIO
10763 * EIO
10764 * EINVAL
10765 * value returned by physio
10766 *
10767 * Context: Kernel thread context.
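*
* EINVAL is returned when the file offset or the transfer length
* is not a multiple of the block size; e.g. with a 512-byte block
* size, secmask below is 0x1ff and a request with any of those
* low-order bits set is rejected.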
10768 */ 10769 /* ARGSUSED */ 10770 static int 10771 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 10772 { 10773 struct sd_lun *un = NULL; 10774 int secmask; 10775 int err = 0; 10776 sd_ssc_t *ssc; 10777 10778 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10779 return (ENXIO); 10780 } 10781 10782 ASSERT(!mutex_owned(SD_MUTEX(un))); 10783 10784 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10785 mutex_enter(SD_MUTEX(un)); 10786 /* 10787 * Because the call to sd_ready_and_valid will issue I/O we 10788 * must wait here if either the device is suspended or 10789 * if it's power level is changing. 10790 */ 10791 while ((un->un_state == SD_STATE_SUSPENDED) || 10792 (un->un_state == SD_STATE_PM_CHANGING)) { 10793 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10794 } 10795 un->un_ncmds_in_driver++; 10796 mutex_exit(SD_MUTEX(un)); 10797 10798 /* Initialize sd_ssc_t for internal uscsi commands */ 10799 ssc = sd_ssc_init(un); 10800 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10801 err = EIO; 10802 } else { 10803 err = 0; 10804 } 10805 sd_ssc_fini(ssc); 10806 10807 mutex_enter(SD_MUTEX(un)); 10808 un->un_ncmds_in_driver--; 10809 ASSERT(un->un_ncmds_in_driver >= 0); 10810 mutex_exit(SD_MUTEX(un)); 10811 if (err != 0) 10812 return (err); 10813 } 10814 10815 /* 10816 * Write requests are restricted to multiples of the system block size. 10817 */ 10818 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) 10819 secmask = un->un_tgt_blocksize - 1; 10820 else 10821 secmask = DEV_BSIZE - 1; 10822 10823 if (uio->uio_loffset & ((offset_t)(secmask))) { 10824 SD_ERROR(SD_LOG_READ_WRITE, un, 10825 "sdwrite: file offset not modulo %d\n", 10826 secmask + 1); 10827 err = EINVAL; 10828 } else if (uio->uio_iov->iov_len & (secmask)) { 10829 SD_ERROR(SD_LOG_READ_WRITE, un, 10830 "sdwrite: transfer length not modulo %d\n", 10831 secmask + 1); 10832 err = EINVAL; 10833 } else { 10834 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 10835 } 10836 10837 return (err); 10838 } 10839 10840 10841 /* 10842 * Function: sdaread 10843 * 10844 * Description: Driver's aread(9e) entry point function. 10845 * 10846 * Arguments: dev - device number 10847 * aio - structure pointer describing where data is to be stored 10848 * cred_p - user credential pointer 10849 * 10850 * Return Code: ENXIO 10851 * EIO 10852 * EINVAL 10853 * value returned by aphysio 10854 * 10855 * Context: Kernel thread context. 10856 */ 10857 /* ARGSUSED */ 10858 static int 10859 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10860 { 10861 struct sd_lun *un = NULL; 10862 struct uio *uio = aio->aio_uio; 10863 int secmask; 10864 int err = 0; 10865 sd_ssc_t *ssc; 10866 10867 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10868 return (ENXIO); 10869 } 10870 10871 ASSERT(!mutex_owned(SD_MUTEX(un))); 10872 10873 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10874 mutex_enter(SD_MUTEX(un)); 10875 /* 10876 * Because the call to sd_ready_and_valid will issue I/O we 10877 * must wait here if either the device is suspended or 10878 * if it's power level is changing. 
10879 */ 10880 while ((un->un_state == SD_STATE_SUSPENDED) || 10881 (un->un_state == SD_STATE_PM_CHANGING)) { 10882 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10883 } 10884 un->un_ncmds_in_driver++; 10885 mutex_exit(SD_MUTEX(un)); 10886 10887 /* Initialize sd_ssc_t for internal uscsi commands */ 10888 ssc = sd_ssc_init(un); 10889 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10890 err = EIO; 10891 } else { 10892 err = 0; 10893 } 10894 sd_ssc_fini(ssc); 10895 10896 mutex_enter(SD_MUTEX(un)); 10897 un->un_ncmds_in_driver--; 10898 ASSERT(un->un_ncmds_in_driver >= 0); 10899 mutex_exit(SD_MUTEX(un)); 10900 if (err != 0) 10901 return (err); 10902 } 10903 10904 /* 10905 * Read requests are restricted to multiples of the system block size. 10906 */ 10907 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) 10908 secmask = un->un_tgt_blocksize - 1; 10909 else 10910 secmask = DEV_BSIZE - 1; 10911 10912 if (uio->uio_loffset & ((offset_t)(secmask))) { 10913 SD_ERROR(SD_LOG_READ_WRITE, un, 10914 "sdaread: file offset not modulo %d\n", 10915 secmask + 1); 10916 err = EINVAL; 10917 } else if (uio->uio_iov->iov_len & (secmask)) { 10918 SD_ERROR(SD_LOG_READ_WRITE, un, 10919 "sdaread: transfer length not modulo %d\n", 10920 secmask + 1); 10921 err = EINVAL; 10922 } else { 10923 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 10924 } 10925 10926 return (err); 10927 } 10928 10929 10930 /* 10931 * Function: sdawrite 10932 * 10933 * Description: Driver's awrite(9e) entry point function. 10934 * 10935 * Arguments: dev - device number 10936 * aio - structure pointer describing where data is stored 10937 * cred_p - user credential pointer 10938 * 10939 * Return Code: ENXIO 10940 * EIO 10941 * EINVAL 10942 * value returned by aphysio 10943 * 10944 * Context: Kernel thread context. 10945 */ 10946 /* ARGSUSED */ 10947 static int 10948 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10949 { 10950 struct sd_lun *un = NULL; 10951 struct uio *uio = aio->aio_uio; 10952 int secmask; 10953 int err = 0; 10954 sd_ssc_t *ssc; 10955 10956 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10957 return (ENXIO); 10958 } 10959 10960 ASSERT(!mutex_owned(SD_MUTEX(un))); 10961 10962 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10963 mutex_enter(SD_MUTEX(un)); 10964 /* 10965 * Because the call to sd_ready_and_valid will issue I/O we 10966 * must wait here if either the device is suspended or 10967 * if it's power level is changing. 10968 */ 10969 while ((un->un_state == SD_STATE_SUSPENDED) || 10970 (un->un_state == SD_STATE_PM_CHANGING)) { 10971 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10972 } 10973 un->un_ncmds_in_driver++; 10974 mutex_exit(SD_MUTEX(un)); 10975 10976 /* Initialize sd_ssc_t for internal uscsi commands */ 10977 ssc = sd_ssc_init(un); 10978 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10979 err = EIO; 10980 } else { 10981 err = 0; 10982 } 10983 sd_ssc_fini(ssc); 10984 10985 mutex_enter(SD_MUTEX(un)); 10986 un->un_ncmds_in_driver--; 10987 ASSERT(un->un_ncmds_in_driver >= 0); 10988 mutex_exit(SD_MUTEX(un)); 10989 if (err != 0) 10990 return (err); 10991 } 10992 10993 /* 10994 * Write requests are restricted to multiples of the system block size. 
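 *
 * A sketch of the mask test used below (sizes assumed for
 * illustration): with DEV_BSIZE = 512, secmask = 511 = 0x1ff, so
 *
 *	uio_loffset = 8192  ->  8192 & 0x1ff == 0  (aligned, accepted)
 *	uio_loffset = 8195  ->  8195 & 0x1ff == 3  (misaligned, EINVAL)
 *
 * and likewise for iov_len. When un_f_rmw_type is
 * SD_RMW_TYPE_RETURN_ERROR, the same test is applied against the
 * (possibly larger) target block size instead.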
10995 */ 10996 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) 10997 secmask = un->un_tgt_blocksize - 1; 10998 else 10999 secmask = DEV_BSIZE - 1; 11000 11001 if (uio->uio_loffset & ((offset_t)(secmask))) { 11002 SD_ERROR(SD_LOG_READ_WRITE, un, 11003 "sdawrite: file offset not modulo %d\n", 11004 secmask + 1); 11005 err = EINVAL; 11006 } else if (uio->uio_iov->iov_len & (secmask)) { 11007 SD_ERROR(SD_LOG_READ_WRITE, un, 11008 "sdawrite: transfer length not modulo %d\n", 11009 secmask + 1); 11010 err = EINVAL; 11011 } else { 11012 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 11013 } 11014 11015 return (err); 11016 } 11017 11018 11019 11020 11021 11022 /* 11023 * Driver IO processing follows the following sequence: 11024 * 11025 * sdioctl(9E) sdstrategy(9E) biodone(9F) 11026 * | | ^ 11027 * v v | 11028 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 11029 * | | | | 11030 * v | | | 11031 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 11032 * | | ^ ^ 11033 * v v | | 11034 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 11035 * | | | | 11036 * +---+ | +------------+ +-------+ 11037 * | | | | 11038 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11039 * | v | | 11040 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 11041 * | | ^ | 11042 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11043 * | v | | 11044 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 11045 * | | ^ | 11046 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11047 * | v | | 11048 * | sd_checksum_iostart() sd_checksum_iodone() | 11049 * | | ^ | 11050 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 11051 * | v | | 11052 * | sd_pm_iostart() sd_pm_iodone() | 11053 * | | ^ | 11054 * | | | | 11055 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 11056 * | ^ 11057 * v | 11058 * sd_core_iostart() | 11059 * | | 11060 * | +------>(*destroypkt)() 11061 * +-> sd_start_cmds() <-+ | | 11062 * | | | v 11063 * | | | scsi_destroy_pkt(9F) 11064 * | | | 11065 * +->(*initpkt)() +- sdintr() 11066 * | | | | 11067 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 11068 * | +-> scsi_setup_cdb(9F) | 11069 * | | 11070 * +--> scsi_transport(9F) | 11071 * | | 11072 * +----> SCSA ---->+ 11073 * 11074 * 11075 * This code is based upon the following presumptions: 11076 * 11077 * - iostart and iodone functions operate on buf(9S) structures. These 11078 * functions perform the necessary operations on the buf(9S) and pass 11079 * them along to the next function in the chain by using the macros 11080 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 11081 * (for iodone side functions). 11082 * 11083 * - The iostart side functions may sleep. The iodone side functions 11084 * are called under interrupt context and may NOT sleep. Therefore 11085 * iodone side functions also may not call iostart side functions. 11086 * (NOTE: iostart side functions should NOT sleep for memory, as 11087 * this could result in deadlock.) 11088 * 11089 * - An iostart side function may call its corresponding iodone side 11090 * function directly (if necessary). 11091 * 11092 * - In the event of an error, an iostart side function can return a buf(9S) 11093 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 11094 * b_error in the usual way of course). 11095 * 11096 * - The taskq mechanism may be used by the iodone side functions to dispatch 11097 * requests to the iostart side functions. 
The iostart side functions in
11098 *    this case would be called under the context of a taskq thread, so it's
11099 *    OK for them to block/sleep/spin in this case.
11100 *
11101 * - iostart side functions may allocate "shadow" buf(9S) structs and
11102 *   pass them along to the next function in the chain. The corresponding
11103 *   iodone side functions must coalesce the "shadow" bufs and return
11104 *   the "original" buf to the next higher layer.
11105 *
11106 * - The b_private field of the buf(9S) struct holds a pointer to
11107 *   an sd_xbuf struct, which contains information needed to
11108 *   construct the scsi_pkt for the command.
11109 *
11110 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
11111 *   layer must acquire & release the SD_MUTEX(un) as needed.
11112 */
11113
11114
11115 /*
11116 * Create taskq for all targets in the system. This is created at
11117 * _init(9E) and destroyed at _fini(9E).
11118 *
11119 * Note: here we set the minalloc to a reasonably high number to ensure that
11120 * we will have an adequate supply of task entries available at interrupt time.
11121 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
11122 * sd_taskq_create(). Since we do not want to sleep for allocations at
11123 * interrupt time, set maxalloc equal to minalloc. That way we will just fail
11124 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
11125 * requests at any one instant in time.
11126 */
11127 #define	SD_TASKQ_NUMTHREADS	8
11128 #define	SD_TASKQ_MINALLOC	256
11129 #define	SD_TASKQ_MAXALLOC	256
11130
11131 static taskq_t	*sd_tq = NULL;
11132 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))
11133
11134 static int	sd_taskq_minalloc = SD_TASKQ_MINALLOC;
11135 static int	sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
11136
11137 /*
11138 * The following task queue is created for the write part of
11139 * read-modify-write on devices with a non-512-byte block size.
11140 * Limit the number of threads to 1 for now. This number was chosen
11141 * because the queue currently applies only to DVD-RAM and MO drives,
11142 * for which performance is not the main criterion at this stage.
11143 * Note: whether a single taskq could serve both purposes remains to be explored.
11144 */
11145 #define	SD_WMR_TASKQ_NUMTHREADS	1
11146 static taskq_t	*sd_wmr_tq = NULL;
11147 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))
11148
11149 /*
11150 * Function: sd_taskq_create
11151 *
11152 * Description: Create taskq thread(s) and preallocate task entries
11153 *
11154 * Return Code: None (the allocated taskqs are stored in sd_tq and sd_wmr_tq).
11155 *
11156 * Context: Can sleep. Requires blockable context.
11157 *
11158 * Notes: - The taskq() facility currently is NOT part of the DDI.
11159 *          (definitely NOT recommended for 3rd-party drivers!) :-)
11160 *        - taskq_create() will block for memory; it will also panic
11161 *          if it cannot create the requested number of threads.
11162 *        - Currently taskq_create() creates threads that cannot be
11163 *          swapped.
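 *        - A dispatch sketch (the handler name is hypothetical): with
 *          maxalloc == minalloc and TQ_NOSLEEP, taskq_dispatch()
 *          returns 0 rather than blocking once the preallocated
 *          entries are exhausted, so callers must be prepared to fail
 *          the request:
 *
 *		if (taskq_dispatch(sd_tq, sd_some_handler, un,
 *		    TQ_NOSLEEP) == 0) {
 *			... fail the command rather than sleep ...
 *		}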
11164 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 11165 * supply of taskq entries at interrupt time (ie, so that we 11166 * do not have to sleep for memory) 11167 */ 11168 11169 static void 11170 sd_taskq_create(void) 11171 { 11172 char taskq_name[TASKQ_NAMELEN]; 11173 11174 ASSERT(sd_tq == NULL); 11175 ASSERT(sd_wmr_tq == NULL); 11176 11177 (void) snprintf(taskq_name, sizeof (taskq_name), 11178 "%s_drv_taskq", sd_label); 11179 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 11180 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11181 TASKQ_PREPOPULATE)); 11182 11183 (void) snprintf(taskq_name, sizeof (taskq_name), 11184 "%s_rmw_taskq", sd_label); 11185 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 11186 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11187 TASKQ_PREPOPULATE)); 11188 } 11189 11190 11191 /* 11192 * Function: sd_taskq_delete 11193 * 11194 * Description: Complementary cleanup routine for sd_taskq_create(). 11195 * 11196 * Context: Kernel thread context. 11197 */ 11198 11199 static void 11200 sd_taskq_delete(void) 11201 { 11202 ASSERT(sd_tq != NULL); 11203 ASSERT(sd_wmr_tq != NULL); 11204 taskq_destroy(sd_tq); 11205 taskq_destroy(sd_wmr_tq); 11206 sd_tq = NULL; 11207 sd_wmr_tq = NULL; 11208 } 11209 11210 11211 /* 11212 * Function: sdstrategy 11213 * 11214 * Description: Driver's strategy (9E) entry point function. 11215 * 11216 * Arguments: bp - pointer to buf(9S) 11217 * 11218 * Return Code: Always returns zero 11219 * 11220 * Context: Kernel thread context. 11221 */ 11222 11223 static int 11224 sdstrategy(struct buf *bp) 11225 { 11226 struct sd_lun *un; 11227 11228 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11229 if (un == NULL) { 11230 bioerror(bp, EIO); 11231 bp->b_resid = bp->b_bcount; 11232 biodone(bp); 11233 return (0); 11234 } 11235 11236 /* As was done in the past, fail new cmds. if state is dumping. */ 11237 if (un->un_state == SD_STATE_DUMPING) { 11238 bioerror(bp, ENXIO); 11239 bp->b_resid = bp->b_bcount; 11240 biodone(bp); 11241 return (0); 11242 } 11243 11244 ASSERT(!mutex_owned(SD_MUTEX(un))); 11245 11246 /* 11247 * Commands may sneak in while we released the mutex in 11248 * DDI_SUSPEND, we should block new commands. However, old 11249 * commands that are still in the driver at this point should 11250 * still be allowed to drain. 11251 */ 11252 mutex_enter(SD_MUTEX(un)); 11253 /* 11254 * Must wait here if either the device is suspended or 11255 * if it's power level is changing. 11256 */ 11257 while ((un->un_state == SD_STATE_SUSPENDED) || 11258 (un->un_state == SD_STATE_PM_CHANGING)) { 11259 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11260 } 11261 11262 un->un_ncmds_in_driver++; 11263 11264 /* 11265 * atapi: Since we are running the CD for now in PIO mode we need to 11266 * call bp_mapin here to avoid bp_mapin called interrupt context under 11267 * the HBA's init_pkt routine. 11268 */ 11269 if (un->un_f_cfg_is_atapi == TRUE) { 11270 mutex_exit(SD_MUTEX(un)); 11271 bp_mapin(bp); 11272 mutex_enter(SD_MUTEX(un)); 11273 } 11274 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 11275 un->un_ncmds_in_driver); 11276 11277 if (bp->b_flags & B_WRITE) 11278 un->un_f_sync_cache_required = TRUE; 11279 11280 mutex_exit(SD_MUTEX(un)); 11281 11282 /* 11283 * This will (eventually) allocate the sd_xbuf area and 11284 * call sd_xbuf_strategy(). 
We just want to return the 11285 * result of ddi_xbuf_qstrategy so that we have an opt- 11286 * imized tail call which saves us a stack frame. 11287 */ 11288 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 11289 } 11290 11291 11292 /* 11293 * Function: sd_xbuf_strategy 11294 * 11295 * Description: Function for initiating IO operations via the 11296 * ddi_xbuf_qstrategy() mechanism. 11297 * 11298 * Context: Kernel thread context. 11299 */ 11300 11301 static void 11302 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 11303 { 11304 struct sd_lun *un = arg; 11305 11306 ASSERT(bp != NULL); 11307 ASSERT(xp != NULL); 11308 ASSERT(un != NULL); 11309 ASSERT(!mutex_owned(SD_MUTEX(un))); 11310 11311 /* 11312 * Initialize the fields in the xbuf and save a pointer to the 11313 * xbuf in bp->b_private. 11314 */ 11315 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 11316 11317 /* Send the buf down the iostart chain */ 11318 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 11319 } 11320 11321 11322 /* 11323 * Function: sd_xbuf_init 11324 * 11325 * Description: Prepare the given sd_xbuf struct for use. 11326 * 11327 * Arguments: un - ptr to softstate 11328 * bp - ptr to associated buf(9S) 11329 * xp - ptr to associated sd_xbuf 11330 * chain_type - IO chain type to use: 11331 * SD_CHAIN_NULL 11332 * SD_CHAIN_BUFIO 11333 * SD_CHAIN_USCSI 11334 * SD_CHAIN_DIRECT 11335 * SD_CHAIN_DIRECT_PRIORITY 11336 * pktinfop - ptr to private data struct for scsi_pkt(9S) 11337 * initialization; may be NULL if none. 11338 * 11339 * Context: Kernel thread context 11340 */ 11341 11342 static void 11343 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 11344 uchar_t chain_type, void *pktinfop) 11345 { 11346 int index; 11347 11348 ASSERT(un != NULL); 11349 ASSERT(bp != NULL); 11350 ASSERT(xp != NULL); 11351 11352 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 11353 bp, chain_type); 11354 11355 xp->xb_un = un; 11356 xp->xb_pktp = NULL; 11357 xp->xb_pktinfo = pktinfop; 11358 xp->xb_private = bp->b_private; 11359 xp->xb_blkno = (daddr_t)bp->b_blkno; 11360 11361 /* 11362 * Set up the iostart and iodone chain indexes in the xbuf, based 11363 * upon the specified chain type to use. 11364 */ 11365 switch (chain_type) { 11366 case SD_CHAIN_NULL: 11367 /* 11368 * Fall thru to just use the values for the buf type, even 11369 * tho for the NULL chain these values will never be used. 11370 */ 11371 /* FALLTHRU */ 11372 case SD_CHAIN_BUFIO: 11373 index = un->un_buf_chain_type; 11374 if ((!un->un_f_has_removable_media) && 11375 (un->un_tgt_blocksize != 0) && 11376 (un->un_tgt_blocksize != DEV_BSIZE)) { 11377 int secmask = 0, blknomask = 0; 11378 blknomask = 11379 (un->un_tgt_blocksize / DEV_BSIZE) - 1; 11380 secmask = un->un_tgt_blocksize - 1; 11381 11382 if ((bp->b_lblkno & (blknomask)) || 11383 (bp->b_bcount & (secmask))) { 11384 if (un->un_f_rmw_type != 11385 SD_RMW_TYPE_RETURN_ERROR) { 11386 if (un->un_f_pm_is_enabled == FALSE) 11387 index = 11388 SD_CHAIN_INFO_MSS_DSK_NO_PM; 11389 else 11390 index = 11391 SD_CHAIN_INFO_MSS_DISK; 11392 } 11393 } 11394 } 11395 break; 11396 case SD_CHAIN_USCSI: 11397 index = un->un_uscsi_chain_type; 11398 break; 11399 case SD_CHAIN_DIRECT: 11400 index = un->un_direct_chain_type; 11401 break; 11402 case SD_CHAIN_DIRECT_PRIORITY: 11403 index = un->un_priority_chain_type; 11404 break; 11405 default: 11406 /* We're really broken if we ever get here... 
*/ 11407 panic("sd_xbuf_init: illegal chain type!"); 11408 /*NOTREACHED*/ 11409 } 11410 11411 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 11412 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 11413 11414 /* 11415 * It might be a bit easier to simply bzero the entire xbuf above, 11416 * but it turns out that since we init a fair number of members anyway, 11417 * we save a fair number cycles by doing explicit assignment of zero. 11418 */ 11419 xp->xb_pkt_flags = 0; 11420 xp->xb_dma_resid = 0; 11421 xp->xb_retry_count = 0; 11422 xp->xb_victim_retry_count = 0; 11423 xp->xb_ua_retry_count = 0; 11424 xp->xb_nr_retry_count = 0; 11425 xp->xb_sense_bp = NULL; 11426 xp->xb_sense_status = 0; 11427 xp->xb_sense_state = 0; 11428 xp->xb_sense_resid = 0; 11429 xp->xb_ena = 0; 11430 11431 bp->b_private = xp; 11432 bp->b_flags &= ~(B_DONE | B_ERROR); 11433 bp->b_resid = 0; 11434 bp->av_forw = NULL; 11435 bp->av_back = NULL; 11436 bioerror(bp, 0); 11437 11438 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 11439 } 11440 11441 11442 /* 11443 * Function: sd_uscsi_strategy 11444 * 11445 * Description: Wrapper for calling into the USCSI chain via physio(9F) 11446 * 11447 * Arguments: bp - buf struct ptr 11448 * 11449 * Return Code: Always returns 0 11450 * 11451 * Context: Kernel thread context 11452 */ 11453 11454 static int 11455 sd_uscsi_strategy(struct buf *bp) 11456 { 11457 struct sd_lun *un; 11458 struct sd_uscsi_info *uip; 11459 struct sd_xbuf *xp; 11460 uchar_t chain_type; 11461 uchar_t cmd; 11462 11463 ASSERT(bp != NULL); 11464 11465 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11466 if (un == NULL) { 11467 bioerror(bp, EIO); 11468 bp->b_resid = bp->b_bcount; 11469 biodone(bp); 11470 return (0); 11471 } 11472 11473 ASSERT(!mutex_owned(SD_MUTEX(un))); 11474 11475 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 11476 11477 /* 11478 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 11479 */ 11480 ASSERT(bp->b_private != NULL); 11481 uip = (struct sd_uscsi_info *)bp->b_private; 11482 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0]; 11483 11484 mutex_enter(SD_MUTEX(un)); 11485 /* 11486 * atapi: Since we are running the CD for now in PIO mode we need to 11487 * call bp_mapin here to avoid bp_mapin called interrupt context under 11488 * the HBA's init_pkt routine. 11489 */ 11490 if (un->un_f_cfg_is_atapi == TRUE) { 11491 mutex_exit(SD_MUTEX(un)); 11492 bp_mapin(bp); 11493 mutex_enter(SD_MUTEX(un)); 11494 } 11495 un->un_ncmds_in_driver++; 11496 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 11497 un->un_ncmds_in_driver); 11498 11499 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) && 11500 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1)) 11501 un->un_f_sync_cache_required = TRUE; 11502 11503 mutex_exit(SD_MUTEX(un)); 11504 11505 switch (uip->ui_flags) { 11506 case SD_PATH_DIRECT: 11507 chain_type = SD_CHAIN_DIRECT; 11508 break; 11509 case SD_PATH_DIRECT_PRIORITY: 11510 chain_type = SD_CHAIN_DIRECT_PRIORITY; 11511 break; 11512 default: 11513 chain_type = SD_CHAIN_USCSI; 11514 break; 11515 } 11516 11517 /* 11518 * We may allocate extra buf for external USCSI commands. If the 11519 * application asks for bigger than 20-byte sense data via USCSI, 11520 * SCSA layer will allocate 252 bytes sense buf for that command. 
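 *
 * The allocation below makes room for the larger sense data in the
 * trailing sense area of the xbuf (assuming the sense buffer is the
 * trailing member, as the size arithmetic implies). As a worked
 * example with the sizes named above (20-byte SENSE_LENGTH, 252-byte
 * MAX_SENSE_LENGTH):
 *
 *	sizeof (struct sd_xbuf) - SENSE_LENGTH + MAX_SENSE_LENGTH
 *	    == sizeof (struct sd_xbuf) + 232
 *
 * i.e. the xbuf grows by 232 bytes of extra sense space. The matching
 * kmem_free() in sd_uscsi_iodone() must compute the same size.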
11521 	 */
11522 	if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen >
11523 	    SENSE_LENGTH) {
11524 		xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH +
11525 		    MAX_SENSE_LENGTH, KM_SLEEP);
11526 	} else {
11527 		xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP);
11528 	}
11529
11530 	sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);
11531
11532 	/* Use the index obtained within xbuf_init */
11533 	SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);
11534
11535 	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);
11536
11537 	return (0);
11538 }
11539
11540 /*
11541 * Function: sd_send_scsi_cmd
11542 *
11543 * Description: Runs a USCSI command for the user (when called through
11544 *		sdioctl), or for the driver.
11545 *
11546 * Arguments: dev - the dev_t for the device
11547 *		incmd - ptr to a valid uscsi_cmd struct
11548 *		flag - bit flag, indicating open settings, 32/64 bit type
11549 *		dataspace - UIO_USERSPACE or UIO_SYSSPACE
11550 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11551 *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11552 *		to use the USCSI "direct" chain and bypass the normal
11553 *		command waitq.
11554 *
11555 * Return Code: 0 - successful completion of the given command
11556 *		EIO - scsi_uscsi_handle_command() failed
11557 *		ENXIO - soft state not found for specified dev
11558 *		EINVAL
11559 *		EFAULT - copyin/copyout error
11560 *		return code of scsi_uscsi_handle_command():
11561 *			EIO
11562 *			ENXIO
11563 *			EACCES
11564 *
11565 * Context: Waits for command to complete. Can sleep.
11566 */
11567
11568 static int
11569 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
11570     enum uio_seg dataspace, int path_flag)
11571 {
11572 	struct sd_lun	*un;
11573 	sd_ssc_t	*ssc;
11574 	int		rval;
11575
11576 	un = ddi_get_soft_state(sd_state, SDUNIT(dev));
11577 	if (un == NULL) {
11578 		return (ENXIO);
11579 	}
11580
11581 	/*
11582 	 * Use sd_ssc_send to handle the uscsi cmd.
11583 	 */
11584 	ssc = sd_ssc_init(un);
11585 	rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag);
11586 	sd_ssc_fini(ssc);
11587
11588 	return (rval);
11589 }
11590
11591 /*
11592 * Function: sd_ssc_init
11593 *
11594 * Description: Callers of the uscsi interface use this function to initialize
11595 *		the necessary fields, such as the uscsi_cmd and sd_uscsi_info
11596 *		structs.
11597 *
11598 *		The return value of sd_send_scsi_cmd is treated as a fault
11599 *		only under certain conditions: even when it is non-zero, some
11600 *		callers may ignore it. That is to say, we cannot make an
11601 *		accurate assessment in sdintr, since a command failing in
11602 *		sdintr does not mean the caller of sd_send_scsi_cmd will
11603 *		treat it as a real failure.
11604 *
11605 *		To avoid printing too many error logs for a failed uscsi
11606 *		packet that the caller may not treat as a failure, sd keeps
11607 *		silent while handling all uscsi commands.
11608 *
11609 *		During detach->attach and attach-open, for some types of
11610 *		problems, the driver should provide information about the
11611 *		problem encountered. These paths use USCSI_SILENT, which
11612 *		suppresses all driver information. The result is that no
11613 *		information about the problem is available. Being completely
11614 *		silent during this time is inappropriate. The driver needs a
11615 *		more selective filter than USCSI_SILENT, so that information
 *		related to faults is provided.
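 *
 *		A minimal lifecycle sketch (ucmd is a caller-prepared
 *		uscsi_cmd and the flags shown are illustrative; modeled on
 *		how sd_ready_and_valid() and sd_send_scsi_cmd() drive
 *		these routines, with error handling elided):
 *
 *			sd_ssc_t *ssc = sd_ssc_init(un);
 *			rval = sd_ssc_send(ssc, &ucmd, FKIOCTL,
 *			    UIO_SYSSPACE, SD_PATH_DIRECT);
 *			if (rval != 0)
 *				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
 *			else
 *				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
 *			sd_ssc_fini(ssc);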
11616 *
11617 *		To make an accurate assessment, the caller of
11618 *		sd_send_scsi_USCSI_CMD should take ownership and gather the
11619 *		information needed to print error messages.
11620 *
11621 *		If we want to print the necessary info for a uscsi command,
11622 *		we need to keep the uscsi_cmd and sd_uscsi_info until we can
11623 *		make the assessment. We use sd_ssc_init to allocate the
11624 *		structs needed for sending a uscsi command, and we are also
11625 *		responsible for freeing the memory by calling
11626 *		sd_ssc_fini.
11627 *
11628 *		The calling sequence looks like:
11629 *		sd_ssc_init->
11630 *
11631 *			...
11632 *
11633 *			sd_send_scsi_USCSI_CMD->
11634 *				sd_ssc_send-> - - - sdintr
11635 *			...
11636 *
11637 *			if we think the return value should be treated as a
11638 *			failure, we make the assessment here and print out
11639 *			what is necessary by retrieving the uscsi_cmd and
11640 *			sd_uscsi_info
11641 *			...
11642 *
11643 *		sd_ssc_fini
11644 *
11645 *
11646 * Arguments: un - pointer to driver soft state (unit) structure for this
11647 *		target.
11648 *
11649 * Return code: sd_ssc_t - pointer to the allocated sd_ssc_t struct; it
11650 *		contains the uscsi_cmd and sd_uscsi_info.
11651 *		NULL - if memory for the sd_ssc_t struct cannot be allocated
11652 *
11653 * Context: Kernel Thread.
11654 */
11655 static sd_ssc_t *
11656 sd_ssc_init(struct sd_lun *un)
11657 {
11658 	sd_ssc_t		*ssc;
11659 	struct uscsi_cmd	*ucmdp;
11660 	struct sd_uscsi_info	*uip;
11661
11662 	ASSERT(un != NULL);
11663 	ASSERT(!mutex_owned(SD_MUTEX(un)));
11664
11665 	/*
11666 	 * Allocate sd_ssc_t structure
11667 	 */
11668 	ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP);
11669
11670 	/*
11671 	 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine
11672 	 */
11673 	ucmdp = scsi_uscsi_alloc();
11674
11675 	/*
11676 	 * Allocate sd_uscsi_info structure
11677 	 */
11678 	uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
11679
11680 	ssc->ssc_uscsi_cmd = ucmdp;
11681 	ssc->ssc_uscsi_info = uip;
11682 	ssc->ssc_un = un;
11683
11684 	return (ssc);
11685 }
11686
11687 /*
11688 * Function: sd_ssc_fini
11689 *
11690 * Description: Free an sd_ssc_t and everything hanging off it.
11691 *
11692 * Arguments: ssc - struct pointer of sd_ssc_t.
11693 */
11694 static void
11695 sd_ssc_fini(sd_ssc_t *ssc)
11696 {
11697 	scsi_uscsi_free(ssc->ssc_uscsi_cmd);
11698
11699 	if (ssc->ssc_uscsi_info != NULL) {
11700 		kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info));
11701 		ssc->ssc_uscsi_info = NULL;
11702 	}
11703
11704 	kmem_free(ssc, sizeof (sd_ssc_t));
11705 	ssc = NULL;
11706 }
11707
11708 /*
11709 * Function: sd_ssc_send
11710 *
11711 * Description: Runs a USCSI command for the user when called through
11712 *		sdioctl, or for the driver.
11713 *
11714 * Arguments: ssc - pointer to the sd_ssc_t struct that carries the
11715 *		uscsi_cmd and sd_uscsi_info
11716 *		incmd - ptr to a valid uscsi_cmd struct
11717 *		flag - bit flag, indicating open settings, 32/64 bit type
11718 *		dataspace - UIO_USERSPACE or UIO_SYSSPACE
11719 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11720 *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11721 *		to use the USCSI "direct" chain and bypass the normal
11722 *		command waitq.
11723 * 11724 * Return Code: 0 - successful completion of the given command 11725 * EIO - scsi_uscsi_handle_command() failed 11726 * ENXIO - soft state not found for specified dev 11727 * EINVAL 11728 * EFAULT - copyin/copyout error 11729 * return code of scsi_uscsi_handle_command(): 11730 * EIO 11731 * ENXIO 11732 * EACCES 11733 * 11734 * Context: Kernel Thread; 11735 * Waits for command to complete. Can sleep. 11736 */ 11737 static int 11738 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag, 11739 enum uio_seg dataspace, int path_flag) 11740 { 11741 struct sd_uscsi_info *uip; 11742 struct uscsi_cmd *uscmd; 11743 struct sd_lun *un; 11744 dev_t dev; 11745 11746 int format = 0; 11747 int rval; 11748 11749 ASSERT(ssc != NULL); 11750 un = ssc->ssc_un; 11751 ASSERT(un != NULL); 11752 uscmd = ssc->ssc_uscsi_cmd; 11753 ASSERT(uscmd != NULL); 11754 ASSERT(!mutex_owned(SD_MUTEX(un))); 11755 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) { 11756 /* 11757 * If enter here, it indicates that the previous uscsi 11758 * command has not been processed by sd_ssc_assessment. 11759 * This is violating our rules of FMA telemetry processing. 11760 * We should print out this message and the last undisposed 11761 * uscsi command. 11762 */ 11763 if (uscmd->uscsi_cdb != NULL) { 11764 SD_INFO(SD_LOG_SDTEST, un, 11765 "sd_ssc_send is missing the alternative " 11766 "sd_ssc_assessment when running command 0x%x.\n", 11767 uscmd->uscsi_cdb[0]); 11768 } 11769 /* 11770 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be 11771 * the initial status. 11772 */ 11773 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11774 } 11775 11776 /* 11777 * We need to make sure sd_ssc_send will have sd_ssc_assessment 11778 * followed to avoid missing FMA telemetries. 11779 */ 11780 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT; 11781 11782 #ifdef SDDEBUG 11783 switch (dataspace) { 11784 case UIO_USERSPACE: 11785 SD_TRACE(SD_LOG_IO, un, 11786 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un); 11787 break; 11788 case UIO_SYSSPACE: 11789 SD_TRACE(SD_LOG_IO, un, 11790 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un); 11791 break; 11792 default: 11793 SD_TRACE(SD_LOG_IO, un, 11794 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un); 11795 break; 11796 } 11797 #endif 11798 11799 rval = scsi_uscsi_copyin((intptr_t)incmd, flag, 11800 SD_ADDRESS(un), &uscmd); 11801 if (rval != 0) { 11802 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: " 11803 "scsi_uscsi_alloc_and_copyin failed\n", un); 11804 return (rval); 11805 } 11806 11807 if ((uscmd->uscsi_cdb != NULL) && 11808 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 11809 mutex_enter(SD_MUTEX(un)); 11810 un->un_f_format_in_progress = TRUE; 11811 mutex_exit(SD_MUTEX(un)); 11812 format = 1; 11813 } 11814 11815 /* 11816 * Allocate an sd_uscsi_info struct and fill it with the info 11817 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 11818 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 11819 * since we allocate the buf here in this function, we do not 11820 * need to preserve the prior contents of b_private. 11821 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 11822 */ 11823 uip = ssc->ssc_uscsi_info; 11824 uip->ui_flags = path_flag; 11825 uip->ui_cmdp = uscmd; 11826 11827 /* 11828 * Commands sent with priority are intended for error recovery 11829 * situations, and do not have retries performed. 
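 * The USCSI_DIAGNOSE flag applied just below is what conveys this:
 * it marks the command as a diagnostic so the retry logic is
 * bypassed on failure.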
11830 */ 11831 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11832 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11833 } 11834 uscmd->uscsi_flags &= ~USCSI_NOINTR; 11835 11836 dev = SD_GET_DEV(un); 11837 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 11838 sd_uscsi_strategy, NULL, uip); 11839 11840 /* 11841 * mark ssc_flags right after handle_cmd to make sure 11842 * the uscsi has been sent 11843 */ 11844 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED; 11845 11846 #ifdef SDDEBUG 11847 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11848 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11849 uscmd->uscsi_status, uscmd->uscsi_resid); 11850 if (uscmd->uscsi_bufaddr != NULL) { 11851 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11852 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11853 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11854 if (dataspace == UIO_SYSSPACE) { 11855 SD_DUMP_MEMORY(un, SD_LOG_IO, 11856 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11857 uscmd->uscsi_buflen, SD_LOG_HEX); 11858 } 11859 } 11860 #endif 11861 11862 if (format == 1) { 11863 mutex_enter(SD_MUTEX(un)); 11864 un->un_f_format_in_progress = FALSE; 11865 mutex_exit(SD_MUTEX(un)); 11866 } 11867 11868 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd); 11869 11870 return (rval); 11871 } 11872 11873 /* 11874 * Function: sd_ssc_print 11875 * 11876 * Description: Print information available to the console. 11877 * 11878 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 11879 * sd_uscsi_info in. 11880 * sd_severity - log level. 11881 * Context: Kernel thread or interrupt context. 11882 */ 11883 static void 11884 sd_ssc_print(sd_ssc_t *ssc, int sd_severity) 11885 { 11886 struct uscsi_cmd *ucmdp; 11887 struct scsi_device *devp; 11888 dev_info_t *devinfo; 11889 uchar_t *sensep; 11890 int senlen; 11891 union scsi_cdb *cdbp; 11892 uchar_t com; 11893 extern struct scsi_key_strings scsi_cmds[]; 11894 11895 ASSERT(ssc != NULL); 11896 ASSERT(ssc->ssc_un != NULL); 11897 11898 if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT) 11899 return; 11900 ucmdp = ssc->ssc_uscsi_cmd; 11901 devp = SD_SCSI_DEVP(ssc->ssc_un); 11902 devinfo = SD_DEVINFO(ssc->ssc_un); 11903 ASSERT(ucmdp != NULL); 11904 ASSERT(devp != NULL); 11905 ASSERT(devinfo != NULL); 11906 sensep = (uint8_t *)ucmdp->uscsi_rqbuf; 11907 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid; 11908 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb; 11909 11910 /* In certain case (like DOORLOCK), the cdb could be NULL. */ 11911 if (cdbp == NULL) 11912 return; 11913 /* We don't print log if no sense data available. */ 11914 if (senlen == 0) 11915 sensep = NULL; 11916 com = cdbp->scc_cmd; 11917 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com, 11918 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL); 11919 } 11920 11921 /* 11922 * Function: sd_ssc_assessment 11923 * 11924 * Description: We use this function to make an assessment at the point 11925 * where SD driver may encounter a potential error. 11926 * 11927 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 11928 * sd_uscsi_info in. 11929 * tp_assess - a hint of strategy for ereport posting. 11930 * Possible values of tp_assess include: 11931 * SD_FMT_IGNORE - we don't post any ereport because we're 11932 * sure that it is ok to ignore the underlying problems. 11933 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now 11934 * but it might be not correct to ignore the underlying hardware 11935 * error. 
11936 *			SD_FMT_STATUS_CHECK - we will post an ereport with the
11937 *			payload driver-assessment of value "fail" or
11938 *			"fatal" (depending on what information we have here).
11939 *			This assessment value is usually set when the SD
11940 *			driver thinks a potential error has occurred
11941 *			(typically, when the return value of the SCSI command
11942 *			is EIO).
 *			SD_FMT_STANDARD - we will post an ereport with the
11943 *			payload driver-assessment of value "info". This
11944 *			assessment value is set when the SCSI command returned
11945 *			successfully and with sense data sent back.
11946 *
11947 * Context: Kernel thread.
11948 */
11949 static void
11950 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess)
11951 {
11952 	int		senlen = 0;
11953 	struct uscsi_cmd *ucmdp = NULL;
11954 	struct sd_lun	*un;
11955
11956 	ASSERT(ssc != NULL);
11957 	un = ssc->ssc_un;
11958 	ASSERT(un != NULL);
11959 	ucmdp = ssc->ssc_uscsi_cmd;
11960 	ASSERT(ucmdp != NULL);
11961
11962 	if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
11963 		ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT;
11964 	} else {
11965 		/*
11966 		 * Getting here indicates a wrong calling sequence of
11967 		 * sd_ssc_send and sd_ssc_assessment; the two should be
11968 		 * called as a pair, otherwise FMA telemetry may be
11969 		 * lost.
11970 		 */
11971 		if (ucmdp->uscsi_cdb != NULL) {
11972 			SD_INFO(SD_LOG_SDTEST, un,
11973 			    "sd_ssc_assessment is missing the "
11974 			    "alternative sd_ssc_send when running 0x%x, "
11975 			    "or there are superfluous sd_ssc_assessment calls "
11976 			    "for the same sd_ssc_send.\n",
11977 			    ucmdp->uscsi_cdb[0]);
11978 		}
11979 		/*
11980 		 * Set the ssc_flags to the initial value to avoid passing
11981 		 * down dirty flags to the following sd_ssc_send function.
11982 		 */
11983 		ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
11984 		return;
11985 	}
11986
11987 	/*
11988 	 * Only handle an issued command which is waiting for assessment.
11989 	 * A command which is not issued will not have
11990 	 * SSC_FLAGS_INVALID_DATA set, so it's OK to just return here.
11991 	 */
11992 	if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) {
11993 		sd_ssc_print(ssc, SCSI_ERR_INFO);
11994 		return;
11995 	} else {
11996 		/*
11997 		 * For an issued command, we should clear this flag in
11998 		 * order to make the sd_ssc_t structure reusable across
11999 		 * multiple uscsi commands.
12000 		 */
12001 		ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED;
12002 	}
12003
12004 	/*
12005 	 * We do not deal with non-retryable (flag USCSI_DIAGNOSE set)
12006 	 * commands here, and we should clear the ssc_flags before returning.
12007 	 */
12008 	if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) {
12009 		ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12010 		return;
12011 	}
12012
12013 	switch (tp_assess) {
12014 	case SD_FMT_IGNORE:
12015 	case SD_FMT_IGNORE_COMPROMISE:
12016 		break;
12017 	case SD_FMT_STATUS_CHECK:
12018 		/*
12019 		 * For a failed command (including a command that succeeded
12020 		 * but sent back invalid data).
12021 		 */
12022 		sd_ssc_post(ssc, SD_FM_DRV_FATAL);
12023 		break;
12024 	case SD_FMT_STANDARD:
12025 		/*
12026 		 * Always for succeeded commands, probably with sense
12027 		 * data sent back.
12028 		 * Limitation:
12029 		 *	We can only handle a succeeded command with sense
12030 		 *	data sent back when auto-request-sense is enabled.
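 *
 *	As an example (numbers assumed): with uscsi_rqlen = 252 and
 *	uscsi_rqresid = 200, senlen below evaluates to 52 valid bytes
 *	of sense data and an ereport is posted; senlen == 0 means no
 *	sense data arrived, so no ereport is posted.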
12031 */ 12032 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen - 12033 ssc->ssc_uscsi_cmd->uscsi_rqresid; 12034 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) && 12035 (un->un_f_arq_enabled == TRUE) && 12036 senlen > 0 && 12037 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) { 12038 sd_ssc_post(ssc, SD_FM_DRV_NOTICE); 12039 } 12040 break; 12041 default: 12042 /* 12043 * Should not have other type of assessment. 12044 */ 12045 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 12046 "sd_ssc_assessment got wrong " 12047 "sd_type_assessment %d.\n", tp_assess); 12048 break; 12049 } 12050 /* 12051 * Clear up the ssc_flags before return. 12052 */ 12053 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 12054 } 12055 12056 /* 12057 * Function: sd_ssc_post 12058 * 12059 * Description: 1. read the driver property to get fm-scsi-log flag. 12060 * 2. print log if fm_log_capable is non-zero. 12061 * 3. call sd_ssc_ereport_post to post ereport if possible. 12062 * 12063 * Context: May be called from kernel thread or interrupt context. 12064 */ 12065 static void 12066 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess) 12067 { 12068 struct sd_lun *un; 12069 int sd_severity; 12070 12071 ASSERT(ssc != NULL); 12072 un = ssc->ssc_un; 12073 ASSERT(un != NULL); 12074 12075 /* 12076 * We may enter here from sd_ssc_assessment(for USCSI command) or 12077 * by directly called from sdintr context. 12078 * We don't handle a non-disk drive(CD-ROM, removable media). 12079 * Clear the ssc_flags before return in case we've set 12080 * SSC_FLAGS_INVALID_XXX which should be skipped for a non-disk 12081 * driver. 12082 */ 12083 if (ISCD(un) || un->un_f_has_removable_media) { 12084 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 12085 return; 12086 } 12087 12088 switch (sd_assess) { 12089 case SD_FM_DRV_FATAL: 12090 sd_severity = SCSI_ERR_FATAL; 12091 break; 12092 case SD_FM_DRV_RECOVERY: 12093 sd_severity = SCSI_ERR_RECOVERED; 12094 break; 12095 case SD_FM_DRV_RETRY: 12096 sd_severity = SCSI_ERR_RETRYABLE; 12097 break; 12098 case SD_FM_DRV_NOTICE: 12099 sd_severity = SCSI_ERR_INFO; 12100 break; 12101 default: 12102 sd_severity = SCSI_ERR_UNKNOWN; 12103 } 12104 /* print log */ 12105 sd_ssc_print(ssc, sd_severity); 12106 12107 /* always post ereport */ 12108 sd_ssc_ereport_post(ssc, sd_assess); 12109 } 12110 12111 /* 12112 * Function: sd_ssc_set_info 12113 * 12114 * Description: Mark ssc_flags and set ssc_info which would be the 12115 * payload of uderr ereport. This function will cause 12116 * sd_ssc_ereport_post to post uderr ereport only. 12117 * Besides, when ssc_flags == SSC_FLAGS_INVALID_DATA(USCSI), 12118 * the function will also call SD_ERROR or scsi_log for a 12119 * CDROM/removable-media/DDI_FM_NOT_CAPABLE device. 12120 * 12121 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 12122 * sd_uscsi_info in. 12123 * ssc_flags - indicate the sub-category of a uderr. 12124 * comp - this argument is meaningful only when 12125 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible 12126 * values include: 12127 * > 0, SD_ERROR is used with comp as the driver logging 12128 * component; 12129 * = 0, scsi-log is used to log error telemetries; 12130 * < 0, no log available for this telemetry. 12131 * 12132 * Context: Kernel thread or interrupt context 12133 */ 12134 static void 12135 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...) 
12136 { 12137 va_list ap; 12138 12139 ASSERT(ssc != NULL); 12140 ASSERT(ssc->ssc_un != NULL); 12141 12142 ssc->ssc_flags |= ssc_flags; 12143 va_start(ap, fmt); 12144 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap); 12145 va_end(ap); 12146 12147 /* 12148 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command 12149 * with invalid data sent back. For non-uscsi command, the 12150 * following code will be bypassed. 12151 */ 12152 if (ssc_flags & SSC_FLAGS_INVALID_DATA) { 12153 if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) { 12154 /* 12155 * If the error belong to certain component and we 12156 * do not want it to show up on the console, we 12157 * will use SD_ERROR, otherwise scsi_log is 12158 * preferred. 12159 */ 12160 if (comp > 0) { 12161 SD_ERROR(comp, ssc->ssc_un, ssc->ssc_info); 12162 } else if (comp == 0) { 12163 scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label, 12164 CE_WARN, ssc->ssc_info); 12165 } 12166 } 12167 } 12168 } 12169 12170 /* 12171 * Function: sd_buf_iodone 12172 * 12173 * Description: Frees the sd_xbuf & returns the buf to its originator. 12174 * 12175 * Context: May be called from interrupt context. 12176 */ 12177 /* ARGSUSED */ 12178 static void 12179 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 12180 { 12181 struct sd_xbuf *xp; 12182 12183 ASSERT(un != NULL); 12184 ASSERT(bp != NULL); 12185 ASSERT(!mutex_owned(SD_MUTEX(un))); 12186 12187 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 12188 12189 xp = SD_GET_XBUF(bp); 12190 ASSERT(xp != NULL); 12191 12192 /* xbuf is gone after this */ 12193 if (ddi_xbuf_done(bp, un->un_xbuf_attr)) { 12194 mutex_enter(SD_MUTEX(un)); 12195 12196 /* 12197 * Grab time when the cmd completed. 12198 * This is used for determining if the system has been 12199 * idle long enough to make it idle to the PM framework. 12200 * This is for lowering the overhead, and therefore improving 12201 * performance per I/O operation. 12202 */ 12203 un->un_pm_idle_time = ddi_get_time(); 12204 12205 un->un_ncmds_in_driver--; 12206 ASSERT(un->un_ncmds_in_driver >= 0); 12207 SD_INFO(SD_LOG_IO, un, 12208 "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 12209 un->un_ncmds_in_driver); 12210 12211 mutex_exit(SD_MUTEX(un)); 12212 } 12213 12214 biodone(bp); /* bp is gone after this */ 12215 12216 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 12217 } 12218 12219 12220 /* 12221 * Function: sd_uscsi_iodone 12222 * 12223 * Description: Frees the sd_xbuf & returns the buf to its originator. 12224 * 12225 * Context: May be called from interrupt context. 12226 */ 12227 /* ARGSUSED */ 12228 static void 12229 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 12230 { 12231 struct sd_xbuf *xp; 12232 12233 ASSERT(un != NULL); 12234 ASSERT(bp != NULL); 12235 12236 xp = SD_GET_XBUF(bp); 12237 ASSERT(xp != NULL); 12238 ASSERT(!mutex_owned(SD_MUTEX(un))); 12239 12240 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 12241 12242 bp->b_private = xp->xb_private; 12243 12244 mutex_enter(SD_MUTEX(un)); 12245 12246 /* 12247 * Grab time when the cmd completed. 12248 * This is used for determining if the system has been 12249 * idle long enough to make it idle to the PM framework. 12250 * This is for lowering the overhead, and therefore improving 12251 * performance per I/O operation. 
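 *
 * Note also that the kmem_free() below must exactly undo the
 * allocation made in sd_uscsi_strategy(): the xbuf was enlarged by
 * (MAX_SENSE_LENGTH - SENSE_LENGTH) bytes whenever the caller asked
 * for more than SENSE_LENGTH bytes of sense data, and the same
 * uscsi_rqlen test selects the matching size here.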
12252 */ 12253 un->un_pm_idle_time = ddi_get_time(); 12254 12255 un->un_ncmds_in_driver--; 12256 ASSERT(un->un_ncmds_in_driver >= 0); 12257 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 12258 un->un_ncmds_in_driver); 12259 12260 mutex_exit(SD_MUTEX(un)); 12261 12262 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 12263 SENSE_LENGTH) { 12264 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 12265 MAX_SENSE_LENGTH); 12266 } else { 12267 kmem_free(xp, sizeof (struct sd_xbuf)); 12268 } 12269 12270 biodone(bp); 12271 12272 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 12273 } 12274 12275 12276 /* 12277 * Function: sd_mapblockaddr_iostart 12278 * 12279 * Description: Verify request lies within the partition limits for 12280 * the indicated minor device. Issue "overrun" buf if 12281 * request would exceed partition range. Converts 12282 * partition-relative block address to absolute. 12283 * 12284 * Upon exit of this function: 12285 * 1.I/O is aligned 12286 * xp->xb_blkno represents the absolute sector address 12287 * 2.I/O is misaligned 12288 * xp->xb_blkno represents the absolute logical block address 12289 * based on DEV_BSIZE. The logical block address will be 12290 * converted to physical sector address in sd_mapblocksize_\ 12291 * iostart. 12292 * 3.I/O is misaligned but is aligned in "overrun" buf 12293 * xp->xb_blkno represents the absolute logical block address 12294 * based on DEV_BSIZE. The logical block address will be 12295 * converted to physical sector address in sd_mapblocksize_\ 12296 * iostart. But no RMW will be issued in this case. 12297 * 12298 * Context: Can sleep 12299 * 12300 * Issues: This follows what the old code did, in terms of accessing 12301 * some of the partition info in the unit struct without holding 12302 * the mutext. This is a general issue, if the partition info 12303 * can be altered while IO is in progress... as soon as we send 12304 * a buf, its partitioning can be invalid before it gets to the 12305 * device. Probably the right fix is to move partitioning out 12306 * of the driver entirely. 12307 */ 12308 12309 static void 12310 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 12311 { 12312 diskaddr_t nblocks; /* #blocks in the given partition */ 12313 daddr_t blocknum; /* Block number specified by the buf */ 12314 size_t requested_nblocks; 12315 size_t available_nblocks; 12316 int partition; 12317 diskaddr_t partition_offset; 12318 struct sd_xbuf *xp; 12319 int secmask = 0, blknomask = 0; 12320 ushort_t is_aligned = TRUE; 12321 12322 ASSERT(un != NULL); 12323 ASSERT(bp != NULL); 12324 ASSERT(!mutex_owned(SD_MUTEX(un))); 12325 12326 SD_TRACE(SD_LOG_IO_PARTITION, un, 12327 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 12328 12329 xp = SD_GET_XBUF(bp); 12330 ASSERT(xp != NULL); 12331 12332 /* 12333 * If the geometry is not indicated as valid, attempt to access 12334 * the unit & verify the geometry/label. This can be the case for 12335 * removable-media devices, of if the device was opened in 12336 * NDELAY/NONBLOCK mode. 
12337 */ 12338 partition = SDPART(bp->b_edev); 12339 12340 if (!SD_IS_VALID_LABEL(un)) { 12341 sd_ssc_t *ssc; 12342 /* 12343 * Initialize sd_ssc_t for internal uscsi commands 12344 * In case of potential porformance issue, we need 12345 * to alloc memory only if there is invalid label 12346 */ 12347 ssc = sd_ssc_init(un); 12348 12349 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) { 12350 /* 12351 * For removable devices it is possible to start an 12352 * I/O without a media by opening the device in nodelay 12353 * mode. Also for writable CDs there can be many 12354 * scenarios where there is no geometry yet but volume 12355 * manager is trying to issue a read() just because 12356 * it can see TOC on the CD. So do not print a message 12357 * for removables. 12358 */ 12359 if (!un->un_f_has_removable_media) { 12360 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12361 "i/o to invalid geometry\n"); 12362 } 12363 bioerror(bp, EIO); 12364 bp->b_resid = bp->b_bcount; 12365 SD_BEGIN_IODONE(index, un, bp); 12366 12367 sd_ssc_fini(ssc); 12368 return; 12369 } 12370 sd_ssc_fini(ssc); 12371 } 12372 12373 nblocks = 0; 12374 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 12375 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 12376 12377 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1; 12378 secmask = un->un_tgt_blocksize - 1; 12379 12380 if ((bp->b_lblkno & (blknomask)) || (bp->b_bcount & (secmask))) { 12381 is_aligned = FALSE; 12382 } 12383 12384 if (!(NOT_DEVBSIZE(un))) { 12385 /* 12386 * If I/O is aligned, no need to involve RMW(Read Modify Write) 12387 * Convert the logical block number to target's physical sector 12388 * number. 12389 */ 12390 if (is_aligned) { 12391 xp->xb_blkno = SD_SYS2TGTBLOCK(un, xp->xb_blkno); 12392 } else { 12393 switch (un->un_f_rmw_type) { 12394 case SD_RMW_TYPE_RETURN_ERROR: 12395 bp->b_flags |= B_ERROR; 12396 goto error_exit; 12397 12398 case SD_RMW_TYPE_DEFAULT: 12399 mutex_enter(SD_MUTEX(un)); 12400 if (un->un_rmw_msg_timeid == NULL) { 12401 scsi_log(SD_DEVINFO(un), sd_label, 12402 CE_WARN, "I/O request is not " 12403 "aligned with %d disk sector size. " 12404 "It is handled through Read Modify " 12405 "Write but the performance is " 12406 "very low.\n", 12407 un->un_tgt_blocksize); 12408 un->un_rmw_msg_timeid = 12409 timeout(sd_rmw_msg_print_handler, 12410 un, SD_RMW_MSG_PRINT_TIMEOUT); 12411 } else { 12412 un->un_rmw_incre_count ++; 12413 } 12414 mutex_exit(SD_MUTEX(un)); 12415 break; 12416 12417 case SD_RMW_TYPE_NO_WARNING: 12418 default: 12419 break; 12420 } 12421 12422 nblocks = SD_TGT2SYSBLOCK(un, nblocks); 12423 partition_offset = SD_TGT2SYSBLOCK(un, 12424 partition_offset); 12425 } 12426 } 12427 12428 /* 12429 * blocknum is the starting block number of the request. At this 12430 * point it is still relative to the start of the minor device. 12431 */ 12432 blocknum = xp->xb_blkno; 12433 12434 /* 12435 * Legacy: If the starting block number is one past the last block 12436 * in the partition, do not set B_ERROR in the buf. 12437 */ 12438 if (blocknum == nblocks) { 12439 goto error_exit; 12440 } 12441 12442 /* 12443 * Confirm that the first block of the request lies within the 12444 * partition limits. Also the requested number of bytes must be 12445 * a multiple of the system block size. 
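 *
 * A worked sketch (partition size assumed): with nblocks = 1000,
 *
 *	blocknum = 1000          -> legacy EOF case above; error exit
 *	                            without B_ERROR;
 *	blocknum = 1005          -> rejected below with B_ERROR;
 *	blocknum = 996, 8 blocks -> only 4 blocks are available, so an
 *	                            "overrun" buf is cloned for the
 *	                            first 4 and the remainder is
 *	                            reported via b_resid on completion.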
12446 */ 12447 if ((blocknum < 0) || (blocknum >= nblocks) || 12448 ((bp->b_bcount & (DEV_BSIZE - 1)) != 0)) { 12449 bp->b_flags |= B_ERROR; 12450 goto error_exit; 12451 } 12452 12453 /* 12454 * If the requsted # blocks exceeds the available # blocks, that 12455 * is an overrun of the partition. 12456 */ 12457 if ((!NOT_DEVBSIZE(un)) && is_aligned) { 12458 requested_nblocks = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 12459 } else { 12460 requested_nblocks = SD_BYTES2SYSBLOCKS(bp->b_bcount); 12461 } 12462 12463 available_nblocks = (size_t)(nblocks - blocknum); 12464 ASSERT(nblocks >= blocknum); 12465 12466 if (requested_nblocks > available_nblocks) { 12467 size_t resid; 12468 12469 /* 12470 * Allocate an "overrun" buf to allow the request to proceed 12471 * for the amount of space available in the partition. The 12472 * amount not transferred will be added into the b_resid 12473 * when the operation is complete. The overrun buf 12474 * replaces the original buf here, and the original buf 12475 * is saved inside the overrun buf, for later use. 12476 */ 12477 if ((!NOT_DEVBSIZE(un)) && is_aligned) { 12478 resid = SD_TGTBLOCKS2BYTES(un, 12479 (offset_t)(requested_nblocks - available_nblocks)); 12480 } else { 12481 resid = SD_SYSBLOCKS2BYTES( 12482 (offset_t)(requested_nblocks - available_nblocks)); 12483 } 12484 12485 size_t count = bp->b_bcount - resid; 12486 /* 12487 * Note: count is an unsigned entity thus it'll NEVER 12488 * be less than 0 so ASSERT the original values are 12489 * correct. 12490 */ 12491 ASSERT(bp->b_bcount >= resid); 12492 12493 bp = sd_bioclone_alloc(bp, count, blocknum, 12494 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 12495 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 12496 ASSERT(xp != NULL); 12497 } 12498 12499 /* At this point there should be no residual for this buf. */ 12500 ASSERT(bp->b_resid == 0); 12501 12502 /* Convert the block number to an absolute address. */ 12503 xp->xb_blkno += partition_offset; 12504 12505 SD_NEXT_IOSTART(index, un, bp); 12506 12507 SD_TRACE(SD_LOG_IO_PARTITION, un, 12508 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 12509 12510 return; 12511 12512 error_exit: 12513 bp->b_resid = bp->b_bcount; 12514 SD_BEGIN_IODONE(index, un, bp); 12515 SD_TRACE(SD_LOG_IO_PARTITION, un, 12516 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 12517 } 12518 12519 12520 /* 12521 * Function: sd_mapblockaddr_iodone 12522 * 12523 * Description: Completion-side processing for partition management. 12524 * 12525 * Context: May be called under interrupt context 12526 */ 12527 12528 static void 12529 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 12530 { 12531 /* int partition; */ /* Not used, see below. */ 12532 ASSERT(un != NULL); 12533 ASSERT(bp != NULL); 12534 ASSERT(!mutex_owned(SD_MUTEX(un))); 12535 12536 SD_TRACE(SD_LOG_IO_PARTITION, un, 12537 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 12538 12539 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 12540 /* 12541 * We have an "overrun" buf to deal with... 12542 */ 12543 struct sd_xbuf *xp; 12544 struct buf *obp; /* ptr to the original buf */ 12545 12546 xp = SD_GET_XBUF(bp); 12547 ASSERT(xp != NULL); 12548 12549 /* Retrieve the pointer to the original buf */ 12550 obp = (struct buf *)xp->xb_private; 12551 ASSERT(obp != NULL); 12552 12553 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 12554 bioerror(obp, bp->b_error); 12555 12556 sd_bioclone_free(bp); 12557 12558 /* 12559 * Get back the original buf. 
12560 * Note that since the restoration of xb_blkno below 12561 * was removed, the sd_xbuf is not needed. 12562 */ 12563 bp = obp; 12564 /* 12565 * xp = SD_GET_XBUF(bp); 12566 * ASSERT(xp != NULL); 12567 */ 12568 } 12569 12570 /* 12571 * Convert sd->xb_blkno back to a minor-device relative value. 12572 * Note: this has been commented out, as it is not needed in the 12573 * current implementation of the driver (ie, since this function 12574 * is at the top of the layering chains, so the info will be 12575 * discarded) and it is in the "hot" IO path. 12576 * 12577 * partition = getminor(bp->b_edev) & SDPART_MASK; 12578 * xp->xb_blkno -= un->un_offset[partition]; 12579 */ 12580 12581 SD_NEXT_IODONE(index, un, bp); 12582 12583 SD_TRACE(SD_LOG_IO_PARTITION, un, 12584 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 12585 } 12586 12587 12588 /* 12589 * Function: sd_mapblocksize_iostart 12590 * 12591 * Description: Convert between system block size (un->un_sys_blocksize) 12592 * and target block size (un->un_tgt_blocksize). 12593 * 12594 * Context: Can sleep to allocate resources. 12595 * 12596 * Assumptions: A higher layer has already performed any partition validation, 12597 * and converted the xp->xb_blkno to an absolute value relative 12598 * to the start of the device. 12599 * 12600 * It is also assumed that the higher layer has implemented 12601 * an "overrun" mechanism for the case where the request would 12602 * read/write beyond the end of a partition. In this case we 12603 * assume (and ASSERT) that bp->b_resid == 0. 12604 * 12605 * Note: The implementation for this routine assumes the target 12606 * block size remains constant between allocation and transport. 12607 */ 12608 12609 static void 12610 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 12611 { 12612 struct sd_mapblocksize_info *bsp; 12613 struct sd_xbuf *xp; 12614 offset_t first_byte; 12615 daddr_t start_block, end_block; 12616 daddr_t request_bytes; 12617 ushort_t is_aligned = FALSE; 12618 12619 ASSERT(un != NULL); 12620 ASSERT(bp != NULL); 12621 ASSERT(!mutex_owned(SD_MUTEX(un))); 12622 ASSERT(bp->b_resid == 0); 12623 12624 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12625 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 12626 12627 /* 12628 * For a non-writable CD, a write request is an error 12629 */ 12630 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 12631 (un->un_f_mmc_writable_media == FALSE)) { 12632 bioerror(bp, EIO); 12633 bp->b_resid = bp->b_bcount; 12634 SD_BEGIN_IODONE(index, un, bp); 12635 return; 12636 } 12637 12638 /* 12639 * We do not need a shadow buf if the device is using 12640 * un->un_sys_blocksize as its block size or if bcount == 0. 12641 * In this case there is no layer-private data block allocated. 
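 *
 * For requests that do need mapping, the code below works in byte
 * offsets. A sketch with assumed sizes (DEV_BSIZE = 512,
 * un_tgt_blocksize = 2048) for xb_blkno = 3, b_bcount = 1024:
 *
 *	first_byte    = 3 * 512                     = 1536
 *	start_block   = 1536 / 2048                 = 0
 *	end_block     = (1536 + 1024 + 2047) / 2048 = 2
 *	request_bytes = (2 - 0) * 2048              = 4096
 *
 * Since 1536 is not a multiple of 2048 the request is misaligned: a
 * 4096-byte shadow buf is allocated and mbs_copy_offset = 1536
 * locates the caller's data within it.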
12642 */ 12643 if ((un->un_tgt_blocksize == DEV_BSIZE) || 12644 (bp->b_bcount == 0)) { 12645 goto done; 12646 } 12647 12648 #if defined(__i386) || defined(__amd64) 12649 /* We do not support non-block-aligned transfers for ROD devices */ 12650 ASSERT(!ISROD(un)); 12651 #endif 12652 12653 xp = SD_GET_XBUF(bp); 12654 ASSERT(xp != NULL); 12655 12656 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12657 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 12658 un->un_tgt_blocksize, DEV_BSIZE); 12659 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12660 "request start block:0x%x\n", xp->xb_blkno); 12661 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12662 "request len:0x%x\n", bp->b_bcount); 12663 12664 /* 12665 * Allocate the layer-private data area for the mapblocksize layer. 12666 * Layers are allowed to use the xb_private member of the sd_xbuf 12667 * struct to store the pointer to their layer-private data block, but 12668 * each layer also has the responsibility of restoring the prior 12669 * contents of xb_private before returning the buf/xbuf to the 12670 * higher layer that sent it. 12671 * 12672 * Here we save the prior contents of xp->xb_private into the 12673 * bsp->mbs_oprivate field of our layer-private data area. This value 12674 * is restored by sd_mapblocksize_iodone() just prior to freeing up 12675 * the layer-private area and returning the buf/xbuf to the layer 12676 * that sent it. 12677 * 12678 * Note that here we use kmem_zalloc for the allocation as there are 12679 * parts of the mapblocksize code that expect certain fields to be 12680 * zero unless explicitly set to a required value. 12681 */ 12682 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12683 bsp->mbs_oprivate = xp->xb_private; 12684 xp->xb_private = bsp; 12685 12686 /* 12687 * This treats the data on the disk (target) as an array of bytes. 12688 * first_byte is the byte offset, from the beginning of the device, 12689 * to the location of the request. This is converted from a 12690 * un->un_sys_blocksize block address to a byte offset, and then back 12691 * to a block address based upon a un->un_tgt_blocksize block size. 12692 * 12693 * xp->xb_blkno should be absolute upon entry into this function, 12694 * but it is based upon partitions that use the "system" 12695 * block size. It must be adjusted to reflect the block size of 12696 * the target. 12697 * 12698 * Note that end_block is actually the block that follows the last 12699 * block of the request, but that's what is needed for the computation. 12700 */ 12701 first_byte = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno); 12702 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 12703 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 12704 un->un_tgt_blocksize; 12705 12706 /* request_bytes is rounded up to a multiple of the target block size */ 12707 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 12708 12709 /* 12710 * See if the starting address of the request and the request 12711 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 12712 * then we do not need to allocate a shadow buf to handle the request. 12713 */ 12714 if (((first_byte % un->un_tgt_blocksize) == 0) && 12715 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 12716 is_aligned = TRUE; 12717 } 12718 12719 if ((bp->b_flags & B_READ) == 0) { 12720 /* 12721 * Lock the range for a write operation.
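 * To make the arithmetic above concrete (with illustrative
 * numbers only): for un->un_tgt_blocksize == 2048 and a 1024-byte
 * request starting at system block 3, first_byte == 1536,
 * start_block == 0, end_block == 2, and request_bytes == 4096.
 * Since 1536 is not a multiple of 2048 the request is unaligned,
 * so a write must first read the two covering target blocks,
 * merge in the user's data, and write them back.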
An aligned request is 12722 * considered a simple write; otherwise the request must be a 12723 * read-modify-write. 12724 */ 12725 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 12726 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 12727 } 12728 12729 /* 12730 * Alloc a shadow buf if the request is not aligned. Also, this is 12731 * where the READ command is generated for a read-modify-write. (The 12732 * write phase is deferred until after the read completes.) 12733 */ 12734 if (is_aligned == FALSE) { 12735 12736 struct sd_mapblocksize_info *shadow_bsp; 12737 struct sd_xbuf *shadow_xp; 12738 struct buf *shadow_bp; 12739 12740 /* 12741 * Allocate the shadow buf and its associated xbuf. Note that 12742 * after this call the xb_blkno value in both the original 12743 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 12744 * same: absolute, relative to the start of the device, and 12745 * adjusted for the target block size. The b_blkno in the 12746 * shadow buf will also be set to this value. We should never 12747 * change b_blkno in the original bp however. 12748 * 12749 * Note also that the shadow buf will always need to be a 12750 * READ command, regardless of whether the incoming command 12751 * is a READ or a WRITE. 12752 */ 12753 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 12754 xp->xb_blkno, 12755 (int (*)(struct buf *)) sd_mapblocksize_iodone); 12756 12757 shadow_xp = SD_GET_XBUF(shadow_bp); 12758 12759 /* 12760 * Allocate the layer-private data for the shadow buf. 12761 * (No need to preserve xb_private in the shadow xbuf.) 12762 */ 12763 shadow_xp->xb_private = shadow_bsp = 12764 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12765 12766 /* 12767 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 12768 * to figure out where the start of the user data is (based upon 12769 * the system block size) in the data returned by the READ 12770 * command (which will be based upon the target blocksize). Note 12771 * that this is only really used if the request is unaligned. 12772 */ 12773 bsp->mbs_copy_offset = (ssize_t)(first_byte - 12774 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 12775 ASSERT((bsp->mbs_copy_offset >= 0) && 12776 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 12777 12778 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 12779 12780 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 12781 12782 /* Transfer the wmap (if any) to the shadow buf */ 12783 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 12784 bsp->mbs_wmp = NULL; 12785 12786 /* 12787 * The shadow buf goes on from here in place of the 12788 * original buf. 12789 */ 12790 shadow_bsp->mbs_orig_bp = bp; 12791 bp = shadow_bp; 12792 } 12793 12794 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12795 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 12796 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12797 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 12798 request_bytes); 12799 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12800 "sd_mapblocksize_iostart: shadow buf:0x%p\n", bp); 12801 12802 done: 12803 SD_NEXT_IOSTART(index, un, bp); 12804 12805 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12806 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 12807 } 12808 12809 12810 /* 12811 * Function: sd_mapblocksize_iodone 12812 * 12813 * Description: Completion-side processing for block-size mapping.
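 *
 * A hedged sketch of the flow (summarizing the code below): for an
 * unaligned WRITE this routine is entered twice -- once when the
 * initial READ of the shadow buf completes, at which point the
 * user's data is merged in and the buf is redispatched as a WRITE
 * via the sd_wmr_tq taskq, and once more when that WRITE itself
 * completes. Aligned requests pass through a single time with no
 * shadow buf.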
12814 * 12815 * Context: May be called under interrupt context 12816 */ 12817 12818 static void 12819 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 12820 { 12821 struct sd_mapblocksize_info *bsp; 12822 struct sd_xbuf *xp; 12823 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 12824 struct buf *orig_bp; /* ptr to the original buf */ 12825 offset_t shadow_end; 12826 offset_t request_end; 12827 offset_t shadow_start; 12828 ssize_t copy_offset; 12829 size_t copy_length; 12830 size_t shortfall; 12831 uint_t is_write; /* TRUE if this bp is a WRITE */ 12832 uint_t has_wmap; /* TRUE if this bp has a wmap */ 12833 12834 ASSERT(un != NULL); 12835 ASSERT(bp != NULL); 12836 12837 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12838 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 12839 12840 /* 12841 * There is no shadow buf or layer-private data if the target is 12842 * using un->un_sys_blocksize as its block size or if bcount == 0. 12843 */ 12844 if ((un->un_tgt_blocksize == DEV_BSIZE) || 12845 (bp->b_bcount == 0)) { 12846 goto exit; 12847 } 12848 12849 xp = SD_GET_XBUF(bp); 12850 ASSERT(xp != NULL); 12851 12852 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 12853 bsp = xp->xb_private; 12854 12855 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 12856 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 12857 12858 if (is_write) { 12859 /* 12860 * For a WRITE request we must free up the block range that 12861 * we have locked up. This holds regardless of whether this is 12862 * an aligned write request or a read-modify-write request. 12863 */ 12864 sd_range_unlock(un, bsp->mbs_wmp); 12865 bsp->mbs_wmp = NULL; 12866 } 12867 12868 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 12869 /* 12870 * An aligned read or write command will have no shadow buf; 12871 * there is not much else to do with it. 12872 */ 12873 goto done; 12874 } 12875 12876 orig_bp = bsp->mbs_orig_bp; 12877 ASSERT(orig_bp != NULL); 12878 orig_xp = SD_GET_XBUF(orig_bp); 12879 ASSERT(orig_xp != NULL); 12880 ASSERT(!mutex_owned(SD_MUTEX(un))); 12881 12882 if (!is_write && has_wmap) { 12883 /* 12884 * A READ with a wmap means this is the READ phase of a 12885 * read-modify-write. If an error occurred on the READ then 12886 * we do not proceed with the WRITE phase or copy any data. 12887 * Just release the write maps and return with an error. 12888 */ 12889 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 12890 orig_bp->b_resid = orig_bp->b_bcount; 12891 bioerror(orig_bp, bp->b_error); 12892 sd_range_unlock(un, bsp->mbs_wmp); 12893 goto freebuf_done; 12894 } 12895 } 12896 12897 /* 12898 * Here is where we set up to copy the data from the shadow buf 12899 * into the space associated with the original buf. 12900 * 12901 * To deal with the conversion between block sizes, these 12902 * computations treat the data as an array of bytes, with the 12903 * first byte (byte 0) corresponding to the first byte in the 12904 * first block on the disk. 12905 */ 12906 12907 /* 12908 * shadow_start and shadow_end delimit the byte range of the 12909 * data returned with the shadow IO request. 12910 */ 12911 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12912 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 12913 12914 /* 12915 * copy_offset gives the offset (in bytes) from the start of the first 12916 * block of the READ request to the beginning of the data.
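 * (Continuing the illustrative numbers from the iostart comment
 * above: for the unaligned 1024-byte request at first_byte == 1536
 * with 2048-byte target blocks, shadow_start == 0, copy_offset ==
 * 1536, and copy_length == 1024, so the user's data begins 1536
 * bytes into the shadow buffer.)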
We retrieve 12917 * this value from mbs_copy_offset in the layer-private data area, 12918 * where it was saved by sd_mapblocksize_iostart(). copy_length gives 12919 * the amount of data to be copied (in bytes). 12920 */ 12921 copy_offset = bsp->mbs_copy_offset; 12922 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 12923 copy_length = orig_bp->b_bcount; 12924 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 12925 12926 /* 12927 * Set up the resid and error fields of orig_bp as appropriate. 12928 */ 12929 if (shadow_end >= request_end) { 12930 /* We got all the requested data; set resid to zero */ 12931 orig_bp->b_resid = 0; 12932 } else { 12933 /* 12934 * We failed to get enough data to fully satisfy the original 12935 * request. Just copy back whatever data we got and set 12936 * up the residual and error code as required. 12937 * 12938 * 'shortfall' is the amount by which the data received with the 12939 * shadow buf has "fallen short" of the requested amount. 12940 */ 12941 shortfall = (size_t)(request_end - shadow_end); 12942 12943 if (shortfall > orig_bp->b_bcount) { 12944 /* 12945 * We did not get enough data to even partially 12946 * fulfill the original request. The residual is 12947 * equal to the amount requested. 12948 */ 12949 orig_bp->b_resid = orig_bp->b_bcount; 12950 } else { 12951 /* 12952 * We did not get all the data that we requested 12953 * from the device, but we will try to return what 12954 * portion we did get. 12955 */ 12956 orig_bp->b_resid = shortfall; 12957 } 12958 ASSERT(copy_length >= orig_bp->b_resid); 12959 copy_length -= orig_bp->b_resid; 12960 } 12961 12962 /* Propagate the error code from the shadow buf to the original buf */ 12963 bioerror(orig_bp, bp->b_error); 12964 12965 if (is_write) { 12966 goto freebuf_done; /* No data copying for a WRITE */ 12967 } 12968 12969 if (has_wmap) { 12970 /* 12971 * This is a READ command from the READ phase of a 12972 * read-modify-write request. We have to copy the data given 12973 * by the user OVER the data returned by the READ command, 12974 * then convert the command from a READ to a WRITE and send 12975 * it back to the target. 12976 */ 12977 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 12978 copy_length); 12979 12980 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 12981 12982 /* 12983 * Dispatch the WRITE command to the taskq thread, which 12984 * will in turn send the command to the target. When the 12985 * WRITE command completes, we (sd_mapblocksize_iodone()) 12986 * will get called again as part of the iodone chain 12987 * processing for it. Note that we will still be dealing 12988 * with the shadow buf at that point. 12989 */ 12990 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 12991 KM_NOSLEEP) != 0) { 12992 /* 12993 * Dispatch was successful so we are done. Return 12994 * without going any higher up the iodone chain. Do 12995 * not free up any layer-private data until after the 12996 * WRITE completes. 12997 */ 12998 return; 12999 } 13000 13001 /* 13002 * Dispatch of the WRITE command failed; set up the error 13003 * condition and send this IO back up the iodone chain. 13004 */ 13005 bioerror(orig_bp, EIO); 13006 orig_bp->b_resid = orig_bp->b_bcount; 13007 13008 } else { 13009 /* 13010 * This is a regular READ request (ie, not a RMW). Copy the 13011 * data from the shadow buf into the original buf.
The 13012 * copy_offset compensates for any "misalignment" between the 13013 * shadow buf (with its un->un_tgt_blocksize blocks) and the 13014 * original buf (with its un->un_sys_blocksize blocks). 13015 */ 13016 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 13017 copy_length); 13018 } 13019 13020 freebuf_done: 13021 13022 /* 13023 * At this point we still have both the shadow buf AND the original 13024 * buf to deal with, as well as the layer-private data area in each. 13025 * Local variables are as follows: 13026 * 13027 * bp -- points to shadow buf 13028 * xp -- points to xbuf of shadow buf 13029 * bsp -- points to layer-private data area of shadow buf 13030 * orig_bp -- points to original buf 13031 * 13032 * First free the shadow buf and its associated xbuf, then free the 13033 * layer-private data area from the shadow buf. There is no need to 13034 * restore xb_private in the shadow xbuf. 13035 */ 13036 sd_shadow_buf_free(bp); 13037 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 13038 13039 /* 13040 * Now update the local variables to point to the original buf, xbuf, 13041 * and layer-private area. 13042 */ 13043 bp = orig_bp; 13044 xp = SD_GET_XBUF(bp); 13045 ASSERT(xp != NULL); 13046 ASSERT(xp == orig_xp); 13047 bsp = xp->xb_private; 13048 ASSERT(bsp != NULL); 13049 13050 done: 13051 /* 13052 * Restore xb_private to whatever it was set to by the next higher 13053 * layer in the chain, then free the layer-private data area. 13054 */ 13055 xp->xb_private = bsp->mbs_oprivate; 13056 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 13057 13058 exit: 13059 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 13060 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 13061 13062 SD_NEXT_IODONE(index, un, bp); 13063 } 13064 13065 13066 /* 13067 * Function: sd_checksum_iostart 13068 * 13069 * Description: A stub function for a layer that's currently not used. 13070 * For now just a placeholder. 13071 * 13072 * Context: Kernel thread context 13073 */ 13074 13075 static void 13076 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 13077 { 13078 ASSERT(un != NULL); 13079 ASSERT(bp != NULL); 13080 ASSERT(!mutex_owned(SD_MUTEX(un))); 13081 SD_NEXT_IOSTART(index, un, bp); 13082 } 13083 13084 13085 /* 13086 * Function: sd_checksum_iodone 13087 * 13088 * Description: A stub function for a layer that's currently not used. 13089 * For now just a placeholder. 13090 * 13091 * Context: May be called under interrupt context 13092 */ 13093 13094 static void 13095 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 13096 { 13097 ASSERT(un != NULL); 13098 ASSERT(bp != NULL); 13099 ASSERT(!mutex_owned(SD_MUTEX(un))); 13100 SD_NEXT_IODONE(index, un, bp); 13101 } 13102 13103 13104 /* 13105 * Function: sd_checksum_uscsi_iostart 13106 * 13107 * Description: A stub function for a layer that's currently not used. 13108 * For now just a placeholder. 13109 * 13110 * Context: Kernel thread context 13111 */ 13112 13113 static void 13114 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 13115 { 13116 ASSERT(un != NULL); 13117 ASSERT(bp != NULL); 13118 ASSERT(!mutex_owned(SD_MUTEX(un))); 13119 SD_NEXT_IOSTART(index, un, bp); 13120 } 13121 13122 13123 /* 13124 * Function: sd_checksum_uscsi_iodone 13125 * 13126 * Description: A stub function for a layer that's currently not used. 13127 * For now just a placeholder. 
13128 * 13129 * Context: May be called under interrupt context 13130 */ 13131 13132 static void 13133 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 13134 { 13135 ASSERT(un != NULL); 13136 ASSERT(bp != NULL); 13137 ASSERT(!mutex_owned(SD_MUTEX(un))); 13138 SD_NEXT_IODONE(index, un, bp); 13139 } 13140 13141 13142 /* 13143 * Function: sd_pm_iostart 13144 * 13145 * Description: iostart-side routine for power management. 13146 * 13147 * Context: Kernel thread context 13148 */ 13149 13150 static void 13151 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 13152 { 13153 ASSERT(un != NULL); 13154 ASSERT(bp != NULL); 13155 ASSERT(!mutex_owned(SD_MUTEX(un))); 13156 ASSERT(!mutex_owned(&un->un_pm_mutex)); 13157 13158 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 13159 13160 if (sd_pm_entry(un) != DDI_SUCCESS) { 13161 /* 13162 * Set up to return the failed buf back up the 'iodone' 13163 * side of the calling chain. 13164 */ 13165 bioerror(bp, EIO); 13166 bp->b_resid = bp->b_bcount; 13167 13168 SD_BEGIN_IODONE(index, un, bp); 13169 13170 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 13171 return; 13172 } 13173 13174 SD_NEXT_IOSTART(index, un, bp); 13175 13176 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 13177 } 13178 13179 13180 /* 13181 * Function: sd_pm_iodone 13182 * 13183 * Description: iodone-side routine for power management. 13184 * 13185 * Context: may be called from interrupt context 13186 */ 13187 13188 static void 13189 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 13190 { 13191 ASSERT(un != NULL); 13192 ASSERT(bp != NULL); 13193 ASSERT(!mutex_owned(&un->un_pm_mutex)); 13194 13195 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 13196 13197 /* 13198 * After attach the following flag is only read, so don't 13199 * take the penalty of acquiring a mutex for it. 13200 */ 13201 if (un->un_f_pm_is_enabled == TRUE) { 13202 sd_pm_exit(un); 13203 } 13204 13205 SD_NEXT_IODONE(index, un, bp); 13206 13207 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 13208 } 13209 13210 13211 /* 13212 * Function: sd_core_iostart 13213 * 13214 * Description: Primary driver function for enqueuing buf(9S) structs from 13215 * the system and initiating IO to the target device 13216 * 13217 * Context: Kernel thread context. Can sleep. 13218 * 13219 * Assumptions: - The given xp->xb_blkno is absolute 13220 * (ie, relative to the start of the device). 13221 * - The IO is to be done using the native blocksize of 13222 * the device, as specified in un->un_tgt_blocksize. 13223 */ 13224 /* ARGSUSED */ 13225 static void 13226 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 13227 { 13228 struct sd_xbuf *xp; 13229 13230 ASSERT(un != NULL); 13231 ASSERT(bp != NULL); 13232 ASSERT(!mutex_owned(SD_MUTEX(un))); 13233 ASSERT(bp->b_resid == 0); 13234 13235 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 13236 13237 xp = SD_GET_XBUF(bp); 13238 ASSERT(xp != NULL); 13239 13240 mutex_enter(SD_MUTEX(un)); 13241 13242 /* 13243 * If we are currently in the failfast state, fail any new IO 13244 * that has B_FAILFAST set, then return. 13245 */ 13246 if ((bp->b_flags & B_FAILFAST) && 13247 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 13248 mutex_exit(SD_MUTEX(un)); 13249 bioerror(bp, EIO); 13250 bp->b_resid = bp->b_bcount; 13251 SD_BEGIN_IODONE(index, un, bp); 13252 return; 13253 } 13254 13255 if (SD_IS_DIRECT_PRIORITY(xp)) { 13256 /* 13257 * Priority command -- transport it immediately.
13258 * 13259 * Note: We may want to assert that USCSI_DIAGNOSE is set, 13260 * because all direct priority commands should be associated 13261 * with error recovery actions which we don't want to retry. 13262 */ 13263 sd_start_cmds(un, bp); 13264 } else { 13265 /* 13266 * Normal command -- add it to the wait queue, then start 13267 * transporting commands from the wait queue. 13268 */ 13269 sd_add_buf_to_waitq(un, bp); 13270 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 13271 sd_start_cmds(un, NULL); 13272 } 13273 13274 mutex_exit(SD_MUTEX(un)); 13275 13276 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 13277 } 13278 13279 13280 /* 13281 * Function: sd_init_cdb_limits 13282 * 13283 * Description: This is to handle scsi_pkt initialization differences 13284 * between the driver platforms. 13285 * 13286 * Legacy behaviors: 13287 * 13288 * If the block number or the sector count exceeds the 13289 * capabilities of a Group 0 command, shift over to a 13290 * Group 1 command. We don't blindly use Group 1 13291 * commands because a) some drives (CDC Wren IVs) get a 13292 * bit confused, and b) there is probably a fair amount 13293 * of speed difference for a target to receive and decode 13294 * a 10 byte command instead of a 6 byte command. 13295 * 13296 * The xfer time difference of 6 vs 10 byte CDBs is 13297 * still significant so this code is still worthwhile. 13298 * 10 byte CDBs are very inefficient with the fas HBA driver 13299 * and older disks. Each CDB byte took 1 usec with some 13300 * popular disks. 13301 * 13302 * Context: Must be called at attach time 13303 */ 13304 13305 static void 13306 sd_init_cdb_limits(struct sd_lun *un) 13307 { 13308 int hba_cdb_limit; 13309 13310 /* 13311 * Use CDB_GROUP1 commands for most devices except for 13312 * parallel SCSI fixed drives in which case we get better 13313 * performance using CDB_GROUP0 commands (where applicable). 13314 */ 13315 un->un_mincdb = SD_CDB_GROUP1; 13316 #if !defined(__fibre) 13317 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 13318 !un->un_f_has_removable_media) { 13319 un->un_mincdb = SD_CDB_GROUP0; 13320 } 13321 #endif 13322 13323 /* 13324 * Try to read the max-cdb-length supported by HBA. 13325 */ 13326 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 13327 if (0 >= un->un_max_hba_cdb) { 13328 un->un_max_hba_cdb = CDB_GROUP4; 13329 hba_cdb_limit = SD_CDB_GROUP4; 13330 } else if (0 < un->un_max_hba_cdb && 13331 un->un_max_hba_cdb < CDB_GROUP1) { 13332 hba_cdb_limit = SD_CDB_GROUP0; 13333 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 13334 un->un_max_hba_cdb < CDB_GROUP5) { 13335 hba_cdb_limit = SD_CDB_GROUP1; 13336 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 13337 un->un_max_hba_cdb < CDB_GROUP4) { 13338 hba_cdb_limit = SD_CDB_GROUP5; 13339 } else { 13340 hba_cdb_limit = SD_CDB_GROUP4; 13341 } 13342 13343 /* 13344 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 13345 * commands for fixed disks unless we are building for a 32 bit 13346 * kernel. 13347 */ 13348 #ifdef _LP64 13349 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13350 min(hba_cdb_limit, SD_CDB_GROUP4); 13351 #else 13352 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13353 min(hba_cdb_limit, SD_CDB_GROUP1); 13354 #endif 13355 13356 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 13357 ? sizeof (struct scsi_arq_status) : 1); 13358 un->un_cmd_timeout = (ushort_t)sd_io_time; 13359 un->un_uscsi_timeout = ((ISCD(un)) ? 
2 : 1) * un->un_cmd_timeout; 13360 } 13361 13362 13363 /* 13364 * Function: sd_initpkt_for_buf 13365 * 13366 * Description: Allocate and initialize for transport a scsi_pkt struct, 13367 * based upon the info specified in the given buf struct. 13368 * 13369 * Assumes the xb_blkno in the request is absolute (ie, 13370 * relative to the start of the device, NOT the partition!). 13371 * Also assumes that the request is using the native block 13372 * size of the device (as returned by the READ CAPACITY 13373 * command). 13374 * 13375 * Return Code: SD_PKT_ALLOC_SUCCESS 13376 * SD_PKT_ALLOC_FAILURE 13377 * SD_PKT_ALLOC_FAILURE_NO_DMA 13378 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13379 * 13380 * Context: Kernel thread and may be called from software interrupt context 13381 * as part of a sdrunout callback. This function may not block or 13382 * call routines that block 13383 */ 13384 13385 static int 13386 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 13387 { 13388 struct sd_xbuf *xp; 13389 struct scsi_pkt *pktp = NULL; 13390 struct sd_lun *un; 13391 size_t blockcount; 13392 daddr_t startblock; 13393 int rval; 13394 int cmd_flags; 13395 13396 ASSERT(bp != NULL); 13397 ASSERT(pktpp != NULL); 13398 xp = SD_GET_XBUF(bp); 13399 ASSERT(xp != NULL); 13400 un = SD_GET_UN(bp); 13401 ASSERT(un != NULL); 13402 ASSERT(mutex_owned(SD_MUTEX(un))); 13403 ASSERT(bp->b_resid == 0); 13404 13405 SD_TRACE(SD_LOG_IO_CORE, un, 13406 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 13407 13408 mutex_exit(SD_MUTEX(un)); 13409 13410 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13411 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 13412 /* 13413 * Already have a scsi_pkt -- just need DMA resources. 13414 * We must recompute the CDB in case the mapping returns 13415 * a nonzero pkt_resid. 13416 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 13417 * that is being retried, the unmap/remap of the DMA resources 13418 * will result in the entire transfer starting over again 13419 * from the very first block. 13420 */ 13421 ASSERT(xp->xb_pktp != NULL); 13422 pktp = xp->xb_pktp; 13423 } else { 13424 pktp = NULL; 13425 } 13426 #endif /* __i386 || __amd64 */ 13427 13428 startblock = xp->xb_blkno; /* Absolute block num. */ 13429 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 13430 13431 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 13432 13433 /* 13434 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 13435 * call scsi_init_pkt, and build the CDB. 13436 */ 13437 rval = sd_setup_rw_pkt(un, &pktp, bp, 13438 cmd_flags, sdrunout, (caddr_t)un, 13439 startblock, blockcount); 13440 13441 if (rval == 0) { 13442 /* 13443 * Success. 13444 * 13445 * If partial DMA is being used and required for this transfer, 13446 * set it up here.
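 * (An illustrative sketch with assumed sizes: if the HBA could
 * bind only 256KB of a 1MB request, pkt_resid comes back as the
 * unbound 768KB; that byte count is parked in xp->xb_dma_resid
 * and the remaining portions are set up later via
 * sd_setup_next_rw_pkt().)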
13447 */ 13448 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 13449 (pktp->pkt_resid != 0)) { 13450 13451 /* 13452 * Save the CDB length and pkt_resid for the 13453 * next xfer 13454 */ 13455 xp->xb_dma_resid = pktp->pkt_resid; 13456 13457 /* rezero resid */ 13458 pktp->pkt_resid = 0; 13459 13460 } else { 13461 xp->xb_dma_resid = 0; 13462 } 13463 13464 pktp->pkt_flags = un->un_tagflags; 13465 pktp->pkt_time = un->un_cmd_timeout; 13466 pktp->pkt_comp = sdintr; 13467 13468 pktp->pkt_private = bp; 13469 *pktpp = pktp; 13470 13471 SD_TRACE(SD_LOG_IO_CORE, un, 13472 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 13473 13474 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13475 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 13476 #endif 13477 13478 mutex_enter(SD_MUTEX(un)); 13479 return (SD_PKT_ALLOC_SUCCESS); 13480 13481 } 13482 13483 /* 13484 * SD_PKT_ALLOC_FAILURE is the only expected failure code 13485 * from sd_setup_rw_pkt. 13486 */ 13487 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 13488 13489 if (rval == SD_PKT_ALLOC_FAILURE) { 13490 *pktpp = NULL; 13491 /* 13492 * Set the driver state to RWAIT to indicate the driver 13493 * is waiting on resource allocations. The driver will not 13494 * suspend, pm_suspend, or detach while the state is RWAIT. 13495 */ 13496 mutex_enter(SD_MUTEX(un)); 13497 New_state(un, SD_STATE_RWAIT); 13498 13499 SD_ERROR(SD_LOG_IO_CORE, un, 13500 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp); 13501 13502 if ((bp->b_flags & B_ERROR) != 0) { 13503 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 13504 } 13505 return (SD_PKT_ALLOC_FAILURE); 13506 } else { 13507 /* 13508 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13509 * 13510 * This should never happen. Maybe someone messed with the 13511 * kernel's minphys? 13512 */ 13513 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13514 "Request rejected: too large for CDB: " 13515 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 13516 SD_ERROR(SD_LOG_IO_CORE, un, 13517 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 13518 mutex_enter(SD_MUTEX(un)); 13519 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13520 13521 } 13522 } 13523 13524 13525 /* 13526 * Function: sd_destroypkt_for_buf 13527 * 13528 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 13529 * 13530 * Context: Kernel thread or interrupt context 13531 */ 13532 13533 static void 13534 sd_destroypkt_for_buf(struct buf *bp) 13535 { 13536 ASSERT(bp != NULL); 13537 ASSERT(SD_GET_UN(bp) != NULL); 13538 13539 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13540 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 13541 13542 ASSERT(SD_GET_PKTP(bp) != NULL); 13543 scsi_destroy_pkt(SD_GET_PKTP(bp)); 13544 13545 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13546 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 13547 } 13548 13549 /* 13550 * Function: sd_setup_rw_pkt 13551 * 13552 * Description: Determines appropriate CDB group for the requested LBA 13553 * and transfer length, calls scsi_init_pkt, and builds 13554 * the CDB. Do not use for partial DMA transfers except 13555 * for the initial transfer since the CDB size must 13556 * remain constant. 13557 * 13558 * Context: Kernel thread and may be called from software interrupt 13559 * context as part of a sdrunout callback.
This function may not 13560 * block or call routines that block 13561 */ 13562 13563 13564 int 13565 sd_setup_rw_pkt(struct sd_lun *un, 13566 struct scsi_pkt **pktpp, struct buf *bp, int flags, 13567 int (*callback)(caddr_t), caddr_t callback_arg, 13568 diskaddr_t lba, uint32_t blockcount) 13569 { 13570 struct scsi_pkt *return_pktp; 13571 union scsi_cdb *cdbp; 13572 struct sd_cdbinfo *cp = NULL; 13573 int i; 13574 13575 /* 13576 * See which size CDB to use, based upon the request. 13577 */ 13578 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 13579 13580 /* 13581 * Check lba and block count against sd_cdbtab limits. 13582 * In the partial DMA case, we have to use the same size 13583 * CDB for all the transfers. Check lba + blockcount 13584 * against the max LBA so we know that segment of the 13585 * transfer can use the CDB we select. 13586 */ 13587 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 13588 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 13589 13590 /* 13591 * The command will fit into the CDB type 13592 * specified by sd_cdbtab[i]. 13593 */ 13594 cp = sd_cdbtab + i; 13595 13596 /* 13597 * Call scsi_init_pkt so we can fill in the 13598 * CDB. 13599 */ 13600 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 13601 bp, cp->sc_grpcode, un->un_status_len, 0, 13602 flags, callback, callback_arg); 13603 13604 if (return_pktp != NULL) { 13605 13606 /* 13607 * Return new value of pkt 13608 */ 13609 *pktpp = return_pktp; 13610 13611 /* 13612 * To be safe, zero the CDB ensuring there is 13613 * no leftover data from a previous command. 13614 */ 13615 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 13616 13617 /* 13618 * Handle partial DMA mapping 13619 */ 13620 if (return_pktp->pkt_resid != 0) { 13621 13622 /* 13623 * Not going to xfer as many blocks as 13624 * originally expected 13625 */ 13626 blockcount -= 13627 SD_BYTES2TGTBLOCKS(un, 13628 return_pktp->pkt_resid); 13629 } 13630 13631 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 13632 13633 /* 13634 * Set command byte based on the CDB 13635 * type we matched. 13636 */ 13637 cdbp->scc_cmd = cp->sc_grpmask | 13638 ((bp->b_flags & B_READ) ? 13639 SCMD_READ : SCMD_WRITE); 13640 13641 SD_FILL_SCSI1_LUN(un, return_pktp); 13642 13643 /* 13644 * Fill in LBA and length 13645 */ 13646 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 13647 (cp->sc_grpcode == CDB_GROUP4) || 13648 (cp->sc_grpcode == CDB_GROUP0) || 13649 (cp->sc_grpcode == CDB_GROUP5)); 13650 13651 if (cp->sc_grpcode == CDB_GROUP1) { 13652 FORMG1ADDR(cdbp, lba); 13653 FORMG1COUNT(cdbp, blockcount); 13654 return (0); 13655 } else if (cp->sc_grpcode == CDB_GROUP4) { 13656 FORMG4LONGADDR(cdbp, lba); 13657 FORMG4COUNT(cdbp, blockcount); 13658 return (0); 13659 } else if (cp->sc_grpcode == CDB_GROUP0) { 13660 FORMG0ADDR(cdbp, lba); 13661 FORMG0COUNT(cdbp, blockcount); 13662 return (0); 13663 } else if (cp->sc_grpcode == CDB_GROUP5) { 13664 FORMG5ADDR(cdbp, lba); 13665 FORMG5COUNT(cdbp, blockcount); 13666 return (0); 13667 } 13668 13669 /* 13670 * It should be impossible to not match one 13671 * of the CDB types above, so we should never 13672 * reach this point. Set the CDB command byte 13673 * to test-unit-ready to avoid writing 13674 * to somewhere we don't intend. 13675 */ 13676 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 13677 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13678 } else { 13679 /* 13680 * Couldn't get scsi_pkt 13681 */ 13682 return (SD_PKT_ALLOC_FAILURE); 13683 } 13684 } 13685 } 13686 13687 /* 13688 * None of the available CDB types were suitable.
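 * For reference, the per-group limits being checked above are
 * roughly (summarized from the SCSI spec, not from sd_cdbtab
 * itself): 6-byte (Group 0) CDBs carry a 21-bit LBA and up to 256
 * blocks; 10-byte (Group 1), a 32-bit LBA and a 16-bit count;
 * 12-byte (Group 5), a 32-bit LBA and a 32-bit count; and 16-byte
 * (Group 4), a 64-bit LBA and a 32-bit count.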
This really 13689 * should never happen: on a 64 bit system we support 13690 * READ16/WRITE16 which will hold an entire 64 bit disk address 13691 * and on a 32 bit system we will refuse to bind to a device 13692 * larger than 2TB so addresses will never be larger than 32 bits. 13693 */ 13694 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13695 } 13696 13697 /* 13698 * Function: sd_setup_next_rw_pkt 13699 * 13700 * Description: Setup packet for partial DMA transfers, except for the 13701 * initial transfer. sd_setup_rw_pkt should be used for 13702 * the initial transfer. 13703 * 13704 * Context: Kernel thread and may be called from interrupt context. 13705 */ 13706 13707 int 13708 sd_setup_next_rw_pkt(struct sd_lun *un, 13709 struct scsi_pkt *pktp, struct buf *bp, 13710 diskaddr_t lba, uint32_t blockcount) 13711 { 13712 uchar_t com; 13713 union scsi_cdb *cdbp; 13714 uchar_t cdb_group_id; 13715 13716 ASSERT(pktp != NULL); 13717 ASSERT(pktp->pkt_cdbp != NULL); 13718 13719 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 13720 com = cdbp->scc_cmd; 13721 cdb_group_id = CDB_GROUPID(com); 13722 13723 ASSERT((cdb_group_id == CDB_GROUPID_0) || 13724 (cdb_group_id == CDB_GROUPID_1) || 13725 (cdb_group_id == CDB_GROUPID_4) || 13726 (cdb_group_id == CDB_GROUPID_5)); 13727 13728 /* 13729 * Move pkt to the next portion of the xfer. 13730 * func is NULL_FUNC so we do not have to release 13731 * the disk mutex here. 13732 */ 13733 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 13734 NULL_FUNC, NULL) == pktp) { 13735 /* Success. Handle partial DMA */ 13736 if (pktp->pkt_resid != 0) { 13737 blockcount -= 13738 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 13739 } 13740 13741 cdbp->scc_cmd = com; 13742 SD_FILL_SCSI1_LUN(un, pktp); 13743 if (cdb_group_id == CDB_GROUPID_1) { 13744 FORMG1ADDR(cdbp, lba); 13745 FORMG1COUNT(cdbp, blockcount); 13746 return (0); 13747 } else if (cdb_group_id == CDB_GROUPID_4) { 13748 FORMG4LONGADDR(cdbp, lba); 13749 FORMG4COUNT(cdbp, blockcount); 13750 return (0); 13751 } else if (cdb_group_id == CDB_GROUPID_0) { 13752 FORMG0ADDR(cdbp, lba); 13753 FORMG0COUNT(cdbp, blockcount); 13754 return (0); 13755 } else if (cdb_group_id == CDB_GROUPID_5) { 13756 FORMG5ADDR(cdbp, lba); 13757 FORMG5COUNT(cdbp, blockcount); 13758 return (0); 13759 } 13760 13761 /* Unreachable */ 13762 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13763 } 13764 13765 /* 13766 * Error setting up next portion of cmd transfer. 13767 * Something is definitely very wrong and this 13768 * should not happen. 13769 */ 13770 return (SD_PKT_ALLOC_FAILURE); 13771 } 13772 13773 /* 13774 * Function: sd_initpkt_for_uscsi 13775 * 13776 * Description: Allocate and initialize for transport a scsi_pkt struct, 13777 * based upon the info specified in the given uscsi_cmd struct. 13778 * 13779 * Return Code: SD_PKT_ALLOC_SUCCESS 13780 * SD_PKT_ALLOC_FAILURE 13781 * SD_PKT_ALLOC_FAILURE_NO_DMA 13782 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13783 * 13784 * Context: Kernel thread and may be called from software interrupt context 13785 * as part of a sdrunout callback. 
This function may not block or 13786 * call routines that block 13787 */ 13788 13789 static int 13790 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 13791 { 13792 struct uscsi_cmd *uscmd; 13793 struct sd_xbuf *xp; 13794 struct scsi_pkt *pktp; 13795 struct sd_lun *un; 13796 uint32_t flags = 0; 13797 13798 ASSERT(bp != NULL); 13799 ASSERT(pktpp != NULL); 13800 xp = SD_GET_XBUF(bp); 13801 ASSERT(xp != NULL); 13802 un = SD_GET_UN(bp); 13803 ASSERT(un != NULL); 13804 ASSERT(mutex_owned(SD_MUTEX(un))); 13805 13806 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 13807 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 13808 ASSERT(uscmd != NULL); 13809 13810 SD_TRACE(SD_LOG_IO_CORE, un, 13811 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 13812 13813 /* 13814 * Allocate the scsi_pkt for the command. 13815 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 13816 * during scsi_init_pkt time and will continue to use the 13817 * same path as long as the same scsi_pkt is used without 13818 * intervening scsi_dmafree(). Since the uscsi code does 13819 * not call scsi_dmafree() before retrying a failed command, it 13820 * is necessary to make sure PKT_DMA_PARTIAL flag is NOT 13821 * set such that scsi_vhci can use other available path for 13822 * retry. Besides, uscsi commands do not allow DMA breakup, 13823 * so there is no need to set PKT_DMA_PARTIAL flag. 13824 */ 13825 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 13826 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 13827 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 13828 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status) 13829 - sizeof (struct scsi_extended_sense)), 0, 13830 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ, 13831 sdrunout, (caddr_t)un); 13832 } else { 13833 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 13834 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 13835 sizeof (struct scsi_arq_status), 0, 13836 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 13837 sdrunout, (caddr_t)un); 13838 } 13839 13840 if (pktp == NULL) { 13841 *pktpp = NULL; 13842 /* 13843 * Set the driver state to RWAIT to indicate the driver 13844 * is waiting on resource allocations. The driver will not 13845 * suspend, pm_suspend, or detach while the state is RWAIT. 13846 */ 13847 New_state(un, SD_STATE_RWAIT); 13848 13849 SD_ERROR(SD_LOG_IO_CORE, un, 13850 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 13851 13852 if ((bp->b_flags & B_ERROR) != 0) { 13853 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 13854 } 13855 return (SD_PKT_ALLOC_FAILURE); 13856 } 13857 13858 /* 13859 * We do not do DMA breakup for USCSI commands, so return failure 13860 * here if all the needed DMA resources were not allocated. 13861 */ 13862 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 13863 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 13864 scsi_destroy_pkt(pktp); 13865 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 13866 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 13867 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 13868 } 13869 13870 /* Init the cdb from the given uscsi struct */ 13871 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 13872 uscmd->uscsi_cdb[0], 0, 0, 0); 13873 13874 SD_FILL_SCSI1_LUN(un, pktp); 13875 13876 /* 13877 * Set up the optional USCSI flags. See the uscsi (7I) man page 13878 * for a listing of the supported flags.
13879 */ 13880 13881 if (uscmd->uscsi_flags & USCSI_SILENT) { 13882 flags |= FLAG_SILENT; 13883 } 13884 13885 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 13886 flags |= FLAG_DIAGNOSE; 13887 } 13888 13889 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 13890 flags |= FLAG_ISOLATE; 13891 } 13892 13893 if (un->un_f_is_fibre == FALSE) { 13894 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 13895 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 13896 } 13897 } 13898 13899 /* 13900 * Set the pkt flags here so we save time later. 13901 * Note: These flags are NOT in the uscsi man page!!! 13902 */ 13903 if (uscmd->uscsi_flags & USCSI_HEAD) { 13904 flags |= FLAG_HEAD; 13905 } 13906 13907 if (uscmd->uscsi_flags & USCSI_NOINTR) { 13908 flags |= FLAG_NOINTR; 13909 } 13910 13911 /* 13912 * For tagged queueing, things get a bit complicated. 13913 * Check first for head of queue and last for ordered queue. 13914 * If neither head nor ordered, use the default driver tag flags. 13915 */ 13916 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 13917 if (uscmd->uscsi_flags & USCSI_HTAG) { 13918 flags |= FLAG_HTAG; 13919 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 13920 flags |= FLAG_OTAG; 13921 } else { 13922 flags |= un->un_tagflags & FLAG_TAGMASK; 13923 } 13924 } 13925 13926 if (uscmd->uscsi_flags & USCSI_NODISCON) { 13927 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 13928 } 13929 13930 pktp->pkt_flags = flags; 13931 13932 /* Transfer uscsi information to scsi_pkt */ 13933 (void) scsi_uscsi_pktinit(uscmd, pktp); 13934 13935 /* Copy the caller's CDB into the pkt... */ 13936 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 13937 13938 if (uscmd->uscsi_timeout == 0) { 13939 pktp->pkt_time = un->un_uscsi_timeout; 13940 } else { 13941 pktp->pkt_time = uscmd->uscsi_timeout; 13942 } 13943 13944 /* need it later to identify USCSI request in sdintr */ 13945 xp->xb_pkt_flags |= SD_XB_USCSICMD; 13946 13947 xp->xb_sense_resid = uscmd->uscsi_rqresid; 13948 13949 pktp->pkt_private = bp; 13950 pktp->pkt_comp = sdintr; 13951 *pktpp = pktp; 13952 13953 SD_TRACE(SD_LOG_IO_CORE, un, 13954 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 13955 13956 return (SD_PKT_ALLOC_SUCCESS); 13957 } 13958 13959 13960 /* 13961 * Function: sd_destroypkt_for_uscsi 13962 * 13963 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 13964 * IOs. Also saves relevant info into the associated uscsi_cmd 13965 * struct.
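 *
 * (For example, if the command completed with a check condition
 * and sense data was captured, the sense bytes saved in the xbuf
 * are copied back into uscmd->uscsi_rqbuf below so the caller can
 * decode them; uscsi_status and uscsi_resid are filled in
 * unconditionally.)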
13966 * 13967 * Context: May be called under interrupt context 13968 */ 13969 13970 static void 13971 sd_destroypkt_for_uscsi(struct buf *bp) 13972 { 13973 struct uscsi_cmd *uscmd; 13974 struct sd_xbuf *xp; 13975 struct scsi_pkt *pktp; 13976 struct sd_lun *un; 13977 struct sd_uscsi_info *suip; 13978 13979 ASSERT(bp != NULL); 13980 xp = SD_GET_XBUF(bp); 13981 ASSERT(xp != NULL); 13982 un = SD_GET_UN(bp); 13983 ASSERT(un != NULL); 13984 ASSERT(!mutex_owned(SD_MUTEX(un))); 13985 pktp = SD_GET_PKTP(bp); 13986 ASSERT(pktp != NULL); 13987 13988 SD_TRACE(SD_LOG_IO_CORE, un, 13989 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 13990 13991 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 13992 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 13993 ASSERT(uscmd != NULL); 13994 13995 /* Save the status and the residual into the uscsi_cmd struct */ 13996 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 13997 uscmd->uscsi_resid = bp->b_resid; 13998 13999 /* Transfer scsi_pkt information to uscsi */ 14000 (void) scsi_uscsi_pktfini(pktp, uscmd); 14001 14002 /* 14003 * If enabled, copy any saved sense data into the area specified 14004 * by the uscsi command. 14005 */ 14006 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 14007 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 14008 /* 14009 * Note: uscmd->uscsi_rqbuf should always point to a buffer 14010 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 14011 */ 14012 uscmd->uscsi_rqstatus = xp->xb_sense_status; 14013 uscmd->uscsi_rqresid = xp->xb_sense_resid; 14014 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 14015 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 14016 MAX_SENSE_LENGTH); 14017 } else { 14018 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 14019 SENSE_LENGTH); 14020 } 14021 } 14022 /* 14023 * The following assignments are for SCSI FMA. 14024 */ 14025 ASSERT(xp->xb_private != NULL); 14026 suip = (struct sd_uscsi_info *)xp->xb_private; 14027 suip->ui_pkt_reason = pktp->pkt_reason; 14028 suip->ui_pkt_state = pktp->pkt_state; 14029 suip->ui_pkt_statistics = pktp->pkt_statistics; 14030 suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp); 14031 14032 /* We are done with the scsi_pkt; free it now */ 14033 ASSERT(SD_GET_PKTP(bp) != NULL); 14034 scsi_destroy_pkt(SD_GET_PKTP(bp)); 14035 14036 SD_TRACE(SD_LOG_IO_CORE, un, 14037 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 14038 } 14039 14040 14041 /* 14042 * Function: sd_bioclone_alloc 14043 * 14044 * Description: Allocate a buf(9S) and init it as per the given buf 14045 * and the various arguments. The associated sd_xbuf 14046 * struct is (nearly) duplicated. The struct buf *bp 14047 * argument is saved in new_xp->xb_private. 14048 * 14049 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 14050 * datalen - size of data area for the shadow bp 14051 * blkno - starting LBA 14052 * func - function pointer for b_iodone in the shadow buf. (May 14053 * be NULL if none.) 14054 * 14055 * Return Code: Pointer to the allocated buf(9S) struct 14056 * 14057 * Context: Can sleep.
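 *
 * Typical use, paraphrased from sd_mapblockaddr_iostart() above
 * (shown for orientation, not as an additional call site):
 *
 *	bp = sd_bioclone_alloc(bp, count, blocknum,
 *	    (int (*)(struct buf *))sd_mapblockaddr_iodone);
 *	xp = SD_GET_XBUF(bp);
 *
 * where the clone's xbuf is a copy of the original's and its
 * xb_private points back at the original bp.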
14058 */ 14059 14060 static struct buf * 14061 sd_bioclone_alloc(struct buf *bp, size_t datalen, 14062 daddr_t blkno, int (*func)(struct buf *)) 14063 { 14064 struct sd_lun *un; 14065 struct sd_xbuf *xp; 14066 struct sd_xbuf *new_xp; 14067 struct buf *new_bp; 14068 14069 ASSERT(bp != NULL); 14070 xp = SD_GET_XBUF(bp); 14071 ASSERT(xp != NULL); 14072 un = SD_GET_UN(bp); 14073 ASSERT(un != NULL); 14074 ASSERT(!mutex_owned(SD_MUTEX(un))); 14075 14076 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 14077 NULL, KM_SLEEP); 14078 14079 new_bp->b_lblkno = blkno; 14080 14081 /* 14082 * Allocate an xbuf for the shadow bp and copy the contents of the 14083 * original xbuf into it. 14084 */ 14085 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14086 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 14087 14088 /* 14089 * The given bp is automatically saved in the xb_private member 14090 * of the new xbuf. Callers are allowed to depend on this. 14091 */ 14092 new_xp->xb_private = bp; 14093 14094 new_bp->b_private = new_xp; 14095 14096 return (new_bp); 14097 } 14098 14099 /* 14100 * Function: sd_shadow_buf_alloc 14101 * 14102 * Description: Allocate a buf(9S) and init it as per the given buf 14103 * and the various arguments. The associated sd_xbuf 14104 * struct is (nearly) duplicated. The struct buf *bp 14105 * argument is saved in new_xp->xb_private. 14106 * 14107 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 14108 * datalen - size of data area for the shadow bp 14109 * bflags - B_READ or B_WRITE (pseudo flag) 14110 * blkno - starting LBA 14111 * func - function pointer for b_iodone in the shadow buf. (May 14112 * be NULL if none.) 14113 * 14114 * Return Code: Pointer to the allocated buf(9S) struct 14115 * 14116 * Context: Can sleep. 14117 */ 14118 14119 static struct buf * 14120 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 14121 daddr_t blkno, int (*func)(struct buf *)) 14122 { 14123 struct sd_lun *un; 14124 struct sd_xbuf *xp; 14125 struct sd_xbuf *new_xp; 14126 struct buf *new_bp; 14127 14128 ASSERT(bp != NULL); 14129 xp = SD_GET_XBUF(bp); 14130 ASSERT(xp != NULL); 14131 un = SD_GET_UN(bp); 14132 ASSERT(un != NULL); 14133 ASSERT(!mutex_owned(SD_MUTEX(un))); 14134 14135 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 14136 bp_mapin(bp); 14137 } 14138 14139 bflags &= (B_READ | B_WRITE); 14140 #if defined(__i386) || defined(__amd64) 14141 new_bp = getrbuf(KM_SLEEP); 14142 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 14143 new_bp->b_bcount = datalen; 14144 new_bp->b_flags = bflags | 14145 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 14146 #else 14147 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 14148 datalen, bflags, SLEEP_FUNC, NULL); 14149 #endif 14150 new_bp->av_forw = NULL; 14151 new_bp->av_back = NULL; 14152 new_bp->b_dev = bp->b_dev; 14153 new_bp->b_blkno = blkno; 14154 new_bp->b_iodone = func; 14155 new_bp->b_edev = bp->b_edev; 14156 new_bp->b_resid = 0; 14157 14158 /* We need to preserve the B_FAILFAST flag */ 14159 if (bp->b_flags & B_FAILFAST) { 14160 new_bp->b_flags |= B_FAILFAST; 14161 } 14162 14163 /* 14164 * Allocate an xbuf for the shadow bp and copy the contents of the 14165 * original xbuf into it. 14166 */ 14167 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14168 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 14169 14170 /* Need later to copy data between the shadow buf & original buf!
*/ 14171 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 14172 14173 /* 14174 * The given bp is automatically saved in the xb_private member 14175 * of the new xbuf. Callers are allowed to depend on this. 14176 */ 14177 new_xp->xb_private = bp; 14178 14179 new_bp->b_private = new_xp; 14180 14181 return (new_bp); 14182 } 14183 14184 /* 14185 * Function: sd_bioclone_free 14186 * 14187 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 14188 * in the partition-overrun ("larger than partition") case. 14189 * 14190 * Context: May be called under interrupt context 14191 */ 14192 14193 static void 14194 sd_bioclone_free(struct buf *bp) 14195 { 14196 struct sd_xbuf *xp; 14197 14198 ASSERT(bp != NULL); 14199 xp = SD_GET_XBUF(bp); 14200 ASSERT(xp != NULL); 14201 14202 /* 14203 * Call bp_mapout() before freeing the buf, in case a lower 14204 * layer or HBA had done a bp_mapin(). We must do this here 14205 * as we are the "originator" of the shadow buf. 14206 */ 14207 bp_mapout(bp); 14208 14209 /* 14210 * Null out b_iodone before freeing the bp, to ensure that the driver 14211 * never gets confused by a stale value in this field. (Just a little 14212 * extra defensiveness here.) 14213 */ 14214 bp->b_iodone = NULL; 14215 14216 freerbuf(bp); 14217 14218 kmem_free(xp, sizeof (struct sd_xbuf)); 14219 } 14220 14221 /* 14222 * Function: sd_shadow_buf_free 14223 * 14224 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 14225 * 14226 * Context: May be called under interrupt context 14227 */ 14228 14229 static void 14230 sd_shadow_buf_free(struct buf *bp) 14231 { 14232 struct sd_xbuf *xp; 14233 14234 ASSERT(bp != NULL); 14235 xp = SD_GET_XBUF(bp); 14236 ASSERT(xp != NULL); 14237 14238 #if defined(__sparc) 14239 /* 14240 * Call bp_mapout() before freeing the buf, in case a lower 14241 * layer or HBA had done a bp_mapin(). We must do this here 14242 * as we are the "originator" of the shadow buf. 14243 */ 14244 bp_mapout(bp); 14245 #endif 14246 14247 /* 14248 * Null out b_iodone before freeing the bp, to ensure that the driver 14249 * never gets confused by a stale value in this field. (Just a little 14250 * extra defensiveness here.) 14251 */ 14252 bp->b_iodone = NULL; 14253 14254 #if defined(__i386) || defined(__amd64) 14255 kmem_free(bp->b_un.b_addr, bp->b_bcount); 14256 freerbuf(bp); 14257 #else 14258 scsi_free_consistent_buf(bp); 14259 #endif 14260 14261 kmem_free(xp, sizeof (struct sd_xbuf)); 14262 } 14263 14264 14265 /* 14266 * Function: sd_print_transport_rejected_message 14267 * 14268 * Description: This implements the ludicrously complex rules for printing 14269 * a "transport rejected" message. This is to address the 14270 * specific problem of having a flood of this error message 14271 * produced when a failover occurs. 14272 * 14273 * Context: Any. 14274 */ 14275 14276 static void 14277 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 14278 int code) 14279 { 14280 ASSERT(un != NULL); 14281 ASSERT(mutex_owned(SD_MUTEX(un))); 14282 ASSERT(xp != NULL); 14283 14284 /* 14285 * Print the "transport rejected" message under the following 14286 * conditions: 14287 * 14288 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 14289 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 14290 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 14291 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 14292 * scsi_transport(9F) (which indicates that the target might have 14293 * gone off-line).
This uses the un->un_tran_fatal_count 14294 * counter, which is incremented whenever a TRAN_FATAL_ERROR is 14295 * received, and reset to zero whenever a TRAN_ACCEPT is returned 14296 * from scsi_transport(). 14297 * 14298 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 14299 * the preceding cases in order for the message to be printed. 14300 */ 14301 if (((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) && 14302 (SD_FM_LOG(un) == SD_FM_LOG_NSUP)) { 14303 if ((sd_level_mask & SD_LOGMASK_DIAG) || 14304 (code != TRAN_FATAL_ERROR) || 14305 (un->un_tran_fatal_count == 1)) { 14306 switch (code) { 14307 case TRAN_BADPKT: 14308 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14309 "transport rejected bad packet\n"); 14310 break; 14311 case TRAN_FATAL_ERROR: 14312 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14313 "transport rejected fatal error\n"); 14314 break; 14315 default: 14316 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14317 "transport rejected (%d)\n", code); 14318 break; 14319 } 14320 } 14321 } 14322 } 14323 14324 14325 /* 14326 * Function: sd_add_buf_to_waitq 14327 * 14328 * Description: Add the given buf(9S) struct to the wait queue for the 14329 * instance. If sorting is enabled, then the buf is added 14330 * to the queue via an elevator sort algorithm (a la 14331 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 14332 * If sorting is not enabled, then the buf is just added 14333 * to the end of the wait queue. 14334 * 14335 * Return Code: void 14336 * 14337 * Context: Does not sleep/block, therefore technically can be called 14338 * from any context. However if sorting is enabled then the 14339 * execution time is indeterminate, and may take a long time if 14340 * the wait queue grows large. 14341 */ 14342 14343 static void 14344 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 14345 { 14346 struct buf *ap; 14347 14348 ASSERT(bp != NULL); 14349 ASSERT(un != NULL); 14350 ASSERT(mutex_owned(SD_MUTEX(un))); 14351 14352 /* If the queue is empty, add the buf as the only entry & return. */ 14353 if (un->un_waitq_headp == NULL) { 14354 ASSERT(un->un_waitq_tailp == NULL); 14355 un->un_waitq_headp = un->un_waitq_tailp = bp; 14356 bp->av_forw = NULL; 14357 return; 14358 } 14359 14360 ASSERT(un->un_waitq_tailp != NULL); 14361 14362 /* 14363 * If sorting is disabled, just add the buf to the tail end of 14364 * the wait queue and return. 14365 */ 14366 if (un->un_f_disksort_disabled) { 14367 un->un_waitq_tailp->av_forw = bp; 14368 un->un_waitq_tailp = bp; 14369 bp->av_forw = NULL; 14370 return; 14371 } 14372 14373 /* 14374 * Sort through the list of requests currently on the wait queue 14375 * and add the new buf request at the appropriate position. 14376 * 14377 * The un->un_waitq_headp is an activity chain pointer on which 14378 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 14379 * first queue holds those requests which are positioned after 14380 * the current SD_GET_BLKNO() (in the first request); the second holds 14381 * requests which came in after their SD_GET_BLKNO() number was passed. 14382 * Thus we implement a one way scan, retracting after reaching 14383 * the end of the drive to the first request on the second 14384 * queue, at which time it becomes the first queue. 14385 * A one-way scan is natural because of the way UNIX read-ahead 14386 * blocks are allocated. 14387 * 14388 * If we lie after the first request, then we must locate the 14389 * second request list and add ourselves to it.
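 *
 * An illustrative walk-through (block numbers invented for the
 * example): suppose the waitq holds blknos 100 -> 300 -> 700 ->
 * 20 -> 40, where the inversion 700 -> 20 marks the start of the
 * second list. A new request at blkno 250 sorts into the first
 * list (100 -> 250 -> 300 -> ...), while a new request at blkno
 * 50, being below the current head, is placed in the second list
 * after 40.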
14390 */ 14391 ap = un->un_waitq_headp; 14392 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 14393 while (ap->av_forw != NULL) { 14394 /* 14395 * Look for an "inversion" in the (normally 14396 * ascending) block numbers. This indicates 14397 * the start of the second request list. 14398 */ 14399 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 14400 /* 14401 * Search the second request list for the 14402 * first request at a larger block number. 14403 * We go before that; however if there is 14404 * no such request, we go at the end. 14405 */ 14406 do { 14407 if (SD_GET_BLKNO(bp) < 14408 SD_GET_BLKNO(ap->av_forw)) { 14409 goto insert; 14410 } 14411 ap = ap->av_forw; 14412 } while (ap->av_forw != NULL); 14413 goto insert; /* after last */ 14414 } 14415 ap = ap->av_forw; 14416 } 14417 14418 /* 14419 * No inversions... we will go after the last, and 14420 * be the first request in the second request list. 14421 */ 14422 goto insert; 14423 } 14424 14425 /* 14426 * Request is at/after the current request... 14427 * sort in the first request list. 14428 */ 14429 while (ap->av_forw != NULL) { 14430 /* 14431 * We want to go after the current request (1) if 14432 * there is an inversion after it (i.e. it is the end 14433 * of the first request list), or (2) if the next 14434 * request is a larger block no. than our request. 14435 */ 14436 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 14437 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 14438 goto insert; 14439 } 14440 ap = ap->av_forw; 14441 } 14442 14443 /* 14444 * Neither a second list nor a larger request, therefore 14445 * we go at the end of the first list (which is the same 14446 * as the end of the whole shebang). 14447 */ 14448 insert: 14449 bp->av_forw = ap->av_forw; 14450 ap->av_forw = bp; 14451 14452 /* 14453 * If we inserted onto the tail end of the waitq, make sure the 14454 * tail pointer is updated. 14455 */ 14456 if (ap == un->un_waitq_tailp) { 14457 un->un_waitq_tailp = bp; 14458 } 14459 } 14460 14461 14462 /* 14463 * Function: sd_start_cmds 14464 * 14465 * Description: Remove and transport cmds from the driver queues. 14466 * 14467 * Arguments: un - pointer to the unit (soft state) struct for the target. 14468 * 14469 * immed_bp - ptr to a buf to be transported immediately. Only 14470 * the immed_bp is transported; bufs on the waitq are not 14471 * processed and the un_retry_bp is not checked. If immed_bp is 14472 * NULL, then normal queue processing is performed. 14473 * 14474 * Context: May be called from kernel thread context, interrupt context, 14475 * or runout callback context. This function may not block or 14476 * call routines that block. 14477 */ 14478 14479 static void 14480 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 14481 { 14482 struct sd_xbuf *xp; 14483 struct buf *bp; 14484 void (*statp)(kstat_io_t *); 14485 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14486 void (*saved_statp)(kstat_io_t *); 14487 #endif 14488 int rval; 14489 struct sd_fm_internal *sfip = NULL; 14490 14491 ASSERT(un != NULL); 14492 ASSERT(mutex_owned(SD_MUTEX(un))); 14493 ASSERT(un->un_ncmds_in_transport >= 0); 14494 ASSERT(un->un_throttle >= 0); 14495 14496 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 14497 14498 do { 14499 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14500 saved_statp = NULL; 14501 #endif 14502 14503 /* 14504 * If we are syncing or dumping, fail the command to 14505 * avoid recursively calling back into scsi_transport().
14506 * The dump I/O itself uses a separate code path so this 14507 * only prevents non-dump I/O from being sent while dumping. 14508 * File system sync takes place before dumping begins. 14509 * During panic, filesystem I/O is allowed provided 14510 * un_in_callback is <= 1. This is to prevent recursion 14511 * such as sd_start_cmds -> scsi_transport -> sdintr -> 14512 * sd_start_cmds and so on. See panic.c for more information 14513 * about the states the system can be in during panic. 14514 */ 14515 if ((un->un_state == SD_STATE_DUMPING) || 14516 (ddi_in_panic() && (un->un_in_callback > 1))) { 14517 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14518 "sd_start_cmds: panicking\n"); 14519 goto exit; 14520 } 14521 14522 if ((bp = immed_bp) != NULL) { 14523 /* 14524 * We have a bp that must be transported immediately. 14525 * It's OK to transport the immed_bp here without doing 14526 * the throttle limit check because the immed_bp is 14527 * always used in a retry/recovery case. This means 14528 * that we know we are not at the throttle limit by 14529 * virtue of the fact that to get here we must have 14530 * already gotten a command back via sdintr(). This also 14531 * relies on (1) the command on un_retry_bp preventing 14532 * further commands from the waitq from being issued; 14533 * and (2) the code in sd_retry_command checking the 14534 * throttle limit before issuing a delayed or immediate 14535 * retry. This holds even if the throttle limit is 14536 * currently ratcheted down from its maximum value. 14537 */ 14538 statp = kstat_runq_enter; 14539 if (bp == un->un_retry_bp) { 14540 ASSERT((un->un_retry_statp == NULL) || 14541 (un->un_retry_statp == kstat_waitq_enter) || 14542 (un->un_retry_statp == 14543 kstat_runq_back_to_waitq)); 14544 /* 14545 * If the waitq kstat was incremented when 14546 * sd_set_retry_bp() queued this bp for a retry, 14547 * then we must set up statp so that the waitq 14548 * count will get decremented correctly below. 14549 * Also we must clear un->un_retry_statp to 14550 * ensure that we do not act on a stale value 14551 * in this field. 14552 */ 14553 if ((un->un_retry_statp == kstat_waitq_enter) || 14554 (un->un_retry_statp == 14555 kstat_runq_back_to_waitq)) { 14556 statp = kstat_waitq_to_runq; 14557 } 14558 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14559 saved_statp = un->un_retry_statp; 14560 #endif 14561 un->un_retry_statp = NULL; 14562 14563 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14564 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 14565 "un_throttle:%d un_ncmds_in_transport:%d\n", 14566 un, un->un_retry_bp, un->un_throttle, 14567 un->un_ncmds_in_transport); 14568 } else { 14569 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 14570 "processing priority bp:0x%p\n", bp); 14571 } 14572 14573 } else if ((bp = un->un_waitq_headp) != NULL) { 14574 /* 14575 * A command on the waitq is ready to go, but do not 14576 * send it if: 14577 * 14578 * (1) the throttle limit has been reached, or 14579 * (2) a retry is pending, or 14580 * (3) a START_STOP_UNIT callback pending, or 14581 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 14582 * command is pending. 14583 * 14584 * For all of these conditions, IO processing will 14585 * restart after the condition is cleared. 
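             * As a simplified sketch (hypothetical condition, not a
             * driver function), the gate below amounts to:
             *
             *	blocked =
             *	    (un->un_ncmds_in_transport >= un->un_throttle) ||
             *	    (un->un_retry_bp != NULL) ||
             *	    (un->un_startstop_timeid != NULL) ||
             *	    (un->un_direct_priority_timeid != NULL);
             *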
14586 */ 14587 if (un->un_ncmds_in_transport >= un->un_throttle) { 14588 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14589 "sd_start_cmds: exiting, " 14590 "throttle limit reached!\n"); 14591 goto exit; 14592 } 14593 if (un->un_retry_bp != NULL) { 14594 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14595 "sd_start_cmds: exiting, retry pending!\n"); 14596 goto exit; 14597 } 14598 if (un->un_startstop_timeid != NULL) { 14599 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14600 "sd_start_cmds: exiting, " 14601 "START_STOP pending!\n"); 14602 goto exit; 14603 } 14604 if (un->un_direct_priority_timeid != NULL) { 14605 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14606 "sd_start_cmds: exiting, " 14607 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n"); 14608 goto exit; 14609 } 14610 14611 /* Dequeue the command */ 14612 un->un_waitq_headp = bp->av_forw; 14613 if (un->un_waitq_headp == NULL) { 14614 un->un_waitq_tailp = NULL; 14615 } 14616 bp->av_forw = NULL; 14617 statp = kstat_waitq_to_runq; 14618 SD_TRACE(SD_LOG_IO_CORE, un, 14619 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 14620 14621 } else { 14622 /* No work to do so bail out now */ 14623 SD_TRACE(SD_LOG_IO_CORE, un, 14624 "sd_start_cmds: no more work, exiting!\n"); 14625 goto exit; 14626 } 14627 14628 /* 14629 * Reset the state to normal. This is the mechanism by which 14630 * the state transitions from either SD_STATE_RWAIT or 14631 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 14632 * If state is SD_STATE_PM_CHANGING then this command is 14633 * part of the device power control and the state must 14634 * not be put back to normal. Doing so would would 14635 * allow new commands to proceed when they shouldn't, 14636 * the device may be going off. 14637 */ 14638 if ((un->un_state != SD_STATE_SUSPENDED) && 14639 (un->un_state != SD_STATE_PM_CHANGING)) { 14640 New_state(un, SD_STATE_NORMAL); 14641 } 14642 14643 xp = SD_GET_XBUF(bp); 14644 ASSERT(xp != NULL); 14645 14646 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14647 /* 14648 * Allocate the scsi_pkt if we need one, or attach DMA 14649 * resources if we have a scsi_pkt that needs them. The 14650 * latter should only occur for commands that are being 14651 * retried. 14652 */ 14653 if ((xp->xb_pktp == NULL) || 14654 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 14655 #else 14656 if (xp->xb_pktp == NULL) { 14657 #endif 14658 /* 14659 * There is no scsi_pkt allocated for this buf. Call 14660 * the initpkt function to allocate & init one. 14661 * 14662 * The scsi_init_pkt runout callback functionality is 14663 * implemented as follows: 14664 * 14665 * 1) The initpkt function always calls 14666 * scsi_init_pkt(9F) with sdrunout specified as the 14667 * callback routine. 14668 * 2) A successful packet allocation is initialized and 14669 * the I/O is transported. 14670 * 3) The I/O associated with an allocation resource 14671 * failure is left on its queue to be retried via 14672 * runout or the next I/O. 14673 * 4) The I/O associated with a DMA error is removed 14674 * from the queue and failed with EIO. Processing of 14675 * the transport queues is also halted to be 14676 * restarted via runout or the next I/O. 14677 * 5) The I/O associated with a CDB size or packet 14678 * size error is removed from the queue and failed 14679 * with EIO. Processing of the transport queues is 14680 * continued. 14681 * 14682 * Note: there is no interface for canceling a runout 14683 * callback. 
To prevent the driver from detaching or 14684 * suspending while a runout is pending the driver 14685 * state is set to SD_STATE_RWAIT 14686 * 14687 * Note: using the scsi_init_pkt callback facility can 14688 * result in an I/O request persisting at the head of 14689 * the list which cannot be satisfied even after 14690 * multiple retries. In the future the driver may 14691 * implement some kind of maximum runout count before 14692 * failing an I/O. 14693 * 14694 * Note: the use of funcp below may seem superfluous, 14695 * but it helps warlock figure out the correct 14696 * initpkt function calls (see [s]sd.wlcmd). 14697 */ 14698 struct scsi_pkt *pktp; 14699 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 14700 14701 ASSERT(bp != un->un_rqs_bp); 14702 14703 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 14704 switch ((*funcp)(bp, &pktp)) { 14705 case SD_PKT_ALLOC_SUCCESS: 14706 xp->xb_pktp = pktp; 14707 SD_TRACE(SD_LOG_IO_CORE, un, 14708 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 14709 pktp); 14710 goto got_pkt; 14711 14712 case SD_PKT_ALLOC_FAILURE: 14713 /* 14714 * Temporary (hopefully) resource depletion. 14715 * Since retries and RQS commands always have a 14716 * scsi_pkt allocated, these cases should never 14717 * get here. So the only cases this needs to 14718 * handle is a bp from the waitq (which we put 14719 * back onto the waitq for sdrunout), or a bp 14720 * sent as an immed_bp (which we just fail). 14721 */ 14722 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14723 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14724 14725 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14726 14727 if (bp == immed_bp) { 14728 /* 14729 * If SD_XB_DMA_FREED is clear, then 14730 * this is a failure to allocate a 14731 * scsi_pkt, and we must fail the 14732 * command. 14733 */ 14734 if ((xp->xb_pkt_flags & 14735 SD_XB_DMA_FREED) == 0) { 14736 break; 14737 } 14738 14739 /* 14740 * If this immediate command is NOT our 14741 * un_retry_bp, then we must fail it. 14742 */ 14743 if (bp != un->un_retry_bp) { 14744 break; 14745 } 14746 14747 /* 14748 * We get here if this cmd is our 14749 * un_retry_bp that was DMAFREED, but 14750 * scsi_init_pkt() failed to reallocate 14751 * DMA resources when we attempted to 14752 * retry it. This can happen when an 14753 * mpxio failover is in progress, but 14754 * we don't want to just fail the 14755 * command in this case. 14756 * 14757 * Use timeout(9F) to restart it after 14758 * a 100ms delay. We don't want to 14759 * let sdrunout() restart it, because 14760 * sdrunout() is just supposed to start 14761 * commands that are sitting on the 14762 * wait queue. The un_retry_bp stays 14763 * set until the command completes, but 14764 * sdrunout can be called many times 14765 * before that happens. Since sdrunout 14766 * cannot tell if the un_retry_bp is 14767 * already in the transport, it could 14768 * end up calling scsi_transport() for 14769 * the un_retry_bp multiple times. 14770 * 14771 * Also: don't schedule the callback 14772 * if some other callback is already 14773 * pending. 14774 */ 14775 if (un->un_retry_statp == NULL) { 14776 /* 14777 * restore the kstat pointer to 14778 * keep kstat counts coherent 14779 * when we do retry the command. 
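                 * (The scheduling that follows uses the usual guarded
                 * timeout(9F) idiom -- a simplified sketch, not the
                 * verbatim code:
                 *
                 *	if (no restart callback is pending)
                 *		un->un_retry_timeid =
                 *		    timeout(sd_start_retry_command,
                 *		    un, SD_RESTART_TIMEOUT);
                 *
                 * where "pending" covers un_startstop_timeid,
                 * un_retry_timeid and un_direct_priority_timeid.)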
14780 */ 14781 un->un_retry_statp = 14782 saved_statp; 14783 } 14784 14785 if ((un->un_startstop_timeid == NULL) && 14786 (un->un_retry_timeid == NULL) && 14787 (un->un_direct_priority_timeid == 14788 NULL)) { 14789 14790 un->un_retry_timeid = 14791 timeout( 14792 sd_start_retry_command, 14793 un, SD_RESTART_TIMEOUT); 14794 } 14795 goto exit; 14796 } 14797 14798 #else 14799 if (bp == immed_bp) { 14800 break; /* Just fail the command */ 14801 } 14802 #endif 14803 14804 /* Add the buf back to the head of the waitq */ 14805 bp->av_forw = un->un_waitq_headp; 14806 un->un_waitq_headp = bp; 14807 if (un->un_waitq_tailp == NULL) { 14808 un->un_waitq_tailp = bp; 14809 } 14810 goto exit; 14811 14812 case SD_PKT_ALLOC_FAILURE_NO_DMA: 14813 /* 14814 * HBA DMA resource failure. Fail the command 14815 * and continue processing of the queues. 14816 */ 14817 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14818 "sd_start_cmds: " 14819 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 14820 break; 14821 14822 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 14823 /* 14824 * Note:x86: Partial DMA mapping not supported 14825 * for USCSI commands, and all the needed DMA 14826 * resources were not allocated. 14827 */ 14828 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14829 "sd_start_cmds: " 14830 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 14831 break; 14832 14833 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 14834 /* 14835 * Note:x86: Request cannot fit into CDB based 14836 * on lba and len. 14837 */ 14838 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14839 "sd_start_cmds: " 14840 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 14841 break; 14842 14843 default: 14844 /* Should NEVER get here! */ 14845 panic("scsi_initpkt error"); 14846 /*NOTREACHED*/ 14847 } 14848 14849 /* 14850 * Fatal error in allocating a scsi_pkt for this buf. 14851 * Update kstats & return the buf with an error code. 14852 * We must use sd_return_failed_command_no_restart() to 14853 * avoid a recursive call back into sd_start_cmds(). 14854 * However this also means that we must keep processing 14855 * the waitq here in order to avoid stalling. 14856 */ 14857 if (statp == kstat_waitq_to_runq) { 14858 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 14859 } 14860 sd_return_failed_command_no_restart(un, bp, EIO); 14861 if (bp == immed_bp) { 14862 /* immed_bp is gone by now, so clear this */ 14863 immed_bp = NULL; 14864 } 14865 continue; 14866 } 14867 got_pkt: 14868 if (bp == immed_bp) { 14869 /* goto the head of the class.... */ 14870 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14871 } 14872 14873 un->un_ncmds_in_transport++; 14874 SD_UPDATE_KSTATS(un, statp, bp); 14875 14876 /* 14877 * Call scsi_transport() to send the command to the target. 14878 * According to SCSA architecture, we must drop the mutex here 14879 * before calling scsi_transport() in order to avoid deadlock. 14880 * Note that the scsi_pkt's completion routine can be executed 14881 * (from interrupt context) even before the call to 14882 * scsi_transport() returns. 
14883 */ 14884 SD_TRACE(SD_LOG_IO_CORE, un, 14885 "sd_start_cmds: calling scsi_transport()\n"); 14886 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 14887 14888 mutex_exit(SD_MUTEX(un)); 14889 rval = scsi_transport(xp->xb_pktp); 14890 mutex_enter(SD_MUTEX(un)); 14891 14892 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14893 "sd_start_cmds: scsi_transport() returned %d\n", rval); 14894 14895 switch (rval) { 14896 case TRAN_ACCEPT: 14897 /* Clear this with every pkt accepted by the HBA */ 14898 un->un_tran_fatal_count = 0; 14899 break; /* Success; try the next cmd (if any) */ 14900 14901 case TRAN_BUSY: 14902 un->un_ncmds_in_transport--; 14903 ASSERT(un->un_ncmds_in_transport >= 0); 14904 14905 /* 14906 * Don't retry request sense, the sense data 14907 * is lost when another request is sent. 14908 * Free up the rqs buf and retry 14909 * the original failed cmd. Update kstat. 14910 */ 14911 if (bp == un->un_rqs_bp) { 14912 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14913 bp = sd_mark_rqs_idle(un, xp); 14914 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 14915 NULL, NULL, EIO, un->un_busy_timeout / 500, 14916 kstat_waitq_enter); 14917 goto exit; 14918 } 14919 14920 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14921 /* 14922 * Free the DMA resources for the scsi_pkt. This will 14923 * allow mpxio to select another path the next time 14924 * we call scsi_transport() with this scsi_pkt. 14925 * See sdintr() for the rationalization behind this. 14926 */ 14927 if ((un->un_f_is_fibre == TRUE) && 14928 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14929 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 14930 scsi_dmafree(xp->xb_pktp); 14931 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14932 } 14933 #endif 14934 14935 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 14936 /* 14937 * Commands that are SD_PATH_DIRECT_PRIORITY 14938 * are for error recovery situations. These do 14939 * not use the normal command waitq, so if they 14940 * get a TRAN_BUSY we cannot put them back onto 14941 * the waitq for later retry. One possible 14942 * problem is that there could already be some 14943 * other command on un_retry_bp that is waiting 14944 * for this one to complete, so we would be 14945 * deadlocked if we put this command back onto 14946 * the waitq for later retry (since un_retry_bp 14947 * must complete before the driver gets back to 14948 * commands on the waitq). 14949 * 14950 * To avoid deadlock we must schedule a callback 14951 * that will restart this command after a set 14952 * interval. This should keep retrying for as 14953 * long as the underlying transport keeps 14954 * returning TRAN_BUSY (just like for other 14955 * commands). Use the same timeout interval as 14956 * for the ordinary TRAN_BUSY retry. 14957 */ 14958 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14959 "sd_start_cmds: scsi_transport() returned " 14960 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 14961 14962 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14963 un->un_direct_priority_timeid = 14964 timeout(sd_start_direct_priority_command, 14965 bp, un->un_busy_timeout / 500); 14966 14967 goto exit; 14968 } 14969 14970 /* 14971 * For TRAN_BUSY, we want to reduce the throttle value, 14972 * unless we are retrying a command. 14973 */ 14974 if (bp != un->un_retry_bp) { 14975 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 14976 } 14977 14978 /* 14979 * Set up the bp to be tried again 10 ms later. 14980 * Note:x86: Is there a timeout value in the sd_lun 14981 * for this condition? 
14982 */ 14983 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, 14984 kstat_runq_back_to_waitq); 14985 goto exit; 14986 14987 case TRAN_FATAL_ERROR: 14988 un->un_tran_fatal_count++; 14989 /* FALLTHRU */ 14990 14991 case TRAN_BADPKT: 14992 default: 14993 un->un_ncmds_in_transport--; 14994 ASSERT(un->un_ncmds_in_transport >= 0); 14995 14996 /* 14997 * If this is our REQUEST SENSE command with a 14998 * transport error, we must get back the pointers 14999 * to the original buf, and mark the REQUEST 15000 * SENSE command as "available". 15001 */ 15002 if (bp == un->un_rqs_bp) { 15003 bp = sd_mark_rqs_idle(un, xp); 15004 xp = SD_GET_XBUF(bp); 15005 } else { 15006 /* 15007 * Legacy behavior: do not update transport 15008 * error count for request sense commands. 15009 */ 15010 SD_UPDATE_ERRSTATS(un, sd_transerrs); 15011 } 15012 15013 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15014 sd_print_transport_rejected_message(un, xp, rval); 15015 15016 /* 15017 * This command will be terminated by SD driver due 15018 * to a fatal transport error. We should post 15019 * ereport.io.scsi.cmd.disk.tran with driver-assessment 15020 * of "fail" for any command to indicate this 15021 * situation. 15022 */ 15023 if (xp->xb_ena > 0) { 15024 ASSERT(un->un_fm_private != NULL); 15025 sfip = un->un_fm_private; 15026 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT; 15027 sd_ssc_extract_info(&sfip->fm_ssc, un, 15028 xp->xb_pktp, bp, xp); 15029 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 15030 } 15031 15032 /* 15033 * We must use sd_return_failed_command_no_restart() to 15034 * avoid a recursive call back into sd_start_cmds(). 15035 * However this also means that we must keep processing 15036 * the waitq here in order to avoid stalling. 15037 */ 15038 sd_return_failed_command_no_restart(un, bp, EIO); 15039 15040 /* 15041 * Notify any threads waiting in sd_ddi_suspend() that 15042 * a command completion has occurred. 15043 */ 15044 if (un->un_state == SD_STATE_SUSPENDED) { 15045 cv_broadcast(&un->un_disk_busy_cv); 15046 } 15047 15048 if (bp == immed_bp) { 15049 /* immed_bp is gone by now, so clear this */ 15050 immed_bp = NULL; 15051 } 15052 break; 15053 } 15054 15055 } while (immed_bp == NULL); 15056 15057 exit: 15058 ASSERT(mutex_owned(SD_MUTEX(un))); 15059 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 15060 } 15061 15062 15063 /* 15064 * Function: sd_return_command 15065 * 15066 * Description: Returns a command to its originator (with or without an 15067 * error). Also starts commands waiting to be transported 15068 * to the target. 15069 * 15070 * Context: May be called from interrupt, kernel, or timeout context 15071 */ 15072 15073 static void 15074 sd_return_command(struct sd_lun *un, struct buf *bp) 15075 { 15076 struct sd_xbuf *xp; 15077 struct scsi_pkt *pktp; 15078 struct sd_fm_internal *sfip; 15079 15080 ASSERT(bp != NULL); 15081 ASSERT(un != NULL); 15082 ASSERT(mutex_owned(SD_MUTEX(un))); 15083 ASSERT(bp != un->un_rqs_bp); 15084 xp = SD_GET_XBUF(bp); 15085 ASSERT(xp != NULL); 15086 15087 pktp = SD_GET_PKTP(bp); 15088 sfip = (struct sd_fm_internal *)un->un_fm_private; 15089 ASSERT(sfip != NULL); 15090 15091 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 15092 15093 /* 15094 * Note: check for the "sdrestart failed" case. 
15095 */ 15096 if ((un->un_partial_dma_supported == 1) && 15097 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 15098 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 15099 (xp->xb_pktp->pkt_resid == 0)) { 15100 15101 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 15102 /* 15103 * Successfully set up next portion of cmd 15104 * transfer, try sending it 15105 */ 15106 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 15107 NULL, NULL, 0, (clock_t)0, NULL); 15108 sd_start_cmds(un, NULL); 15109 return; /* Note:x86: need a return here? */ 15110 } 15111 } 15112 15113 /* 15114 * If this is the failfast bp, clear it from un_failfast_bp. This 15115 * can happen if upon being re-tried the failfast bp either 15116 * succeeded or encountered another error (possibly even a different 15117 * error than the one that precipitated the failfast state, but in 15118 * that case it would have had to exhaust retries as well). Regardless, 15119 * this should not occur whenever the instance is in the active 15120 * failfast state. 15121 */ 15122 if (bp == un->un_failfast_bp) { 15123 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 15124 un->un_failfast_bp = NULL; 15125 } 15126 15127 /* 15128 * Clear the failfast state upon successful completion of ANY cmd. 15129 */ 15130 if (bp->b_error == 0) { 15131 un->un_failfast_state = SD_FAILFAST_INACTIVE; 15132 /* 15133 * If this is a successful command, but used to be retried, 15134 * we will take it as a recovered command and post an 15135 * ereport with driver-assessment of "recovered". 15136 */ 15137 if (xp->xb_ena > 0) { 15138 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15139 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY); 15140 } 15141 } else { 15142 /* 15143 * If this is a failed non-USCSI command we will post an 15144 * ereport with driver-assessment set accordingly("fail" or 15145 * "fatal"). 15146 */ 15147 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 15148 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15149 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 15150 } 15151 } 15152 15153 /* 15154 * This is used if the command was retried one or more times. Show that 15155 * we are done with it, and allow processing of the waitq to resume. 15156 */ 15157 if (bp == un->un_retry_bp) { 15158 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15159 "sd_return_command: un:0x%p: " 15160 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 15161 un->un_retry_bp = NULL; 15162 un->un_retry_statp = NULL; 15163 } 15164 15165 SD_UPDATE_RDWR_STATS(un, bp); 15166 SD_UPDATE_PARTITION_STATS(un, bp); 15167 15168 switch (un->un_state) { 15169 case SD_STATE_SUSPENDED: 15170 /* 15171 * Notify any threads waiting in sd_ddi_suspend() that 15172 * a command completion has occurred. 15173 */ 15174 cv_broadcast(&un->un_disk_busy_cv); 15175 break; 15176 default: 15177 sd_start_cmds(un, NULL); 15178 break; 15179 } 15180 15181 /* Return this command up the iodone chain to its originator. */ 15182 mutex_exit(SD_MUTEX(un)); 15183 15184 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 15185 xp->xb_pktp = NULL; 15186 15187 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 15188 15189 ASSERT(!mutex_owned(SD_MUTEX(un))); 15190 mutex_enter(SD_MUTEX(un)); 15191 15192 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 15193 } 15194 15195 15196 /* 15197 * Function: sd_return_failed_command 15198 * 15199 * Description: Command completion when an error occurred. 
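 * (For example, callers simply do:
 *
 *	sd_return_failed_command(un, bp, EIO);
 *
 * which sets the error on the buf via SD_BIOERROR() and then hands the
 * buf to sd_return_command().)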
15200 * 15201 * Context: May be called from interrupt context 15202 */ 15203 15204 static void 15205 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 15206 { 15207 ASSERT(bp != NULL); 15208 ASSERT(un != NULL); 15209 ASSERT(mutex_owned(SD_MUTEX(un))); 15210 15211 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15212 "sd_return_failed_command: entry\n"); 15213 15214 /* 15215 * b_resid could already be nonzero due to a partial data 15216 * transfer, so do not change it here. 15217 */ 15218 SD_BIOERROR(bp, errcode); 15219 15220 sd_return_command(un, bp); 15221 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15222 "sd_return_failed_command: exit\n"); 15223 } 15224 15225 15226 /* 15227 * Function: sd_return_failed_command_no_restart 15228 * 15229 * Description: Same as sd_return_failed_command, but ensures that no 15230 * call back into sd_start_cmds will be issued. 15231 * 15232 * Context: May be called from interrupt context 15233 */ 15234 15235 static void 15236 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 15237 int errcode) 15238 { 15239 struct sd_xbuf *xp; 15240 15241 ASSERT(bp != NULL); 15242 ASSERT(un != NULL); 15243 ASSERT(mutex_owned(SD_MUTEX(un))); 15244 xp = SD_GET_XBUF(bp); 15245 ASSERT(xp != NULL); 15246 ASSERT(errcode != 0); 15247 15248 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15249 "sd_return_failed_command_no_restart: entry\n"); 15250 15251 /* 15252 * b_resid could already be nonzero due to a partial data 15253 * transfer, so do not change it here. 15254 */ 15255 SD_BIOERROR(bp, errcode); 15256 15257 /* 15258 * If this is the failfast bp, clear it. This can happen if the 15259 * failfast bp encounterd a fatal error when we attempted to 15260 * re-try it (such as a scsi_transport(9F) failure). However 15261 * we should NOT be in an active failfast state if the failfast 15262 * bp is not NULL. 15263 */ 15264 if (bp == un->un_failfast_bp) { 15265 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 15266 un->un_failfast_bp = NULL; 15267 } 15268 15269 if (bp == un->un_retry_bp) { 15270 /* 15271 * This command was retried one or more times. Show that we are 15272 * done with it, and allow processing of the waitq to resume. 15273 */ 15274 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15275 "sd_return_failed_command_no_restart: " 15276 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 15277 un->un_retry_bp = NULL; 15278 un->un_retry_statp = NULL; 15279 } 15280 15281 SD_UPDATE_RDWR_STATS(un, bp); 15282 SD_UPDATE_PARTITION_STATS(un, bp); 15283 15284 mutex_exit(SD_MUTEX(un)); 15285 15286 if (xp->xb_pktp != NULL) { 15287 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 15288 xp->xb_pktp = NULL; 15289 } 15290 15291 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 15292 15293 mutex_enter(SD_MUTEX(un)); 15294 15295 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15296 "sd_return_failed_command_no_restart: exit\n"); 15297 } 15298 15299 15300 /* 15301 * Function: sd_retry_command 15302 * 15303 * Description: queue up a command for retry, or (optionally) fail it 15304 * if retry counts are exhausted. 15305 * 15306 * Arguments: un - Pointer to the sd_lun struct for the target. 15307 * 15308 * bp - Pointer to the buf for the command to be retried. 15309 * 15310 * retry_check_flag - Flag to see which (if any) of the retry 15311 * counts should be decremented/checked. If the indicated 15312 * retry count is exhausted, then the command will not be 15313 * retried; it will be failed instead. 
This should use a
 *		    value equal to one of the following:
 *
 *			SD_RETRIES_NOCHECK
 *			SD_RETRIES_STANDARD
 *			SD_RETRIES_VICTIM
 *
 *		    Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
 *		    if the check should be made to see if FLAG_ISOLATE is set
 *		    in the pkt. If FLAG_ISOLATE is set, then the command is
 *		    not retried; it is simply failed.
 *
 *		user_funcp - Ptr to function to call before dispatching the
 *		    command. May be NULL if no action needs to be performed.
 *		    (Primarily intended for printing messages.)
 *
 *		user_arg - Optional argument to be passed along to
 *		    the user_funcp call.
 *
 *		failure_code - errno return code to set in the bp if the
 *		    command is going to be failed.
 *
 *		retry_delay - Retry delay interval in (clock_t) units. May
 *		    be zero, which indicates that the retry should be issued
 *		    immediately (i.e., without an intervening delay).
 *
 *		statp - Ptr to kstat function to be updated if the command
 *		    is queued for a delayed retry. May be NULL if no kstat
 *		    update is desired.
 *
 * Context: May be called from interrupt context.
 */

static void
sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
    int code), void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *))
{
    struct sd_xbuf *xp;
    struct scsi_pkt *pktp;
    struct sd_fm_internal *sfip;

    ASSERT(un != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));
    ASSERT(bp != NULL);
    xp = SD_GET_XBUF(bp);
    ASSERT(xp != NULL);
    pktp = SD_GET_PKTP(bp);
    ASSERT(pktp != NULL);

    sfip = (struct sd_fm_internal *)un->un_fm_private;
    ASSERT(sfip != NULL);

    SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
        "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);

    /*
     * If we are syncing or dumping, fail the command to avoid
     * recursively calling back into scsi_transport().
     */
    if (ddi_in_panic()) {
        goto fail_command_no_log;
    }

    /*
     * We should never be retrying a command with FLAG_DIAGNOSE set, so
     * log an error and fail the command.
     */
    if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
        scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
            "ERROR, retrying FLAG_DIAGNOSE command.\n");
        sd_dump_memory(un, SD_LOG_IO, "CDB",
            (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
        sd_dump_memory(un, SD_LOG_IO, "Sense Data",
            (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
        goto fail_command;
    }

    /*
     * If we are suspended, then put the command onto the head of the
     * wait queue since we don't want to start more commands, and
     * clear the un_retry_bp. When we are resumed, the command will be
     * handled from the wait queue.
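     *
     * (For reference, a typical invocation found elsewhere in this
     * file:
     *
     *	sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
     *	    un->un_busy_timeout / 500, kstat_waitq_enter);
     *
     * i.e. standard retry counting, failing with EIO once retries are
     * exhausted, with a short delay before the retry is issued.)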
15397 */ 15398 switch (un->un_state) { 15399 case SD_STATE_SUSPENDED: 15400 case SD_STATE_DUMPING: 15401 bp->av_forw = un->un_waitq_headp; 15402 un->un_waitq_headp = bp; 15403 if (un->un_waitq_tailp == NULL) { 15404 un->un_waitq_tailp = bp; 15405 } 15406 if (bp == un->un_retry_bp) { 15407 un->un_retry_bp = NULL; 15408 un->un_retry_statp = NULL; 15409 } 15410 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 15411 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 15412 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 15413 return; 15414 default: 15415 break; 15416 } 15417 15418 /* 15419 * If the caller wants us to check FLAG_ISOLATE, then see if that 15420 * is set; if it is then we do not want to retry the command. 15421 * Normally, FLAG_ISOLATE is only used with USCSI cmds. 15422 */ 15423 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 15424 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 15425 goto fail_command; 15426 } 15427 } 15428 15429 15430 /* 15431 * If SD_RETRIES_FAILFAST is set, it indicates that either a 15432 * command timeout or a selection timeout has occurred. This means 15433 * that we were unable to establish an kind of communication with 15434 * the target, and subsequent retries and/or commands are likely 15435 * to encounter similar results and take a long time to complete. 15436 * 15437 * If this is a failfast error condition, we need to update the 15438 * failfast state, even if this bp does not have B_FAILFAST set. 15439 */ 15440 if (retry_check_flag & SD_RETRIES_FAILFAST) { 15441 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 15442 ASSERT(un->un_failfast_bp == NULL); 15443 /* 15444 * If we are already in the active failfast state, and 15445 * another failfast error condition has been detected, 15446 * then fail this command if it has B_FAILFAST set. 15447 * If B_FAILFAST is clear, then maintain the legacy 15448 * behavior of retrying heroically, even tho this will 15449 * take a lot more time to fail the command. 15450 */ 15451 if (bp->b_flags & B_FAILFAST) { 15452 goto fail_command; 15453 } 15454 } else { 15455 /* 15456 * We're not in the active failfast state, but we 15457 * have a failfast error condition, so we must begin 15458 * transition to the next state. We do this regardless 15459 * of whether or not this bp has B_FAILFAST set. 15460 */ 15461 if (un->un_failfast_bp == NULL) { 15462 /* 15463 * This is the first bp to meet a failfast 15464 * condition so save it on un_failfast_bp & 15465 * do normal retry processing. Do not enter 15466 * active failfast state yet. This marks 15467 * entry into the "failfast pending" state. 15468 */ 15469 un->un_failfast_bp = bp; 15470 15471 } else if (un->un_failfast_bp == bp) { 15472 /* 15473 * This is the second time *this* bp has 15474 * encountered a failfast error condition, 15475 * so enter active failfast state & flush 15476 * queues as appropriate. 15477 */ 15478 un->un_failfast_state = SD_FAILFAST_ACTIVE; 15479 un->un_failfast_bp = NULL; 15480 sd_failfast_flushq(un); 15481 15482 /* 15483 * Fail this bp now if B_FAILFAST set; 15484 * otherwise continue with retries. (It would 15485 * be pretty ironic if this bp succeeded on a 15486 * subsequent retry after we just flushed all 15487 * the queues). 
15488 */ 15489 if (bp->b_flags & B_FAILFAST) { 15490 goto fail_command; 15491 } 15492 15493 #if !defined(lint) && !defined(__lint) 15494 } else { 15495 /* 15496 * If neither of the preceeding conditionals 15497 * was true, it means that there is some 15498 * *other* bp that has met an inital failfast 15499 * condition and is currently either being 15500 * retried or is waiting to be retried. In 15501 * that case we should perform normal retry 15502 * processing on *this* bp, since there is a 15503 * chance that the current failfast condition 15504 * is transient and recoverable. If that does 15505 * not turn out to be the case, then retries 15506 * will be cleared when the wait queue is 15507 * flushed anyway. 15508 */ 15509 #endif 15510 } 15511 } 15512 } else { 15513 /* 15514 * SD_RETRIES_FAILFAST is clear, which indicates that we 15515 * likely were able to at least establish some level of 15516 * communication with the target and subsequent commands 15517 * and/or retries are likely to get through to the target, 15518 * In this case we want to be aggressive about clearing 15519 * the failfast state. Note that this does not affect 15520 * the "failfast pending" condition. 15521 */ 15522 un->un_failfast_state = SD_FAILFAST_INACTIVE; 15523 } 15524 15525 15526 /* 15527 * Check the specified retry count to see if we can still do 15528 * any retries with this pkt before we should fail it. 15529 */ 15530 switch (retry_check_flag & SD_RETRIES_MASK) { 15531 case SD_RETRIES_VICTIM: 15532 /* 15533 * Check the victim retry count. If exhausted, then fall 15534 * thru & check against the standard retry count. 15535 */ 15536 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 15537 /* Increment count & proceed with the retry */ 15538 xp->xb_victim_retry_count++; 15539 break; 15540 } 15541 /* Victim retries exhausted, fall back to std. retries... */ 15542 /* FALLTHRU */ 15543 15544 case SD_RETRIES_STANDARD: 15545 if (xp->xb_retry_count >= un->un_retry_count) { 15546 /* Retries exhausted, fail the command */ 15547 SD_TRACE(SD_LOG_IO_CORE, un, 15548 "sd_retry_command: retries exhausted!\n"); 15549 /* 15550 * update b_resid for failed SCMD_READ & SCMD_WRITE 15551 * commands with nonzero pkt_resid. 15552 */ 15553 if ((pktp->pkt_reason == CMD_CMPLT) && 15554 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 15555 (pktp->pkt_resid != 0)) { 15556 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 15557 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 15558 SD_UPDATE_B_RESID(bp, pktp); 15559 } 15560 } 15561 goto fail_command; 15562 } 15563 xp->xb_retry_count++; 15564 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15565 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15566 break; 15567 15568 case SD_RETRIES_UA: 15569 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 15570 /* Retries exhausted, fail the command */ 15571 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15572 "Unit Attention retries exhausted. 
" 15573 "Check the target.\n"); 15574 goto fail_command; 15575 } 15576 xp->xb_ua_retry_count++; 15577 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15578 "sd_retry_command: retry count:%d\n", 15579 xp->xb_ua_retry_count); 15580 break; 15581 15582 case SD_RETRIES_BUSY: 15583 if (xp->xb_retry_count >= un->un_busy_retry_count) { 15584 /* Retries exhausted, fail the command */ 15585 SD_TRACE(SD_LOG_IO_CORE, un, 15586 "sd_retry_command: retries exhausted!\n"); 15587 goto fail_command; 15588 } 15589 xp->xb_retry_count++; 15590 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15591 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15592 break; 15593 15594 case SD_RETRIES_NOCHECK: 15595 default: 15596 /* No retry count to check. Just proceed with the retry */ 15597 break; 15598 } 15599 15600 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15601 15602 /* 15603 * If this is a non-USCSI command being retried 15604 * during execution last time, we should post an ereport with 15605 * driver-assessment of the value "retry". 15606 * For partial DMA, request sense and STATUS_QFULL, there are no 15607 * hardware errors, we bypass ereport posting. 15608 */ 15609 if (failure_code != 0) { 15610 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 15611 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15612 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY); 15613 } 15614 } 15615 15616 /* 15617 * If we were given a zero timeout, we must attempt to retry the 15618 * command immediately (ie, without a delay). 15619 */ 15620 if (retry_delay == 0) { 15621 /* 15622 * Check some limiting conditions to see if we can actually 15623 * do the immediate retry. If we cannot, then we must 15624 * fall back to queueing up a delayed retry. 15625 */ 15626 if (un->un_ncmds_in_transport >= un->un_throttle) { 15627 /* 15628 * We are at the throttle limit for the target, 15629 * fall back to delayed retry. 15630 */ 15631 retry_delay = un->un_busy_timeout; 15632 statp = kstat_waitq_enter; 15633 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15634 "sd_retry_command: immed. retry hit " 15635 "throttle!\n"); 15636 } else { 15637 /* 15638 * We're clear to proceed with the immediate retry. 15639 * First call the user-provided function (if any) 15640 */ 15641 if (user_funcp != NULL) { 15642 (*user_funcp)(un, bp, user_arg, 15643 SD_IMMEDIATE_RETRY_ISSUED); 15644 #ifdef __lock_lint 15645 sd_print_incomplete_msg(un, bp, user_arg, 15646 SD_IMMEDIATE_RETRY_ISSUED); 15647 sd_print_cmd_incomplete_msg(un, bp, user_arg, 15648 SD_IMMEDIATE_RETRY_ISSUED); 15649 sd_print_sense_failed_msg(un, bp, user_arg, 15650 SD_IMMEDIATE_RETRY_ISSUED); 15651 #endif 15652 } 15653 15654 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15655 "sd_retry_command: issuing immediate retry\n"); 15656 15657 /* 15658 * Call sd_start_cmds() to transport the command to 15659 * the target. 15660 */ 15661 sd_start_cmds(un, bp); 15662 15663 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15664 "sd_retry_command exit\n"); 15665 return; 15666 } 15667 } 15668 15669 /* 15670 * Set up to retry the command after a delay. 
15671 * First call the user-provided function (if any) 15672 */ 15673 if (user_funcp != NULL) { 15674 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 15675 } 15676 15677 sd_set_retry_bp(un, bp, retry_delay, statp); 15678 15679 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15680 return; 15681 15682 fail_command: 15683 15684 if (user_funcp != NULL) { 15685 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 15686 } 15687 15688 fail_command_no_log: 15689 15690 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15691 "sd_retry_command: returning failed command\n"); 15692 15693 sd_return_failed_command(un, bp, failure_code); 15694 15695 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15696 } 15697 15698 15699 /* 15700 * Function: sd_set_retry_bp 15701 * 15702 * Description: Set up the given bp for retry. 15703 * 15704 * Arguments: un - ptr to associated softstate 15705 * bp - ptr to buf(9S) for the command 15706 * retry_delay - time interval before issuing retry (may be 0) 15707 * statp - optional pointer to kstat function 15708 * 15709 * Context: May be called under interrupt context 15710 */ 15711 15712 static void 15713 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 15714 void (*statp)(kstat_io_t *)) 15715 { 15716 ASSERT(un != NULL); 15717 ASSERT(mutex_owned(SD_MUTEX(un))); 15718 ASSERT(bp != NULL); 15719 15720 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15721 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 15722 15723 /* 15724 * Indicate that the command is being retried. This will not allow any 15725 * other commands on the wait queue to be transported to the target 15726 * until this command has been completed (success or failure). The 15727 * "retry command" is not transported to the target until the given 15728 * time delay expires, unless the user specified a 0 retry_delay. 15729 * 15730 * Note: the timeout(9F) callback routine is what actually calls 15731 * sd_start_cmds() to transport the command, with the exception of a 15732 * zero retry_delay. The only current implementor of a zero retry delay 15733 * is the case where a START_STOP_UNIT is sent to spin-up a device. 15734 */ 15735 if (un->un_retry_bp == NULL) { 15736 ASSERT(un->un_retry_statp == NULL); 15737 un->un_retry_bp = bp; 15738 15739 /* 15740 * If the user has not specified a delay the command should 15741 * be queued and no timeout should be scheduled. 15742 */ 15743 if (retry_delay == 0) { 15744 /* 15745 * Save the kstat pointer that will be used in the 15746 * call to SD_UPDATE_KSTATS() below, so that 15747 * sd_start_cmds() can correctly decrement the waitq 15748 * count when it is time to transport this command. 15749 */ 15750 un->un_retry_statp = statp; 15751 goto done; 15752 } 15753 } 15754 15755 if (un->un_retry_bp == bp) { 15756 /* 15757 * Save the kstat pointer that will be used in the call to 15758 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 15759 * correctly decrement the waitq count when it is time to 15760 * transport this command. 15761 */ 15762 un->un_retry_statp = statp; 15763 15764 /* 15765 * Schedule a timeout if: 15766 * 1) The user has specified a delay. 15767 * 2) There is not a START_STOP_UNIT callback pending. 15768 * 15769 * If no delay has been specified, then it is up to the caller 15770 * to ensure that IO processing continues without stalling. 15771 * Effectively, this means that the caller will issue the 15772 * required call to sd_start_cmds(). 
The START_STOP_UNIT 15773 * callback does this after the START STOP UNIT command has 15774 * completed. In either of these cases we should not schedule 15775 * a timeout callback here. Also don't schedule the timeout if 15776 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 15777 */ 15778 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 15779 (un->un_direct_priority_timeid == NULL)) { 15780 un->un_retry_timeid = 15781 timeout(sd_start_retry_command, un, retry_delay); 15782 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15783 "sd_set_retry_bp: setting timeout: un: 0x%p" 15784 " bp:0x%p un_retry_timeid:0x%p\n", 15785 un, bp, un->un_retry_timeid); 15786 } 15787 } else { 15788 /* 15789 * We only get in here if there is already another command 15790 * waiting to be retried. In this case, we just put the 15791 * given command onto the wait queue, so it can be transported 15792 * after the current retry command has completed. 15793 * 15794 * Also we have to make sure that if the command at the head 15795 * of the wait queue is the un_failfast_bp, that we do not 15796 * put ahead of it any other commands that are to be retried. 15797 */ 15798 if ((un->un_failfast_bp != NULL) && 15799 (un->un_failfast_bp == un->un_waitq_headp)) { 15800 /* 15801 * Enqueue this command AFTER the first command on 15802 * the wait queue (which is also un_failfast_bp). 15803 */ 15804 bp->av_forw = un->un_waitq_headp->av_forw; 15805 un->un_waitq_headp->av_forw = bp; 15806 if (un->un_waitq_headp == un->un_waitq_tailp) { 15807 un->un_waitq_tailp = bp; 15808 } 15809 } else { 15810 /* Enqueue this command at the head of the waitq. */ 15811 bp->av_forw = un->un_waitq_headp; 15812 un->un_waitq_headp = bp; 15813 if (un->un_waitq_tailp == NULL) { 15814 un->un_waitq_tailp = bp; 15815 } 15816 } 15817 15818 if (statp == NULL) { 15819 statp = kstat_waitq_enter; 15820 } 15821 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15822 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 15823 } 15824 15825 done: 15826 if (statp != NULL) { 15827 SD_UPDATE_KSTATS(un, statp, bp); 15828 } 15829 15830 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15831 "sd_set_retry_bp: exit un:0x%p\n", un); 15832 } 15833 15834 15835 /* 15836 * Function: sd_start_retry_command 15837 * 15838 * Description: Start the command that has been waiting on the target's 15839 * retry queue. Called from timeout(9F) context after the 15840 * retry delay interval has expired. 15841 * 15842 * Arguments: arg - pointer to associated softstate for the device. 15843 * 15844 * Context: timeout(9F) thread context. May not sleep. 15845 */ 15846 15847 static void 15848 sd_start_retry_command(void *arg) 15849 { 15850 struct sd_lun *un = arg; 15851 15852 ASSERT(un != NULL); 15853 ASSERT(!mutex_owned(SD_MUTEX(un))); 15854 15855 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15856 "sd_start_retry_command: entry\n"); 15857 15858 mutex_enter(SD_MUTEX(un)); 15859 15860 un->un_retry_timeid = NULL; 15861 15862 if (un->un_retry_bp != NULL) { 15863 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15864 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 15865 un, un->un_retry_bp); 15866 sd_start_cmds(un, un->un_retry_bp); 15867 } 15868 15869 mutex_exit(SD_MUTEX(un)); 15870 15871 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15872 "sd_start_retry_command: exit\n"); 15873 } 15874 15875 /* 15876 * Function: sd_rmw_msg_print_handler 15877 * 15878 * Description: If RMW mode is enabled and warning message is triggered 15879 * print I/O count during a fixed interval. 
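 *
 * (The handler re-arms itself while unaligned I/O keeps arriving --
 * the rearm step from the body below:
 *
 *	if (un->un_rmw_incre_count > 0)
 *		un->un_rmw_msg_timeid = timeout(sd_rmw_msg_print_handler,
 *		    un, SD_RMW_MSG_PRINT_TIMEOUT);
 *	else
 *		un->un_rmw_msg_timeid = NULL;
 * )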
15880 * 15881 * Arguments: arg - pointer to associated softstate for the device. 15882 * 15883 * Context: timeout(9F) thread context. May not sleep. 15884 */ 15885 static void 15886 sd_rmw_msg_print_handler(void *arg) 15887 { 15888 struct sd_lun *un = arg; 15889 15890 ASSERT(un != NULL); 15891 ASSERT(!mutex_owned(SD_MUTEX(un))); 15892 15893 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15894 "sd_rmw_msg_print_handler: entry\n"); 15895 15896 mutex_enter(SD_MUTEX(un)); 15897 15898 if (un->un_rmw_incre_count > 0) { 15899 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15900 "%"PRIu64" I/O requests are not aligned with %d disk " 15901 "sector size in %ld seconds. They are handled through " 15902 "Read Modify Write but the performance is very low!\n", 15903 un->un_rmw_incre_count, un->un_tgt_blocksize, 15904 drv_hztousec(SD_RMW_MSG_PRINT_TIMEOUT) / 1000000); 15905 un->un_rmw_incre_count = 0; 15906 un->un_rmw_msg_timeid = timeout(sd_rmw_msg_print_handler, 15907 un, SD_RMW_MSG_PRINT_TIMEOUT); 15908 } else { 15909 un->un_rmw_msg_timeid = NULL; 15910 } 15911 15912 mutex_exit(SD_MUTEX(un)); 15913 15914 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15915 "sd_rmw_msg_print_handler: exit\n"); 15916 } 15917 15918 /* 15919 * Function: sd_start_direct_priority_command 15920 * 15921 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 15922 * received TRAN_BUSY when we called scsi_transport() to send it 15923 * to the underlying HBA. This function is called from timeout(9F) 15924 * context after the delay interval has expired. 15925 * 15926 * Arguments: arg - pointer to associated buf(9S) to be restarted. 15927 * 15928 * Context: timeout(9F) thread context. May not sleep. 15929 */ 15930 15931 static void 15932 sd_start_direct_priority_command(void *arg) 15933 { 15934 struct buf *priority_bp = arg; 15935 struct sd_lun *un; 15936 15937 ASSERT(priority_bp != NULL); 15938 un = SD_GET_UN(priority_bp); 15939 ASSERT(un != NULL); 15940 ASSERT(!mutex_owned(SD_MUTEX(un))); 15941 15942 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15943 "sd_start_direct_priority_command: entry\n"); 15944 15945 mutex_enter(SD_MUTEX(un)); 15946 un->un_direct_priority_timeid = NULL; 15947 sd_start_cmds(un, priority_bp); 15948 mutex_exit(SD_MUTEX(un)); 15949 15950 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15951 "sd_start_direct_priority_command: exit\n"); 15952 } 15953 15954 15955 /* 15956 * Function: sd_send_request_sense_command 15957 * 15958 * Description: Sends a REQUEST SENSE command to the target 15959 * 15960 * Context: May be called from interrupt context. 15961 */ 15962 15963 static void 15964 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 15965 struct scsi_pkt *pktp) 15966 { 15967 ASSERT(bp != NULL); 15968 ASSERT(un != NULL); 15969 ASSERT(mutex_owned(SD_MUTEX(un))); 15970 15971 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 15972 "entry: buf:0x%p\n", bp); 15973 15974 /* 15975 * If we are syncing or dumping, then fail the command to avoid a 15976 * recursive callback into scsi_transport(). Also fail the command 15977 * if we are suspended (legacy behavior). 
15978 */ 15979 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 15980 (un->un_state == SD_STATE_DUMPING)) { 15981 sd_return_failed_command(un, bp, EIO); 15982 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15983 "sd_send_request_sense_command: syncing/dumping, exit\n"); 15984 return; 15985 } 15986 15987 /* 15988 * Retry the failed command and don't issue the request sense if: 15989 * 1) the sense buf is busy 15990 * 2) we have 1 or more outstanding commands on the target 15991 * (the sense data will be cleared or invalidated any way) 15992 * 15993 * Note: There could be an issue with not checking a retry limit here, 15994 * the problem is determining which retry limit to check. 15995 */ 15996 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 15997 /* Don't retry if the command is flagged as non-retryable */ 15998 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15999 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 16000 NULL, NULL, 0, un->un_busy_timeout, 16001 kstat_waitq_enter); 16002 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16003 "sd_send_request_sense_command: " 16004 "at full throttle, retrying exit\n"); 16005 } else { 16006 sd_return_failed_command(un, bp, EIO); 16007 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16008 "sd_send_request_sense_command: " 16009 "at full throttle, non-retryable exit\n"); 16010 } 16011 return; 16012 } 16013 16014 sd_mark_rqs_busy(un, bp); 16015 sd_start_cmds(un, un->un_rqs_bp); 16016 16017 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16018 "sd_send_request_sense_command: exit\n"); 16019 } 16020 16021 16022 /* 16023 * Function: sd_mark_rqs_busy 16024 * 16025 * Description: Indicate that the request sense bp for this instance is 16026 * in use. 16027 * 16028 * Context: May be called under interrupt context 16029 */ 16030 16031 static void 16032 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 16033 { 16034 struct sd_xbuf *sense_xp; 16035 16036 ASSERT(un != NULL); 16037 ASSERT(bp != NULL); 16038 ASSERT(mutex_owned(SD_MUTEX(un))); 16039 ASSERT(un->un_sense_isbusy == 0); 16040 16041 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 16042 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 16043 16044 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 16045 ASSERT(sense_xp != NULL); 16046 16047 SD_INFO(SD_LOG_IO, un, 16048 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 16049 16050 ASSERT(sense_xp->xb_pktp != NULL); 16051 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 16052 == (FLAG_SENSING | FLAG_HEAD)); 16053 16054 un->un_sense_isbusy = 1; 16055 un->un_rqs_bp->b_resid = 0; 16056 sense_xp->xb_pktp->pkt_resid = 0; 16057 sense_xp->xb_pktp->pkt_reason = 0; 16058 16059 /* So we can get back the bp at interrupt time! */ 16060 sense_xp->xb_sense_bp = bp; 16061 16062 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 16063 16064 /* 16065 * Mark this buf as awaiting sense data. (This is already set in 16066 * the pkt_flags for the RQS packet.) 
16067 */ 16068 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 16069 16070 /* Request sense down same path */ 16071 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) && 16072 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance) 16073 sense_xp->xb_pktp->pkt_path_instance = 16074 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance; 16075 16076 sense_xp->xb_retry_count = 0; 16077 sense_xp->xb_victim_retry_count = 0; 16078 sense_xp->xb_ua_retry_count = 0; 16079 sense_xp->xb_nr_retry_count = 0; 16080 sense_xp->xb_dma_resid = 0; 16081 16082 /* Clean up the fields for auto-request sense */ 16083 sense_xp->xb_sense_status = 0; 16084 sense_xp->xb_sense_state = 0; 16085 sense_xp->xb_sense_resid = 0; 16086 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 16087 16088 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 16089 } 16090 16091 16092 /* 16093 * Function: sd_mark_rqs_idle 16094 * 16095 * Description: SD_MUTEX must be held continuously through this routine 16096 * to prevent reuse of the rqs struct before the caller can 16097 * complete it's processing. 16098 * 16099 * Return Code: Pointer to the RQS buf 16100 * 16101 * Context: May be called under interrupt context 16102 */ 16103 16104 static struct buf * 16105 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 16106 { 16107 struct buf *bp; 16108 ASSERT(un != NULL); 16109 ASSERT(sense_xp != NULL); 16110 ASSERT(mutex_owned(SD_MUTEX(un))); 16111 ASSERT(un->un_sense_isbusy != 0); 16112 16113 un->un_sense_isbusy = 0; 16114 bp = sense_xp->xb_sense_bp; 16115 sense_xp->xb_sense_bp = NULL; 16116 16117 /* This pkt is no longer interested in getting sense data */ 16118 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 16119 16120 return (bp); 16121 } 16122 16123 16124 16125 /* 16126 * Function: sd_alloc_rqs 16127 * 16128 * Description: Set up the unit to receive auto request sense data 16129 * 16130 * Return Code: DDI_SUCCESS or DDI_FAILURE 16131 * 16132 * Context: Called under attach(9E) context 16133 */ 16134 16135 static int 16136 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 16137 { 16138 struct sd_xbuf *xp; 16139 16140 ASSERT(un != NULL); 16141 ASSERT(!mutex_owned(SD_MUTEX(un))); 16142 ASSERT(un->un_rqs_bp == NULL); 16143 ASSERT(un->un_rqs_pktp == NULL); 16144 16145 /* 16146 * First allocate the required buf and scsi_pkt structs, then set up 16147 * the CDB in the scsi_pkt for a REQUEST SENSE command. 16148 */ 16149 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 16150 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 16151 if (un->un_rqs_bp == NULL) { 16152 return (DDI_FAILURE); 16153 } 16154 16155 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 16156 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 16157 16158 if (un->un_rqs_pktp == NULL) { 16159 sd_free_rqs(un); 16160 return (DDI_FAILURE); 16161 } 16162 16163 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 16164 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 16165 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 16166 16167 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 16168 16169 /* Set up the other needed members in the ARQ scsi_pkt. */ 16170 un->un_rqs_pktp->pkt_comp = sdintr; 16171 un->un_rqs_pktp->pkt_time = sd_io_time; 16172 un->un_rqs_pktp->pkt_flags |= 16173 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 16174 16175 /* 16176 * Allocate & init the sd_xbuf struct for the RQS command. 
Do not
     * provide any initpkt or destroypkt routines, as we take care of
     * scsi_pkt allocation/freeing here and in sd_free_rqs().
     */
    xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
    sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
    xp->xb_pktp = un->un_rqs_pktp;
    SD_INFO(SD_LOG_ATTACH_DETACH, un,
        "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
        un, xp, un->un_rqs_pktp, un->un_rqs_bp);

    /*
     * Save the pointer to the request sense private bp so it can
     * be retrieved in sdintr.
     */
    un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
    ASSERT(un->un_rqs_bp->b_private == xp);

    /*
     * See if the HBA supports auto-request sense for the specified
     * target/lun. If it does, then try to enable it (if not already
     * enabled).
     *
     * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
     * failure, while for other HBAs (pln) scsi_ifsetcap will always
     * return success. However, in both of these cases ARQ is always
     * enabled and scsi_ifgetcap will always return true. The best approach
     * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
     *
     * In a third case, the HBA (adp) always returns enabled on
     * scsi_ifgetcap even when ARQ is not enabled; there the best approach
     * is to issue a scsi_ifsetcap and then a scsi_ifgetcap.
     * Note: this case is to circumvent the Adaptec bug. (x86 only)
     */

    if (un->un_f_is_fibre == TRUE) {
        un->un_f_arq_enabled = TRUE;
    } else {
#if defined(__i386) || defined(__amd64)
        /*
         * Circumvent the Adaptec bug, remove this code when
         * the bug is fixed
         */
        (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
#endif
        switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
        case 0:
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_alloc_rqs: HBA supports ARQ\n");
            /*
             * ARQ is supported by this HBA but currently is not
             * enabled. Attempt to enable it and if successful then
             * mark this instance as ARQ enabled.
             */
            if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
                == 1) {
                /* Successfully enabled ARQ in the HBA */
                SD_INFO(SD_LOG_ATTACH_DETACH, un,
                    "sd_alloc_rqs: ARQ enabled\n");
                un->un_f_arq_enabled = TRUE;
            } else {
                /* Could not enable ARQ in the HBA */
                SD_INFO(SD_LOG_ATTACH_DETACH, un,
                    "sd_alloc_rqs: failed ARQ enable\n");
                un->un_f_arq_enabled = FALSE;
            }
            break;
        case 1:
            /*
             * ARQ is supported by this HBA and is already enabled.
             * Just mark ARQ as enabled for this instance.
             */
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_alloc_rqs: ARQ already enabled\n");
            un->un_f_arq_enabled = TRUE;
            break;
        default:
            /*
             * ARQ is not supported by this HBA; disable it for this
             * instance.
             */
            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_alloc_rqs: HBA does not support ARQ\n");
            un->un_f_arq_enabled = FALSE;
            break;
        }
    }

    return (DDI_SUCCESS);
}


/*
 * Function: sd_free_rqs
 *
 * Description: Cleanup for the per-instance RQS command.
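 *
 * (Teardown mirrors sd_alloc_rqs() in reverse: the scsi_pkt bound to
 * the consistent buffer must be destroyed first, e.g.
 *
 *	scsi_destroy_pkt(un->un_rqs_pktp);
 *	scsi_free_consistent_buf(un->un_rqs_bp);
 *
 * in that order, as the comment in the body below explains.)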


/*
 *    Function: sd_free_rqs
 *
 * Description: Cleanup for the per-instance RQS command.
 *
 *     Context: Kernel thread context
 */

static void
sd_free_rqs(struct sd_lun *un)
{
	ASSERT(un != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");

	/*
	 * If consistent memory is bound to a scsi_pkt, the pkt
	 * has to be destroyed *before* freeing the consistent memory.
	 * Do not change the order of these operations.
	 * scsi_destroy_pkt() might access memory, which isn't allowed,
	 * after it was freed in scsi_free_consistent_buf().
	 */
	if (un->un_rqs_pktp != NULL) {
		scsi_destroy_pkt(un->un_rqs_pktp);
		un->un_rqs_pktp = NULL;
	}

	if (un->un_rqs_bp != NULL) {
		struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp);
		if (xp != NULL) {
			kmem_free(xp, sizeof (struct sd_xbuf));
		}
		scsi_free_consistent_buf(un->un_rqs_bp);
		un->un_rqs_bp = NULL;
	}
	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
}


/*
 *    Function: sd_reduce_throttle
 *
 * Description: Reduces the maximum # of outstanding commands on a
 *		target to the current number of outstanding commands.
 *		Queues a timeout(9F) callback to restore the limit
 *		after a specified interval has elapsed.
 *		Typically used when we get a TRAN_BUSY return code
 *		back from scsi_transport().
 *
 *   Arguments: un - ptr to the sd_lun softstate struct
 *		throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
 *
 *     Context: May be called from interrupt context
 */

static void
sd_reduce_throttle(struct sd_lun *un, int throttle_type)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_ncmds_in_transport >= 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
	    "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
	    un, un->un_throttle, un->un_ncmds_in_transport);

	if (un->un_throttle > 1) {
		if (un->un_f_use_adaptive_throttle == TRUE) {
			switch (throttle_type) {
			case SD_THROTTLE_TRAN_BUSY:
				if (un->un_busy_throttle == 0) {
					un->un_busy_throttle = un->un_throttle;
				}
				break;
			case SD_THROTTLE_QFULL:
				un->un_busy_throttle = 0;
				break;
			default:
				ASSERT(FALSE);
			}

			if (un->un_ncmds_in_transport > 0) {
				un->un_throttle = un->un_ncmds_in_transport;
			}

		} else {
			if (un->un_ncmds_in_transport == 0) {
				un->un_throttle = 1;
			} else {
				un->un_throttle = un->un_ncmds_in_transport;
			}
		}
	}

	/* Reschedule the timeout if none is currently active */
	if (un->un_reset_throttle_timeid == NULL) {
		un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
		    un, SD_THROTTLE_RESET_INTERVAL);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_reduce_throttle: timeout scheduled!\n");
	}

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
	    "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
}


/*
 *    Function: sd_restore_throttle
 *
 * Description: Callback function for timeout(9F). Resets the current
 *		value of un->un_throttle to its default.
 *
 *   Arguments: arg - pointer to associated softstate for the device.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_restore_throttle(void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
	    "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);

	un->un_reset_throttle_timeid = NULL;

	if (un->un_f_use_adaptive_throttle == TRUE) {
		/*
		 * If un_busy_throttle is nonzero, then it contains the
		 * value that un_throttle was when we got a TRAN_BUSY back
		 * from scsi_transport(). We want to revert back to this
		 * value.
		 *
		 * In the QFULL case, the throttle limit will incrementally
		 * increase until it reaches max throttle.
		 */
		if (un->un_busy_throttle > 0) {
			un->un_throttle = un->un_busy_throttle;
			un->un_busy_throttle = 0;
		} else {
			/*
			 * Increase the throttle by 10% to open the gate
			 * slowly, and schedule another restore if the saved
			 * throttle has not yet been reached.
			 */
			short throttle;
			if (sd_qfull_throttle_enable) {
				throttle = un->un_throttle +
				    max((un->un_throttle / 10), 1);
				un->un_throttle =
				    (throttle < un->un_saved_throttle) ?
				    throttle : un->un_saved_throttle;
				if (un->un_throttle < un->un_saved_throttle) {
					un->un_reset_throttle_timeid =
					    timeout(sd_restore_throttle,
					    un,
					    SD_QFULL_THROTTLE_RESET_INTERVAL);
				}
			}
		}

		/*
		 * If un_throttle has fallen below the low-water mark, we
		 * restore the maximum value here (and allow it to ratchet
		 * down again if necessary).
		 */
		if (un->un_throttle < un->un_min_throttle) {
			un->un_throttle = un->un_saved_throttle;
		}
	} else {
		SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
		    "restoring limit from 0x%x to 0x%x\n",
		    un->un_throttle, un->un_saved_throttle);
		un->un_throttle = un->un_saved_throttle;
	}

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_restore_throttle: calling sd_start_cmds!\n");

	sd_start_cmds(un, NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
	    un, un->un_throttle);

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
}
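
/*
 * Illustrative sketch (not compiled): the QFULL throttle ramp used in
 * sd_restore_throttle() above, isolated as a pure function. Each call
 * opens the gate by max(current / 10, 1) and never exceeds the saved
 * throttle. The function name is hypothetical.
 */
#if 0
static short
sd_example_ramp_throttle(short current, short saved)
{
	short throttle = current + max((current / 10), 1);

	/* e.g. current = 8, saved = 64: 8 -> 9 -> 10 -> 11 -> ... -> 64 */
	return ((throttle < saved) ? throttle : saved);
}
#endif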

/*
 *    Function: sdrunout
 *
 * Description: Callback routine for scsi_init_pkt when a resource
 *		allocation fails.
 *
 *   Arguments: arg - a pointer to the sd_lun unit struct for the particular
 *		soft state instance.
 *
 * Return Code: The scsi_init_pkt routine allows for the callback function to
 *		return a 0 indicating the callback should be rescheduled or a 1
 *		indicating not to reschedule. This routine always returns 1
 *		because the driver always provides a callback function to
 *		scsi_init_pkt. This results in a callback always being scheduled
 *		(via the scsi_init_pkt callback implementation) if a resource
 *		failure occurs.
 *
 *     Context: This callback function may not block or call routines that block
 *
 *        Note: Using the scsi_init_pkt callback facility can result in an I/O
 *		request persisting at the head of the list which cannot be
 *		satisfied even after multiple retries. In the future the driver
 *		may implement some type of maximum runout count before failing
 *		an I/O.
 */

static int
sdrunout(caddr_t arg)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");

	mutex_enter(SD_MUTEX(un));
	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));
	/*
	 * This callback routine always returns 1 (i.e. do not reschedule)
	 * because we always specify sdrunout as the callback handler for
	 * scsi_init_pkt inside the call to sd_start_cmds.
	 */
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
	return (1);
}


/*
 *    Function: sdintr
 *
 * Description: Completion callback routine for scsi_pkt(9S) structs
 *		sent to the HBA driver via scsi_transport(9F).
 *
 *     Context: Interrupt context
 */

static void
sdintr(struct scsi_pkt *pktp)
{
	struct buf	*bp;
	struct sd_xbuf	*xp;
	struct sd_lun	*un;
	size_t		actual_len;
	sd_ssc_t	*sscp;

	ASSERT(pktp != NULL);
	bp = (struct buf *)pktp->pkt_private;
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(xp->xb_pktp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

#ifdef SD_FAULT_INJECTION

	SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n");
	/* SD FaultInjection */
	sd_faultinjection(pktp);

#endif /* SD_FAULT_INJECTION */

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p,"
	    " xp:0x%p, un:0x%p\n", bp, xp, un);

	mutex_enter(SD_MUTEX(un));

	ASSERT(un->un_fm_private != NULL);
	sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
	ASSERT(sscp != NULL);

	/* Reduce the count of the #commands currently in transport */
	un->un_ncmds_in_transport--;
	ASSERT(un->un_ncmds_in_transport >= 0);

	/* Increment counter to indicate that the callback routine is active */
	un->un_in_callback++;

	SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);

#ifdef SDDEBUG
	if (bp == un->un_retry_bp) {
		SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: "
		    "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n",
		    un, un->un_retry_bp, un->un_ncmds_in_transport);
	}
#endif

	/*
	 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media
	 * state if needed.
	 */
	if (pktp->pkt_reason == CMD_DEV_GONE) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Command failed to complete...Device is gone\n");
		if (un->un_mediastate != DKIO_DEV_GONE) {
			un->un_mediastate = DKIO_DEV_GONE;
			cv_broadcast(&un->un_state_cv);
		}
		sd_return_failed_command(un, bp, EIO);
		goto exit;
	}

	if (pktp->pkt_state & STATE_XARQ_DONE) {
		SD_TRACE(SD_LOG_COMMON, un,
		    "sdintr: extra sense data received. pkt=%p\n", pktp);
	}

	/*
	 * First see if the pkt has auto-request sense data with it....
	 * Look at the packet state first so we don't take a performance
	 * hit looking at the arq enabled flag unless absolutely necessary.
	 */
	if ((pktp->pkt_state & STATE_ARQ_DONE) &&
	    (un->un_f_arq_enabled == TRUE)) {
		/*
		 * The HBA did an auto request sense for this command so check
		 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
		 * driver command that should not be retried.
		 */
		if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
			/*
			 * Save the relevant sense info into the xp for the
			 * original cmd.
			 */
			struct scsi_arq_status *asp;
			asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
			xp->xb_sense_status =
			    *((uchar_t *)(&(asp->sts_rqpkt_status)));
			xp->xb_sense_state = asp->sts_rqpkt_state;
			xp->xb_sense_resid = asp->sts_rqpkt_resid;
			if (pktp->pkt_state & STATE_XARQ_DONE) {
				actual_len = MAX_SENSE_LENGTH -
				    xp->xb_sense_resid;
				bcopy(&asp->sts_sensedata, xp->xb_sense_data,
				    MAX_SENSE_LENGTH);
			} else {
				if (xp->xb_sense_resid > SENSE_LENGTH) {
					actual_len = MAX_SENSE_LENGTH -
					    xp->xb_sense_resid;
				} else {
					actual_len = SENSE_LENGTH -
					    xp->xb_sense_resid;
				}
				if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
					if ((((struct uscsi_cmd *)
					    (xp->xb_pktinfo))->uscsi_rqlen) >
					    actual_len) {
						xp->xb_sense_resid =
						    (((struct uscsi_cmd *)
						    (xp->xb_pktinfo))->
						    uscsi_rqlen) - actual_len;
					} else {
						xp->xb_sense_resid = 0;
					}
				}
				bcopy(&asp->sts_sensedata, xp->xb_sense_data,
				    SENSE_LENGTH);
			}

			/* fail the command */
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: arq done and FLAG_DIAGNOSE set\n");
			sd_return_failed_command(un, bp, EIO);
			goto exit;
		}

#if (defined(__i386) || defined(__amd64))	/* DMAFREE for x86 only */
		/*
		 * We want to either retry or fail this command, so free
		 * the DMA resources here. If we retry the command then
		 * the DMA resources will be reallocated in sd_start_cmds().
		 * Note that when PKT_DMA_PARTIAL is used, this reallocation
		 * causes the *entire* transfer to start over again from the
		 * beginning of the request, even for PARTIAL chunks that
		 * have already transferred successfully.
		 */
		if ((un->un_f_is_fibre == TRUE) &&
		    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
		    ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
			scsi_dmafree(pktp);
			xp->xb_pkt_flags |= SD_XB_DMA_FREED;
		}
#endif

		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: arq done, sd_handle_auto_request_sense\n");

		sd_handle_auto_request_sense(un, bp, xp, pktp);
		goto exit;
	}

	/* Next see if this is the REQUEST SENSE pkt for the instance */
	if (pktp->pkt_flags & FLAG_SENSING) {
		/* This pktp is from the unit's REQUEST_SENSE command */
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: sd_handle_request_sense\n");
		sd_handle_request_sense(un, bp, xp, pktp);
		goto exit;
	}

	/*
	 * Check to see if the command successfully completed as requested;
	 * this is the most common case (and also the hot performance path).
	 *
	 * Requirements for successful completion are:
	 * pkt_reason is CMD_CMPLT and packet status is status good.
	 * In addition:
	 * - A residual of zero indicates successful completion no matter what
	 *   the command is.
	 * - If the residual is not zero and the command is not a read or
	 *   write, then it's still defined as successful completion. In other
	 *   words, if the command is a read or write the residual must be
	 *   zero for successful completion.
	 * - If the residual is not zero and the command is a read or
	 *   write, and it's a USCSICMD, then it's still defined as
	 *   successful completion.
	 */
	if ((pktp->pkt_reason == CMD_CMPLT) &&
	    (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) {

		/*
		 * Since this command is returned with a good status, we
		 * can reset the count for Sonoma failover.
		 */
		un->un_sonoma_failure_count = 0;

		/*
		 * Return all USCSI commands on good status
		 */
		if (pktp->pkt_resid == 0) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: returning command for resid == 0\n");
		} else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) &&
		    ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) {
			SD_UPDATE_B_RESID(bp, pktp);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: returning command for resid != 0\n");
		} else if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
			SD_UPDATE_B_RESID(bp, pktp);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: returning uscsi command\n");
		} else {
			goto not_successful;
		}
		sd_return_command(un, bp);

		/*
		 * Decrement counter to indicate that the callback routine
		 * is done.
		 */
		un->un_in_callback--;
		ASSERT(un->un_in_callback >= 0);
		mutex_exit(SD_MUTEX(un));

		return;
	}

not_successful:

#if (defined(__i386) || defined(__amd64))	/* DMAFREE for x86 only */
	/*
	 * The following is based upon knowledge of the underlying transport
	 * and its use of DMA resources. This code should be removed when
	 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor
	 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf()
	 * and sd_start_cmds().
	 *
	 * Free any DMA resources associated with this command if there
	 * is a chance it could be retried or enqueued for later retry.
	 * If we keep the DMA binding then mpxio cannot reissue the
	 * command on another path whenever a path failure occurs.
	 *
	 * Note that when PKT_DMA_PARTIAL is used, free/reallocation
	 * causes the *entire* transfer to start over again from the
	 * beginning of the request, even for PARTIAL chunks that
	 * have already transferred successfully.
	 *
	 * This is only done for non-uscsi commands (and also skipped for the
	 * driver's internal RQS command). Also just do this for Fibre Channel
	 * devices as these are the only ones that support mpxio.
	 */
	if ((un->un_f_is_fibre == TRUE) &&
	    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
	    ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
		scsi_dmafree(pktp);
		xp->xb_pkt_flags |= SD_XB_DMA_FREED;
	}
#endif

	/*
	 * The command did not successfully complete as requested so check
	 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
	 * driver command that should not be retried so just return. If
	 * FLAG_DIAGNOSE is not set the error will be processed below.
	 */
	if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n");
		/*
		 * Issue a request sense if a check condition caused the error
		 * (we handle the auto request sense case above), otherwise
		 * just fail the command.
		 */
		if ((pktp->pkt_reason == CMD_CMPLT) &&
		    (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) {
			sd_send_request_sense_command(un, bp, pktp);
		} else {
			sd_return_failed_command(un, bp, EIO);
		}
		goto exit;
	}

	/*
	 * The command did not successfully complete as requested so process
	 * the error, retry, and/or attempt recovery.
	 */
	switch (pktp->pkt_reason) {
	case CMD_CMPLT:
		switch (SD_GET_PKT_STATUS(pktp)) {
		case STATUS_GOOD:
			/*
			 * The command completed successfully with a non-zero
			 * residual
			 */
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_GOOD \n");
			sd_pkt_status_good(un, bp, xp, pktp);
			break;

		case STATUS_CHECK:
		case STATUS_TERMINATED:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_TERMINATED | STATUS_CHECK\n");
			sd_pkt_status_check_condition(un, bp, xp, pktp);
			break;

		case STATUS_BUSY:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_BUSY\n");
			sd_pkt_status_busy(un, bp, xp, pktp);
			break;

		case STATUS_RESERVATION_CONFLICT:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_RESERVATION_CONFLICT\n");
			sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
			break;

		case STATUS_QFULL:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_QFULL\n");
			sd_pkt_status_qfull(un, bp, xp, pktp);
			break;

		case STATUS_MET:
		case STATUS_INTERMEDIATE:
		case STATUS_SCSI2:
		case STATUS_INTERMEDIATE_MET:
		case STATUS_ACA_ACTIVE:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Unexpected SCSI status received: 0x%x\n",
			    SD_GET_PKT_STATUS(pktp));
			/*
			 * Mark the ssc_flags when an invalid status code is
			 * detected for a non-USCSI command.
			 */
			if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
				sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
				    0, "stat-code");
			}
			sd_return_failed_command(un, bp, EIO);
			break;

		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Invalid SCSI status received: 0x%x\n",
			    SD_GET_PKT_STATUS(pktp));
			if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
				sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
				    0, "stat-code");
			}
			sd_return_failed_command(un, bp, EIO);
			break;

		}
		break;

	case CMD_INCOMPLETE:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_INCOMPLETE\n");
		sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp);
		break;
	case CMD_TRAN_ERR:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_TRAN_ERR\n");
		sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp);
		break;
	case CMD_RESET:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_RESET \n");
		sd_pkt_reason_cmd_reset(un, bp, xp, pktp);
		break;
	case CMD_ABORTED:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_ABORTED \n");
		sd_pkt_reason_cmd_aborted(un, bp, xp, pktp);
		break;
	case CMD_TIMEOUT:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_TIMEOUT\n");
		sd_pkt_reason_cmd_timeout(un, bp, xp, pktp);
		break;
	case CMD_UNX_BUS_FREE:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_UNX_BUS_FREE \n");
		sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp);
		break;
	case CMD_TAG_REJECT:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_TAG_REJECT\n");
		sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp);
		break;
	default:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: default\n");
		/*
		 * Mark the ssc_flags for detecting an invalid pkt_reason.
		 */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON,
			    0, "pkt-reason");
		}
		sd_pkt_reason_default(un, bp, xp, pktp);
		break;
	}

exit:
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n");

	/* Decrement counter to indicate that the callback routine is done. */
	un->un_in_callback--;
	ASSERT(un->un_in_callback >= 0);

	/*
	 * At this point, the pkt has been dispatched, ie, it is either
	 * being re-tried or has been returned to its caller and should
	 * not be referenced.
	 */

	mutex_exit(SD_MUTEX(un));
}
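
/*
 * Illustrative sketch (not compiled): the "hot path" success test from
 * sdintr() above, written as a standalone predicate. A command is
 * treated as fully successful when pkt_reason is CMD_CMPLT, the status
 * is good, and the residual is either zero or permitted for the opcode
 * (non-read/write, or a USCSI request). Hypothetical helper only.
 */
#if 0
static boolean_t
sd_example_cmd_successful(struct scsi_pkt *pktp, struct sd_xbuf *xp)
{
	uchar_t op;

	if ((pktp->pkt_reason != CMD_CMPLT) ||
	    (SD_GET_PKT_STATUS(pktp) != STATUS_GOOD))
		return (B_FALSE);

	if (pktp->pkt_resid == 0)
		return (B_TRUE);

	/* Non-zero residual: OK unless this is a plain read/write */
	op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
	if ((op != SCMD_READ) && (op != SCMD_WRITE))
		return (B_TRUE);

	/* Read/write with residual: only OK for USCSI commands */
	return ((xp->xb_pkt_flags & SD_XB_USCSICMD) != 0);
}
#endif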


/*
 *    Function: sd_print_incomplete_msg
 *
 * Description: Prints the error message for a CMD_INCOMPLETE error.
 *
 *   Arguments: un - ptr to associated softstate for the device.
 *		bp - ptr to the buf(9S) for the command.
 *		arg - message string ptr
 *		code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
 *		or SD_NO_RETRY_ISSUED.
 *
 *     Context: May be called under interrupt context
 */

static void
sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	struct scsi_pkt	*pktp;
	char	*msgp;
	char	*cmdp = arg;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(arg != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	switch (code) {
	case SD_DELAYED_RETRY_ISSUED:
	case SD_IMMEDIATE_RETRY_ISSUED:
		msgp = "retrying";
		break;
	case SD_NO_RETRY_ISSUED:
	default:
		msgp = "giving up";
		break;
	}

	if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "incomplete %s- %s\n", cmdp, msgp);
	}
}


/*
 *    Function: sd_pkt_status_good
 *
 * Description: Processing for a STATUS_GOOD code in pkt_status.
 *
 *     Context: May be called under interrupt context
 */

static void
sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	char	*cmdp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);
	ASSERT(pktp->pkt_reason == CMD_CMPLT);
	ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
	ASSERT(pktp->pkt_resid != 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
	case SCMD_READ:
		cmdp = "read";
		break;
	case SCMD_WRITE:
		cmdp = "write";
		break;
	default:
		SD_UPDATE_B_RESID(bp, pktp);
		sd_return_command(un, bp);
		SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
		return;
	}

	/*
	 * See if we can retry the read/write, preferably immediately.
	 * If retries are exhausted, then sd_retry_command() will update
	 * the b_resid count.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
	    cmdp, EIO, (clock_t)0, NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
}


/*
 *    Function: sd_handle_request_sense
 *
 * Description: Processing for non-auto Request Sense command.
 *
 *   Arguments: un - ptr to associated softstate
 *		sense_bp - ptr to buf(9S) for the RQS command
 *		sense_xp - ptr to the sd_xbuf for the RQS command
 *		sense_pktp - ptr to the scsi_pkt(9S) for the RQS command
 *
 *     Context: May be called under interrupt context
 */

static void
sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp,
    struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp)
{
	struct buf	*cmd_bp;	/* buf for the original command */
	struct sd_xbuf	*cmd_xp;	/* sd_xbuf for the original command */
	struct scsi_pkt *cmd_pktp;	/* pkt for the original command */
	size_t		actual_len;	/* actual sense data length */

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(sense_bp != NULL);
	ASSERT(sense_xp != NULL);
	ASSERT(sense_pktp != NULL);

	/*
	 * Note the sense_bp, sense_xp, and sense_pktp here are for the
	 * RQS command and not the original command.
	 */
	ASSERT(sense_pktp == un->un_rqs_pktp);
	ASSERT(sense_bp == un->un_rqs_bp);
	ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) ==
	    (FLAG_SENSING | FLAG_HEAD));
	ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) &
	    FLAG_SENSING) == FLAG_SENSING);

	/* These are the bp, xp, and pktp for the original command */
	cmd_bp = sense_xp->xb_sense_bp;
	cmd_xp = SD_GET_XBUF(cmd_bp);
	cmd_pktp = SD_GET_PKTP(cmd_bp);

	if (sense_pktp->pkt_reason != CMD_CMPLT) {
		/*
		 * The REQUEST SENSE command failed. Release the REQUEST
		 * SENSE command for re-use, get back the bp for the original
		 * command, and attempt to re-try the original command if
		 * FLAG_DIAGNOSE is not set in the original packet.
		 */
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
			cmd_bp = sd_mark_rqs_idle(un, sense_xp);
			sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD,
			    NULL, NULL, EIO, (clock_t)0, NULL);
			return;
		}
	}

	/*
	 * Save the relevant sense info into the xp for the original cmd.
	 *
	 * Note: if the request sense failed the state info will be zero
	 * as set in sd_mark_rqs_busy()
	 */
	cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp);
	cmd_xp->xb_sense_state = sense_pktp->pkt_state;
	actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid;
	if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) &&
	    (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen >
	    SENSE_LENGTH)) {
		bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
		    MAX_SENSE_LENGTH);
		cmd_xp->xb_sense_resid = sense_pktp->pkt_resid;
	} else {
		bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
		    SENSE_LENGTH);
		if (actual_len < SENSE_LENGTH) {
			cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len;
		} else {
			cmd_xp->xb_sense_resid = 0;
		}
	}

	/*
	 * Free up the RQS command....
	 * NOTE:
	 * Must do this BEFORE calling sd_validate_sense_data!
	 * sd_validate_sense_data may return the original command in
	 * which case the pkt will be freed and the flags can no
	 * longer be touched.
	 * SD_MUTEX is held through this process until the command
	 * is dispatched based upon the sense data, so there are
	 * no race conditions.
	 */
	(void) sd_mark_rqs_idle(un, sense_xp);

	/*
	 * For a retryable command see if we have valid sense data, if so then
	 * turn it over to sd_decode_sense() to figure out the right course of
	 * action. Just fail a non-retryable command.
	 */
	if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
		if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) ==
		    SD_SENSE_DATA_IS_VALID) {
			sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp);
		}
	} else {
		SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB",
		    (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
		SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data",
		    (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
		sd_return_failed_command(un, cmd_bp, EIO);
	}
}


/*
 *    Function: sd_handle_auto_request_sense
 *
 * Description: Processing for auto-request sense information.
 *
 *   Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		xp - ptr to the sd_xbuf for the command
 *		pktp - ptr to the scsi_pkt(9S) for the command
 *
 *     Context: May be called under interrupt context
 */

static void
sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct scsi_arq_status *asp;
	size_t actual_len;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);
	ASSERT(pktp != un->un_rqs_pktp);
	ASSERT(bp != un->un_rqs_bp);

	/*
	 * For auto-request sense, we get a scsi_arq_status back from
	 * the HBA, with the sense data in the sts_sensedata member.
	 * The pkt_scbp of the packet points to this scsi_arq_status.
	 */
	asp = (struct scsi_arq_status *)(pktp->pkt_scbp);

	if (asp->sts_rqpkt_reason != CMD_CMPLT) {
		/*
		 * The auto REQUEST SENSE failed; see if we can re-try
		 * the original command.
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "auto request sense failed (reason=%s)\n",
		    scsi_rname(asp->sts_rqpkt_reason));

		sd_reset_target(un, pktp);

		sd_retry_command(un, bp, SD_RETRIES_STANDARD,
		    NULL, NULL, EIO, (clock_t)0, NULL);
		return;
	}

	/* Save the relevant sense info into the xp for the original cmd. */
	xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status)));
	xp->xb_sense_state = asp->sts_rqpkt_state;
	xp->xb_sense_resid = asp->sts_rqpkt_resid;
	if (xp->xb_sense_state & STATE_XARQ_DONE) {
		actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
		bcopy(&asp->sts_sensedata, xp->xb_sense_data,
		    MAX_SENSE_LENGTH);
	} else {
		if (xp->xb_sense_resid > SENSE_LENGTH) {
			actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
		} else {
			actual_len = SENSE_LENGTH - xp->xb_sense_resid;
		}
		if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
			if ((((struct uscsi_cmd *)
			    (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) {
				xp->xb_sense_resid = (((struct uscsi_cmd *)
				    (xp->xb_pktinfo))->uscsi_rqlen) -
				    actual_len;
			} else {
				xp->xb_sense_resid = 0;
			}
		}
		bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH);
	}

	/*
	 * See if we have valid sense data, if so then turn it over to
	 * sd_decode_sense() to figure out the right course of action.
	 */
	if (sd_validate_sense_data(un, bp, xp, actual_len) ==
	    SD_SENSE_DATA_IS_VALID) {
		sd_decode_sense(un, bp, xp, pktp);
	}
}


/*
 *    Function: sd_print_sense_failed_msg
 *
 * Description: Print log message when RQS has failed.
 *
 *   Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		arg - generic message string ptr
 *		code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *		or SD_NO_RETRY_ISSUED
 *
 *     Context: May be called from interrupt context
 */

static void
sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg,
    int code)
{
	char	*msgp = arg;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);

	if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp);
	}
}


/*
 *    Function: sd_validate_sense_data
 *
 * Description: Check the given sense data for validity.
 *		If the sense data is not valid, the command will
 *		be either failed or retried!
 *
 * Return Code: SD_SENSE_DATA_IS_INVALID
 *		SD_SENSE_DATA_IS_VALID
 *
 *     Context: May be called from interrupt context
 */

static int
sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    size_t actual_len)
{
	struct scsi_extended_sense *esp;
	struct scsi_pkt *pktp;
	char	*msgp = NULL;
	sd_ssc_t *sscp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(bp != un->un_rqs_bp);
	ASSERT(xp != NULL);
	ASSERT(un->un_fm_private != NULL);

	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
	ASSERT(sscp != NULL);

	/*
	 * Check the status of the RQS command (auto or manual).
	 */
	switch (xp->xb_sense_status & STATUS_MASK) {
	case STATUS_GOOD:
		break;

	case STATUS_RESERVATION_CONFLICT:
		sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
		return (SD_SENSE_DATA_IS_INVALID);

	case STATUS_BUSY:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Busy Status on REQUEST SENSE\n");
		sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL,
		    NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
		return (SD_SENSE_DATA_IS_INVALID);

	case STATUS_QFULL:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "QFULL Status on REQUEST SENSE\n");
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL,
		    NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
		return (SD_SENSE_DATA_IS_INVALID);

	case STATUS_CHECK:
	case STATUS_TERMINATED:
		msgp = "Check Condition on REQUEST SENSE\n";
		goto sense_failed;

	default:
		msgp = "Not STATUS_GOOD on REQUEST_SENSE\n";
		goto sense_failed;
	}

	/*
	 * See if we got the minimum required amount of sense data.
	 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes
	 * or less.
	 */
	if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
	    (actual_len == 0)) {
		msgp = "Request Sense couldn't get sense data\n";
		goto sense_failed;
	}

	if (actual_len < SUN_MIN_SENSE_LENGTH) {
		msgp = "Not enough sense information\n";
		/* Mark the ssc_flags for detecting invalid sense data */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
			    "sense-data");
		}
		goto sense_failed;
	}

	/*
	 * We require the extended sense data
	 */
	esp = (struct scsi_extended_sense *)xp->xb_sense_data;
	if (esp->es_class != CLASS_EXTENDED_SENSE) {
		if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
			static char tmp[8];
			static char buf[148];
			char *p = (char *)(xp->xb_sense_data);
			int i;

			mutex_enter(&sd_sense_mutex);
			(void) strcpy(buf, "undecodable sense information:");
			for (i = 0; i < actual_len; i++) {
				(void) sprintf(tmp, " 0x%x", *(p++)&0xff);
				(void) strcpy(&buf[strlen(buf)], tmp);
			}
			i = strlen(buf);
			(void) strcpy(&buf[i], "-(assumed fatal)\n");

			if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
				scsi_log(SD_DEVINFO(un), sd_label,
				    CE_WARN, buf);
			}
			mutex_exit(&sd_sense_mutex);
		}

		/* Mark the ssc_flags for detecting invalid sense data */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
			    "sense-data");
		}

		/* Note: Legacy behavior, fail the command with no retry */
		sd_return_failed_command(un, bp, EIO);
		return (SD_SENSE_DATA_IS_INVALID);
	}

	/*
	 * Check that es_code is valid (es_class concatenated with es_code
	 * makes up the "response code" field). es_class will always be 7, so
	 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the
	 * format.
	 */
	if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
	    (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
	    (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
	    (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
	    (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
		/* Mark the ssc_flags for detecting invalid sense data */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
			    "sense-data");
		}
		goto sense_failed;
	}

	return (SD_SENSE_DATA_IS_VALID);

sense_failed:
	/*
	 * If the request sense failed (for whatever reason), attempt
	 * to retry the original command.
	 */
#if defined(__i386) || defined(__amd64)
	/*
	 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in sddef.h
	 * for the SPARC platform, while x86 uses one binary for both SCSI
	 * and FC. The SD_RETRY_DELAY value used here needs to be adjusted
	 * whenever SD_RETRY_DELAY changes in sddef.h.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
	    sd_print_sense_failed_msg, msgp, EIO,
	    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL);
#else
	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
	    sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
#endif

	return (SD_SENSE_DATA_IS_INVALID);
}
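
/*
 * Illustrative sketch (not compiled): the "response code" check made
 * by sd_validate_sense_data() above. The response code is es_class
 * (always 7 for extended sense) combined with es_code, which selects
 * fixed, descriptor, or vendor-specific sense format. The helper name
 * is hypothetical; the CODE_FMT_* constants are the ones used above.
 */
#if 0
static boolean_t
sd_example_response_code_ok(struct scsi_extended_sense *esp)
{
	if (esp->es_class != CLASS_EXTENDED_SENSE)
		return (B_FALSE);

	switch (esp->es_code) {
	case CODE_FMT_FIXED_CURRENT:	/* -> response code 0x70 */
	case CODE_FMT_FIXED_DEFERRED:	/* -> response code 0x71 */
	case CODE_FMT_DESCR_CURRENT:	/* -> response code 0x72 */
	case CODE_FMT_DESCR_DEFERRED:	/* -> response code 0x73 */
	case CODE_FMT_VENDOR_SPECIFIC:	/* -> response code 0x7F */
		return (B_TRUE);
	default:
		return (B_FALSE);
	}
}
#endif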

/*
 *    Function: sd_decode_sense
 *
 * Description: Take recovery action(s) when SCSI Sense Data is received.
 *
 *     Context: Interrupt context.
 */

static void
sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	uint8_t sense_key;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(bp != un->un_rqs_bp);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	sense_key = scsi_sense_key(xp->xb_sense_data);

	switch (sense_key) {
	case KEY_NO_SENSE:
		sd_sense_key_no_sense(un, bp, xp, pktp);
		break;
	case KEY_RECOVERABLE_ERROR:
		sd_sense_key_recoverable_error(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	case KEY_NOT_READY:
		sd_sense_key_not_ready(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	case KEY_MEDIUM_ERROR:
	case KEY_HARDWARE_ERROR:
		sd_sense_key_medium_or_hardware_error(un,
		    xp->xb_sense_data, bp, xp, pktp);
		break;
	case KEY_ILLEGAL_REQUEST:
		sd_sense_key_illegal_request(un, bp, xp, pktp);
		break;
	case KEY_UNIT_ATTENTION:
		sd_sense_key_unit_attention(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	case KEY_WRITE_PROTECT:
	case KEY_VOLUME_OVERFLOW:
	case KEY_MISCOMPARE:
		sd_sense_key_fail_command(un, bp, xp, pktp);
		break;
	case KEY_BLANK_CHECK:
		sd_sense_key_blank_check(un, bp, xp, pktp);
		break;
	case KEY_ABORTED_COMMAND:
		sd_sense_key_aborted_command(un, bp, xp, pktp);
		break;
	case KEY_VENDOR_UNIQUE:
	case KEY_COPY_ABORTED:
	case KEY_EQUAL:
	case KEY_RESERVED:
	default:
		sd_sense_key_default(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	}
}
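
/*
 * Illustrative sketch (not compiled): pulling the fields that drive
 * the dispatch in sd_decode_sense() out of a raw sense buffer with
 * the kernel sense access functions used throughout this file. The
 * helper and its logging are only an example.
 */
#if 0
static void
sd_example_log_sense(struct sd_lun *un, uint8_t *sense_datap)
{
	uint8_t key  = scsi_sense_key(sense_datap);
	uint8_t asc  = scsi_sense_asc(sense_datap);
	uint8_t ascq = scsi_sense_ascq(sense_datap);

	/* e.g. key 0x2 (NOT READY), asc 0x04, ascq 0x01: becoming ready */
	SD_INFO(SD_LOG_ERROR, un,
	    "sense: key 0x%x asc 0x%x ascq 0x%x\n", key, asc, ascq);
}
#endif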

/*
 *    Function: sd_dump_memory
 *
 * Description: Debug logging routine to print the contents of a user provided
 *		buffer. The output of the buffer is broken up into 256 byte
 *		segments due to a size constraint of the scsi_log()
 *		implementation.
 *
 *   Arguments: un - ptr to softstate
 *		comp - component mask
 *		title - "title" string to precede data when printed
 *		data - ptr to data block to be printed
 *		len - size of data block to be printed
 *		fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
 *
 *     Context: May be called from interrupt context
 */

#define	SD_DUMP_MEMORY_BUF_SIZE	256

static char *sd_dump_format_string[] = {
		" 0x%02x",
		" %c"
};

static void
sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
    int len, int fmt)
{
	int	i, j;
	int	avail_count;
	int	start_offset;
	int	end_offset;
	size_t	entry_len;
	char	*bufp;
	char	*local_buf;
	char	*format_string;

	ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));

	/*
	 * In the debug version of the driver, this function is called from a
	 * number of places which are NOPs in the release driver.
	 * The debug driver therefore has additional methods of filtering
	 * debug output.
	 */
#ifdef SDDEBUG
	/*
	 * In the debug version of the driver we can reduce the amount of debug
	 * messages by setting sd_error_level to something other than
	 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
	 * sd_component_mask.
	 */
	if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
	    (sd_error_level != SCSI_ERR_ALL)) {
		return;
	}
	if (((sd_component_mask & comp) == 0) ||
	    (sd_error_level != SCSI_ERR_ALL)) {
		return;
	}
#else
	if (sd_error_level != SCSI_ERR_ALL) {
		return;
	}
#endif

	local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
	bufp = local_buf;
	/*
	 * Available length is the length of local_buf[], minus the
	 * length of the title string, minus one for the ":", minus
	 * one for the newline, minus one for the NULL terminator.
	 * This gives the #bytes available for holding the printed
	 * values from the given data buffer.
	 */
	if (fmt == SD_LOG_HEX) {
		format_string = sd_dump_format_string[0];
	} else /* SD_LOG_CHAR */ {
		format_string = sd_dump_format_string[1];
	}
	/*
	 * Available count is the number of elements from the given
	 * data buffer that we can fit into the available length.
	 * This is based upon the size of the format string used.
	 * Make one entry and find its size.
	 */
	(void) sprintf(bufp, format_string, data[0]);
	entry_len = strlen(bufp);
	avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;

	j = 0;
	while (j < len) {
		bufp = local_buf;
		bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
		start_offset = j;

		end_offset = start_offset + avail_count;

		(void) sprintf(bufp, "%s:", title);
		bufp += strlen(bufp);
		for (i = start_offset; ((i < end_offset) && (j < len));
		    i++, j++) {
			(void) sprintf(bufp, format_string, data[i]);
			bufp += entry_len;
		}
		(void) sprintf(bufp, "\n");

		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
	}
	kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
}
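
/*
 * Illustrative sketch (not compiled): the avail_count arithmetic used
 * by sd_dump_memory() above, as a worked example. With the hex format
 * " 0x%02x" each entry prints as 5 characters, so for a title such as
 * "Sense Data" (10 chars) one 256-byte line holds
 * (256 - 10 - 3) / 5 = 48 entries per scsi_log() call. The helper
 * name is hypothetical.
 */
#if 0
static int
sd_example_entries_per_line(const char *title, size_t entry_len)
{
	/* -3 accounts for the ":", the newline, and the NUL terminator */
	return ((SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len);
}
#endif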

/*
 *    Function: sd_print_sense_msg
 *
 * Description: Log a message based upon the given sense data.
 *
 *   Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		arg - ptr to associated sd_sense_info struct
 *		code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *		or SD_NO_RETRY_ISSUED
 *
 *     Context: May be called from interrupt context
 */

static void
sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	uint8_t *sensep;
	daddr_t request_blkno;
	diskaddr_t err_blkno;
	int severity;
	int pfa_flag;
	extern struct scsi_key_strings scsi_cmds[];

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);
	ASSERT(arg != NULL);

	severity = ((struct sd_sense_info *)(arg))->ssi_severity;
	pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;

	if ((code == SD_DELAYED_RETRY_ISSUED) ||
	    (code == SD_IMMEDIATE_RETRY_ISSUED)) {
		severity = SCSI_ERR_RETRYABLE;
	}

	/* Use absolute block number for the request block number */
	request_blkno = xp->xb_blkno;

	/*
	 * Now try to get the error block number from the sense data
	 */
	sensep = xp->xb_sense_data;

	if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
	    (uint64_t *)&err_blkno)) {
		/*
		 * We retrieved the error block number from the information
		 * portion of the sense data.
		 *
		 * For USCSI commands we are better off using the error
		 * block no. as the requested block no. (This is the best
		 * we can estimate.)
		 */
		if ((SD_IS_BUFIO(xp) == FALSE) &&
		    ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
			request_blkno = err_blkno;
		}
	} else {
		/*
		 * Without the es_valid bit set (for fixed format) or an
		 * information descriptor (for descriptor format) we cannot
		 * be certain of the error blkno, so just use the
		 * request_blkno.
		 */
		err_blkno = (diskaddr_t)request_blkno;
	}

	/*
	 * The following will log the buffer contents for the release driver
	 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
	 * level is set to verbose.
	 */
	sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
	    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
	sd_dump_memory(un, SD_LOG_IO, "Sense Data",
	    (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);

	if (pfa_flag == FALSE) {
		/* This is normally only set for USCSI */
		if ((pktp->pkt_flags & FLAG_SILENT) != 0) {
			return;
		}

		if ((SD_IS_BUFIO(xp) == TRUE) &&
		    (((sd_level_mask & SD_LOGMASK_DIAG) == 0) &&
		    (severity < sd_error_level))) {
			return;
		}
	}
	/*
	 * Check for Sonoma Failover and keep a count of how many failed I/O's
	 */
	if ((SD_IS_LSI(un)) &&
	    (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) &&
	    (scsi_sense_asc(sensep) == 0x94) &&
	    (scsi_sense_ascq(sensep) == 0x01)) {
		un->un_sonoma_failure_count++;
		if (un->un_sonoma_failure_count > 1) {
			return;
		}
	}

	if (SD_FM_LOG(un) == SD_FM_LOG_NSUP ||
	    ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) &&
	    (pktp->pkt_resid == 0))) {
		scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity,
		    request_blkno, err_blkno, scsi_cmds,
		    (struct scsi_extended_sense *)sensep,
		    un->un_additional_codes, NULL);
	}
}

/*
 *    Function: sd_sense_key_no_sense
 *
 * Description: Recovery action when sense data was not received.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	SD_UPDATE_ERRSTATS(un, sd_softerrs);

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}


/*
 *    Function: sd_sense_key_recoverable_error
 *
 * Description: Recovery actions for a SCSI "Recovered Error" sense key.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;
	uint8_t asc = scsi_sense_asc(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED
	 */
	if ((asc == 0x5D) && (sd_report_pfa != 0)) {
		SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
		si.ssi_severity = SCSI_ERR_INFO;
		si.ssi_pfa_flag = TRUE;
	} else {
		SD_UPDATE_ERRSTATS(un, sd_softerrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_recov_err);
		si.ssi_severity = SCSI_ERR_RECOVERED;
		si.ssi_pfa_flag = FALSE;
	}

	if (pktp->pkt_resid == 0) {
		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
		sd_return_command(un, bp);
		return;
	}

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}


/*
 *    Function: sd_sense_key_not_ready
 *
 * Description: Recovery actions for a SCSI "Not Ready" sense key.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;
	uint8_t asc = scsi_sense_asc(sense_datap);
	uint8_t ascq = scsi_sense_ascq(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	/*
	 * Update error stats after first NOT READY error. Disks may have
	 * been powered down and may need to be restarted.  For CDROMs,
	 * report NOT READY errors only if media is present.
	 */
	if ((ISCD(un) && (asc == 0x3A)) ||
	    (xp->xb_nr_retry_count > 0)) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
	}

	/*
	 * Just fail if the "not ready" retry limit has been reached.
	 */
	if (xp->xb_nr_retry_count >= un->un_notready_retry_count) {
		/* Special check for error message printing for removables. */
		if (un->un_f_has_removable_media && (asc == 0x04) &&
		    (ascq >= 0x04)) {
			si.ssi_severity = SCSI_ERR_ALL;
		}
		goto fail_command;
	}

	/*
	 * Check the ASC and ASCQ in the sense data as needed, to determine
	 * what to do.
	 */
	switch (asc) {
	case 0x04:	/* LOGICAL UNIT NOT READY */
		/*
		 * disk drives that don't spin up result in a very long delay
		 * in format without warning messages. We will log a message
		 * if the error level is set to verbose.
		 */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "logical unit not ready, resetting disk\n");
		}

		/*
		 * There are different requirements for CDROMs and disks for
		 * the number of retries.  If a CD-ROM is giving this, it is
		 * probably reading TOC and is in the process of getting
		 * ready, so we should keep on trying for a long time to make
		 * sure that all types of media are taken into account (for
		 * some media the drive takes a long time to read TOC).  For
		 * disks we do not want to retry this too many times as this
		 * can cause a long hang in format when the drive refuses to
		 * spin up (a very common failure).
		 */
		switch (ascq) {
		case 0x00:  /* LUN NOT READY, CAUSE NOT REPORTABLE */
			/*
			 * Disk drives frequently refuse to spin up which
			 * results in a very long hang in format without
			 * warning messages.
			 *
			 * Note: This code preserves the legacy behavior of
			 * comparing xb_nr_retry_count against zero for fibre
			 * channel targets instead of comparing against the
			 * un_reset_retry_count value.  The reason for this
			 * discrepancy has been so utterly lost beneath the
			 * Sands of Time that even Indiana Jones could not
			 * find it.
			 */
			if (un->un_f_is_fibre == TRUE) {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_nr_retry_count > 0)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			} else {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_nr_retry_count >
				    un->un_reset_retry_count)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			}
			break;

		case 0x01:  /* LUN IS IN PROCESS OF BECOMING READY */
			/*
			 * If the target is in the process of becoming
			 * ready, just proceed with the retry. This can
			 * happen with CD-ROMs that take a long time to
			 * read TOC after a power cycle or reset.
			 */
			goto do_retry;

		case 0x02:  /* LUN NOT READY, INITIALIZING CMD REQUIRED */
			break;

		case 0x03:  /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
			/*
			 * Retries cannot help here so just fail right away.
			 */
			goto fail_command;

		case 0x88:
			/*
			 * Vendor-unique code for T3/T4: it indicates a
			 * path problem in a multipathed config, but as far as
			 * the target driver is concerned it equates to a fatal
			 * error, so we should just fail the command right away
			 * (without printing anything to the console). If this
			 * is not a T3/T4, fall thru to the default recovery
			 * action.
			 * T3/T4 is FC only, don't need to check is_fibre
			 */
			if (SD_IS_T3(un) || SD_IS_T4(un)) {
				sd_return_failed_command(un, bp, EIO);
				return;
			}
			/* FALLTHRU */

		case 0x04:  /* LUN NOT READY, FORMAT IN PROGRESS */
		case 0x05:  /* LUN NOT READY, REBUILD IN PROGRESS */
		case 0x06:  /* LUN NOT READY, RECALCULATION IN PROGRESS */
		case 0x07:  /* LUN NOT READY, OPERATION IN PROGRESS */
		case 0x08:  /* LUN NOT READY, LONG WRITE IN PROGRESS */
		default:    /* Possible future codes in SCSI spec? */
			/*
			 * For removable-media devices, do not retry if
			 * ASCQ > 2 as these result mostly from USCSI commands
			 * on MMC devices issued to check status of an
			 * operation initiated in immediate mode.  Also for
			 * ASCQ >= 4 do not print console messages as these
			 * mainly represent a user-initiated operation
			 * instead of a system failure.
			 */
			if (un->un_f_has_removable_media) {
				si.ssi_severity = SCSI_ERR_ALL;
				goto fail_command;
			}
			break;
		}

		/*
		 * As part of our recovery attempt for the NOT READY
		 * condition, we issue a START STOP UNIT command. However
		 * we want to wait for a short delay before attempting this
		 * as there may still be more commands coming back from the
		 * target with the check condition. To do this we use
		 * timeout(9F) to call sd_start_stop_unit_callback() after
		 * the delay interval expires. (sd_start_stop_unit_callback()
		 * dispatches sd_start_stop_unit_task(), which will issue
		 * the actual START STOP UNIT command.) The delay interval
		 * is one-half of the delay that we will use to retry the
		 * command that generated the NOT READY condition.
		 *
		 * Note that we could just dispatch sd_start_stop_unit_task()
		 * from here and allow it to sleep for the delay interval,
		 * but then we would be tying up the taskq thread
		 * unnecessarily for the duration of the delay.
		 *
		 * Do not issue the START STOP UNIT if the current command
		 * is already a START STOP UNIT.
		 */
		if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
			break;
		}

		/*
		 * Do not schedule the timeout if one is already pending.
		 */
		if (un->un_startstop_timeid != NULL) {
			SD_INFO(SD_LOG_ERROR, un,
			    "sd_sense_key_not_ready: restart already issued to"
			    " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
			    ddi_get_instance(SD_DEVINFO(un)));
			break;
		}

		/*
		 * Schedule the START STOP UNIT command, then queue the command
		 * for a retry.
		 *
		 * Note: A timeout is not scheduled for this retry because we
		 * want the retry to be serial with the START_STOP_UNIT. The
		 * retry will be started when the START_STOP_UNIT is completed
		 * in sd_start_stop_unit_task.
		 */
		un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
		    un, un->un_busy_timeout / 2);
		xp->xb_nr_retry_count++;
		sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
		return;

	case 0x05:	/* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "unit does not respond to selection\n");
		}
		break;

	case 0x3A:	/* MEDIUM NOT PRESENT */
		if (sd_error_level >= SCSI_ERR_FATAL) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Caddy not inserted in drive\n");
		}

		sr_ejected(un);
		un->un_mediastate = DKIO_EJECTED;
		/* The state has changed, inform the media watch routines */
		cv_broadcast(&un->un_state_cv);
		/* Just fail if no media is present in the drive. */
		goto fail_command;

	default:
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
			    "Unit not Ready. Additional sense code 0x%x\n",
			    asc);
		}
		break;
	}

do_retry:

	/*
	 * Retry the command, as some targets may report NOT READY for
	 * several seconds after being reset.
	 */
	xp->xb_nr_retry_count++;
	si.ssi_severity = SCSI_ERR_RETRYABLE;
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
	    &si, EIO, un->un_busy_timeout, NULL);

	return;

fail_command:
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}
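
/*
 * Illustrative sketch (not compiled): a condensed summary of how the
 * ASC 0x04 (LOGICAL UNIT NOT READY) ASCQ values are treated by
 * sd_sense_key_not_ready() above. The enum and helper are hypothetical
 * summaries for illustration, not driver interfaces, and they omit the
 * reset and START STOP UNIT scheduling details handled by the real code.
 */
#if 0
typedef enum { NR_RETRY, NR_FAIL, NR_RESET_AND_RETRY } nr_action_t;

static nr_action_t
sd_example_lun_not_ready_action(uint8_t ascq, boolean_t removable)
{
	switch (ascq) {
	case 0x00:	/* cause not reportable: may reset, then retry */
		return (NR_RESET_AND_RETRY);
	case 0x01:	/* becoming ready: just keep retrying */
		return (NR_RETRY);
	case 0x03:	/* manual intervention required: hopeless */
		return (NR_FAIL);
	default:	/* in-progress states (0x04-0x08) and others */
		return (removable ? NR_FAIL : NR_RETRY);
	}
}
#endif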
18023 * 18024 * Note that we could just dispatch sd_start_stop_unit_task() 18025 * from here and allow it to sleep for the delay interval, 18026 * but then we would be tying up the taskq thread 18027 * unnecessarily for the duration of the delay. 18028 * 18029 * Do not issue the START STOP UNIT if the current command 18030 * is already a START STOP UNIT. 18031 */ 18032 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 18033 break; 18034 } 18035 18036 /* 18037 * Do not schedule the timeout if one is already pending. 18038 */ 18039 if (un->un_startstop_timeid != NULL) { 18040 SD_INFO(SD_LOG_ERROR, un, 18041 "sd_sense_key_not_ready: restart already issued to" 18042 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 18043 ddi_get_instance(SD_DEVINFO(un))); 18044 break; 18045 } 18046 18047 /* 18048 * Schedule the START STOP UNIT command, then queue the command 18049 * for a retry. 18050 * 18051 * Note: A timeout is not scheduled for this retry because we 18052 * want the retry to be serial with the START_STOP_UNIT. The 18053 * retry will be started when the START_STOP_UNIT is completed 18054 * in sd_start_stop_unit_task. 18055 */ 18056 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 18057 un, un->un_busy_timeout / 2); 18058 xp->xb_nr_retry_count++; 18059 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 18060 return; 18061 18062 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 18063 if (sd_error_level < SCSI_ERR_RETRYABLE) { 18064 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18065 "unit does not respond to selection\n"); 18066 } 18067 break; 18068 18069 case 0x3A: /* MEDIUM NOT PRESENT */ 18070 if (sd_error_level >= SCSI_ERR_FATAL) { 18071 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18072 "Caddy not inserted in drive\n"); 18073 } 18074 18075 sr_ejected(un); 18076 un->un_mediastate = DKIO_EJECTED; 18077 /* The state has changed; inform the media watch routines */ 18078 cv_broadcast(&un->un_state_cv); 18079 /* Just fail if no media is present in the drive. */ 18080 goto fail_command; 18081 18082 default: 18083 if (sd_error_level < SCSI_ERR_RETRYABLE) { 18084 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 18085 "Unit not Ready. Additional sense code 0x%x\n", 18086 asc); 18087 } 18088 break; 18089 } 18090 18091 do_retry: 18092 18093 /* 18094 * Retry the command, as some targets may report NOT READY for 18095 * several seconds after being reset. 18096 */ 18097 xp->xb_nr_retry_count++; 18098 si.ssi_severity = SCSI_ERR_RETRYABLE; 18099 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 18100 &si, EIO, un->un_busy_timeout, NULL); 18101 18102 return; 18103 18104 fail_command: 18105 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18106 sd_return_failed_command(un, bp, EIO); 18107 } 18108 18109 18110 18111 /* 18112 * Function: sd_sense_key_medium_or_hardware_error 18113 * 18114 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 18115 * sense key.
18116 * 18117 * Context: May be called from interrupt context 18118 */ 18119 18120 static void 18121 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 18122 uint8_t *sense_datap, 18123 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18124 { 18125 struct sd_sense_info si; 18126 uint8_t sense_key = scsi_sense_key(sense_datap); 18127 uint8_t asc = scsi_sense_asc(sense_datap); 18128 18129 ASSERT(un != NULL); 18130 ASSERT(mutex_owned(SD_MUTEX(un))); 18131 ASSERT(bp != NULL); 18132 ASSERT(xp != NULL); 18133 ASSERT(pktp != NULL); 18134 18135 si.ssi_severity = SCSI_ERR_FATAL; 18136 si.ssi_pfa_flag = FALSE; 18137 18138 if (sense_key == KEY_MEDIUM_ERROR) { 18139 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 18140 } 18141 18142 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18143 18144 if ((un->un_reset_retry_count != 0) && 18145 (xp->xb_retry_count == un->un_reset_retry_count)) { 18146 mutex_exit(SD_MUTEX(un)); 18147 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 18148 if (un->un_f_allow_bus_device_reset == TRUE) { 18149 18150 boolean_t try_resetting_target = B_TRUE; 18151 18152 /* 18153 * We need to handle specific ASC values when we are 18154 * handling a KEY_HARDWARE_ERROR. In particular, 18155 * taking the default action of resetting the target may 18156 * not be the appropriate way to attempt recovery. 18157 * Resetting a target because of a single LUN failure 18158 * victimizes all LUNs on that target. 18159 * 18160 * This is true for LSI arrays: if an LSI 18161 * array controller returns an ASC of 0x84 (LUN Dead), we 18162 * should trust it. 18163 */ 18164 18165 if (sense_key == KEY_HARDWARE_ERROR) { 18166 switch (asc) { 18167 case 0x84: 18168 if (SD_IS_LSI(un)) { 18169 try_resetting_target = B_FALSE; 18170 } 18171 break; 18172 default: 18173 break; 18174 } 18175 } 18176 18177 if (try_resetting_target == B_TRUE) { 18178 int reset_retval = 0; 18179 if (un->un_f_lun_reset_enabled == TRUE) { 18180 SD_TRACE(SD_LOG_IO_CORE, un, 18181 "sd_sense_key_medium_or_hardware_" 18182 "error: issuing RESET_LUN\n"); 18183 reset_retval = 18184 scsi_reset(SD_ADDRESS(un), 18185 RESET_LUN); 18186 } 18187 if (reset_retval == 0) { 18188 SD_TRACE(SD_LOG_IO_CORE, un, 18189 "sd_sense_key_medium_or_hardware_" 18190 "error: issuing RESET_TARGET\n"); 18191 (void) scsi_reset(SD_ADDRESS(un), 18192 RESET_TARGET); 18193 } 18194 } 18195 } 18196 mutex_enter(SD_MUTEX(un)); 18197 } 18198 18199 /* 18200 * This really ought to be a fatal error, but we will retry anyway 18201 * as some drives report this as a spurious error. 18202 */ 18203 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18204 &si, EIO, (clock_t)0, NULL); 18205 } 18206 18207 18208 18209 /* 18210 * Function: sd_sense_key_illegal_request 18211 * 18212 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
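* (An ILLEGAL REQUEST sense key means the target rejected the CDB or its parameters as invalid or unsupported, so reissuing the identical command cannot succeed; the command is therefore failed immediately below.)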
18213 * 18214 * Context: May be called from interrupt context 18215 */ 18216 18217 static void 18218 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 18219 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18220 { 18221 struct sd_sense_info si; 18222 18223 ASSERT(un != NULL); 18224 ASSERT(mutex_owned(SD_MUTEX(un))); 18225 ASSERT(bp != NULL); 18226 ASSERT(xp != NULL); 18227 ASSERT(pktp != NULL); 18228 18229 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 18230 18231 si.ssi_severity = SCSI_ERR_INFO; 18232 si.ssi_pfa_flag = FALSE; 18233 18234 /* Pointless to retry if the target thinks it's an illegal request */ 18235 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18236 sd_return_failed_command(un, bp, EIO); 18237 } 18238 18239 18240 18241 18242 /* 18243 * Function: sd_sense_key_unit_attention 18244 * 18245 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 18246 * 18247 * Context: May be called from interrupt context 18248 */ 18249 18250 static void 18251 sd_sense_key_unit_attention(struct sd_lun *un, 18252 uint8_t *sense_datap, 18253 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18254 { 18255 /* 18256 * For UNIT ATTENTION we allow retries for one minute. Devices 18257 * like Sonoma can keep returning UNIT ATTENTION for close to 18258 * a minute under certain conditions. 18259 */ 18260 int retry_check_flag = SD_RETRIES_UA; 18261 boolean_t kstat_updated = B_FALSE; 18262 struct sd_sense_info si; 18263 uint8_t asc = scsi_sense_asc(sense_datap); 18264 uint8_t ascq = scsi_sense_ascq(sense_datap); 18265 18266 ASSERT(un != NULL); 18267 ASSERT(mutex_owned(SD_MUTEX(un))); 18268 ASSERT(bp != NULL); 18269 ASSERT(xp != NULL); 18270 ASSERT(pktp != NULL); 18271 18272 si.ssi_severity = SCSI_ERR_INFO; 18273 si.ssi_pfa_flag = FALSE; 18274 18275 18276 switch (asc) { 18277 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 18278 if (sd_report_pfa != 0) { 18279 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 18280 si.ssi_pfa_flag = TRUE; 18281 retry_check_flag = SD_RETRIES_STANDARD; 18282 goto do_retry; 18283 } 18284 18285 break; 18286 18287 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 18288 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 18289 un->un_resvd_status |= 18290 (SD_LOST_RESERVE | SD_WANT_RESERVE); 18291 } 18292 #ifdef _LP64 18293 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 18294 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 18295 un, KM_NOSLEEP) == 0) { 18296 /* 18297 * If we can't dispatch the task we'll just 18298 * live without descriptor sense. We can 18299 * try again on the next "unit attention" 18300 */ 18301 SD_ERROR(SD_LOG_ERROR, un, 18302 "sd_sense_key_unit_attention: " 18303 "Could not dispatch " 18304 "sd_reenable_dsense_task\n"); 18305 } 18306 } 18307 #endif /* _LP64 */ 18308 /* FALLTHRU */ 18309 18310 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 18311 if (!un->un_f_has_removable_media) { 18312 break; 18313 } 18314 18315 /* 18316 * When we get a unit attention from a removable-media device, 18317 * it may be in a state that will take a long time to recover 18318 * (e.g., from a reset). Since we are executing in interrupt 18319 * context here, we cannot wait around for the device to come 18320 * back. So hand this command off to sd_media_change_task() 18321 * for deferred processing under taskq thread context. (Note 18322 * that the command still may be failed if a problem is 18323 * encountered at a later time.)
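* The deferred path is: the taskq_dispatch() below hands the scsi_pkt to sd_media_change_task(), which polls via sd_handle_mchange() until the device settles, and then either retries or fails the original buf (see those functions later in this file).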
18324 */ 18325 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 18326 KM_NOSLEEP) == 0) { 18327 /* 18328 * Cannot dispatch the request so fail the command. 18329 */ 18330 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18331 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 18332 si.ssi_severity = SCSI_ERR_FATAL; 18333 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18334 sd_return_failed_command(un, bp, EIO); 18335 } 18336 18337 /* 18338 * If we failed to dispatch sd_media_change_task(), the kstats 18339 * were already updated above. If the dispatch succeeded, the 18340 * kstats will be updated later if the task encounters an error. 18341 * Either way, set the kstat_updated flag here. 18342 */ 18343 kstat_updated = B_TRUE; 18344 18345 /* 18346 * Either the command has been successfully dispatched to a 18347 * task Q for retrying, or the dispatch failed. In either case 18348 * do NOT retry again by calling sd_retry_command. This sets up 18349 * two retries of the same command and when one completes and 18350 * frees the resources the other will access freed memory, 18351 * a bad thing. 18352 */ 18353 return; 18354 18355 default: 18356 break; 18357 } 18358 18359 /* 18360 * ASC ASCQ 18361 * 2A 09 Capacity data has changed 18362 * 2A 01 Mode parameters changed 18363 * 3F 0E Reported luns data has changed 18364 * Arrays that support logical unit expansion should report 18365 * capacity changes (2Ah/09). "Mode parameters changed" and 18366 * "reported luns data has changed" serve as approximations. 18367 */ 18368 if (((asc == 0x2a) && (ascq == 0x09)) || 18369 ((asc == 0x2a) && (ascq == 0x01)) || 18370 ((asc == 0x3f) && (ascq == 0x0e))) { 18371 if (taskq_dispatch(sd_tq, sd_target_change_task, un, 18372 KM_NOSLEEP) == 0) { 18373 SD_ERROR(SD_LOG_ERROR, un, 18374 "sd_sense_key_unit_attention: " 18375 "Could not dispatch sd_target_change_task\n"); 18376 } 18377 } 18378 18379 /* 18380 * Update kstat if we haven't done that. 18381 */ 18382 if (!kstat_updated) { 18383 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18384 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 18385 } 18386 18387 do_retry: 18388 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 18389 EIO, SD_UA_RETRY_DELAY, NULL); 18390 } 18391 18392 18393 18394 /* 18395 * Function: sd_sense_key_fail_command 18396 * 18397 * Description: Used to fail a command when we don't like the sense key that 18398 * was returned. 18399 * 18400 * Context: May be called from interrupt context 18401 */ 18402 18403 static void 18404 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 18405 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18406 { 18407 struct sd_sense_info si; 18408 18409 ASSERT(un != NULL); 18410 ASSERT(mutex_owned(SD_MUTEX(un))); 18411 ASSERT(bp != NULL); 18412 ASSERT(xp != NULL); 18413 ASSERT(pktp != NULL); 18414 18415 si.ssi_severity = SCSI_ERR_FATAL; 18416 si.ssi_pfa_flag = FALSE; 18417 18418 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18419 sd_return_failed_command(un, bp, EIO); 18420 } 18421 18422 18423 18424 /* 18425 * Function: sd_sense_key_blank_check 18426 * 18427 * Description: Recovery actions for a SCSI "Blank Check" sense key. 18428 * Has no monetary connotation.
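* (A BLANK CHECK is typically returned when a read encounters blank or unwritten media, e.g. on CD/DVD or other write-once devices.)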
18429 * 18430 * Context: May be called from interrupt context 18431 */ 18432 18433 static void 18434 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 18435 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18436 { 18437 struct sd_sense_info si; 18438 18439 ASSERT(un != NULL); 18440 ASSERT(mutex_owned(SD_MUTEX(un))); 18441 ASSERT(bp != NULL); 18442 ASSERT(xp != NULL); 18443 ASSERT(pktp != NULL); 18444 18445 /* 18446 * Blank check is not fatal for removable devices, therefore 18447 * it does not require a console message. 18448 */ 18449 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 18450 SCSI_ERR_FATAL; 18451 si.ssi_pfa_flag = FALSE; 18452 18453 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18454 sd_return_failed_command(un, bp, EIO); 18455 } 18456 18457 18458 18459 18460 /* 18461 * Function: sd_sense_key_aborted_command 18462 * 18463 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 18464 * 18465 * Context: May be called from interrupt context 18466 */ 18467 18468 static void 18469 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 18470 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18471 { 18472 struct sd_sense_info si; 18473 18474 ASSERT(un != NULL); 18475 ASSERT(mutex_owned(SD_MUTEX(un))); 18476 ASSERT(bp != NULL); 18477 ASSERT(xp != NULL); 18478 ASSERT(pktp != NULL); 18479 18480 si.ssi_severity = SCSI_ERR_FATAL; 18481 si.ssi_pfa_flag = FALSE; 18482 18483 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18484 18485 /* 18486 * This really ought to be a fatal error, but we will retry anyway 18487 * as some drives report this as a spurious error. 18488 */ 18489 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18490 &si, EIO, drv_usectohz(100000), NULL); 18491 } 18492 18493 18494 18495 /* 18496 * Function: sd_sense_key_default 18497 * 18498 * Description: Default recovery action for several SCSI sense keys (basically 18499 * attempts a retry). 18500 * 18501 * Context: May be called from interrupt context 18502 */ 18503 18504 static void 18505 sd_sense_key_default(struct sd_lun *un, 18506 uint8_t *sense_datap, 18507 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18508 { 18509 struct sd_sense_info si; 18510 uint8_t sense_key = scsi_sense_key(sense_datap); 18511 18512 ASSERT(un != NULL); 18513 ASSERT(mutex_owned(SD_MUTEX(un))); 18514 ASSERT(bp != NULL); 18515 ASSERT(xp != NULL); 18516 ASSERT(pktp != NULL); 18517 18518 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18519 18520 /* 18521 * Undecoded sense key. Attempt retries and hope that will fix 18522 * the problem. Otherwise, we're dead. 18523 */ 18524 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 18525 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18526 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 18527 } 18528 18529 si.ssi_severity = SCSI_ERR_FATAL; 18530 si.ssi_pfa_flag = FALSE; 18531 18532 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18533 &si, EIO, (clock_t)0, NULL); 18534 } 18535 18536 18537 18538 /* 18539 * Function: sd_print_retry_msg 18540 * 18541 * Description: Print a message indicating the retry action being taken. 18542 * 18543 * Arguments: un - ptr to associated softstate 18544 * bp - ptr to buf(9S) for the command 18545 * arg - not used. 
18546 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18547 * or SD_NO_RETRY_ISSUED 18548 * 18549 * Context: May be called from interrupt context 18550 */ 18551 /* ARGSUSED */ 18552 static void 18553 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 18554 { 18555 struct sd_xbuf *xp; 18556 struct scsi_pkt *pktp; 18557 char *reasonp; 18558 char *msgp; 18559 18560 ASSERT(un != NULL); 18561 ASSERT(mutex_owned(SD_MUTEX(un))); 18562 ASSERT(bp != NULL); 18563 pktp = SD_GET_PKTP(bp); 18564 ASSERT(pktp != NULL); 18565 xp = SD_GET_XBUF(bp); 18566 ASSERT(xp != NULL); 18567 18568 ASSERT(!mutex_owned(&un->un_pm_mutex)); 18569 mutex_enter(&un->un_pm_mutex); 18570 if ((un->un_state == SD_STATE_SUSPENDED) || 18571 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 18572 (pktp->pkt_flags & FLAG_SILENT)) { 18573 mutex_exit(&un->un_pm_mutex); 18574 goto update_pkt_reason; 18575 } 18576 mutex_exit(&un->un_pm_mutex); 18577 18578 /* 18579 * Suppress messages if they are all the same pkt_reason; with 18580 * TQ, many (up to 256) are returned with the same pkt_reason. 18581 * If we are in panic, then suppress the retry messages. 18582 */ 18583 switch (flag) { 18584 case SD_NO_RETRY_ISSUED: 18585 msgp = "giving up"; 18586 break; 18587 case SD_IMMEDIATE_RETRY_ISSUED: 18588 case SD_DELAYED_RETRY_ISSUED: 18589 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 18590 ((pktp->pkt_reason == un->un_last_pkt_reason) && 18591 (sd_error_level != SCSI_ERR_ALL))) { 18592 return; 18593 } 18594 msgp = "retrying command"; 18595 break; 18596 default: 18597 goto update_pkt_reason; 18598 } 18599 18600 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 18601 scsi_rname(pktp->pkt_reason)); 18602 18603 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) { 18604 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18605 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 18606 } 18607 18608 update_pkt_reason: 18609 /* 18610 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 18611 * This is to prevent multiple console messages for the same failure 18612 * condition. Note that un->un_last_pkt_reason is NOT restored if & 18613 * when the command is retried successfully because there still may be 18614 * more commands coming back with the same value of pktp->pkt_reason. 18615 */ 18616 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 18617 un->un_last_pkt_reason = pktp->pkt_reason; 18618 } 18619 } 18620 18621 18622 /* 18623 * Function: sd_print_cmd_incomplete_msg 18624 * 18625 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 18626 * 18627 * Arguments: un - ptr to associated softstate 18628 * bp - ptr to buf(9S) for the command 18629 * arg - passed to sd_print_retry_msg() 18630 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18631 * or SD_NO_RETRY_ISSUED 18632 * 18633 * Context: May be called from interrupt context 18634 */ 18635 18636 static void 18637 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 18638 int code) 18639 { 18640 dev_info_t *dip; 18641 18642 ASSERT(un != NULL); 18643 ASSERT(mutex_owned(SD_MUTEX(un))); 18644 ASSERT(bp != NULL); 18645 18646 switch (code) { 18647 case SD_NO_RETRY_ISSUED: 18648 /* Command was failed. Someone turned off this target? 
*/ 18649 if (un->un_state != SD_STATE_OFFLINE) { 18650 /* 18651 * Suppress message if we are detaching and 18652 * device has been disconnected 18653 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 18654 * private interface and not part of the DDI 18655 */ 18656 dip = un->un_sd->sd_dev; 18657 if (!(DEVI_IS_DETACHING(dip) && 18658 DEVI_IS_DEVICE_REMOVED(dip))) { 18659 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18660 "disk not responding to selection\n"); 18661 } 18662 New_state(un, SD_STATE_OFFLINE); 18663 } 18664 break; 18665 18666 case SD_DELAYED_RETRY_ISSUED: 18667 case SD_IMMEDIATE_RETRY_ISSUED: 18668 default: 18669 /* Command was successfully queued for retry */ 18670 sd_print_retry_msg(un, bp, arg, code); 18671 break; 18672 } 18673 } 18674 18675 18676 /* 18677 * Function: sd_pkt_reason_cmd_incomplete 18678 * 18679 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 18680 * 18681 * Context: May be called from interrupt context 18682 */ 18683 18684 static void 18685 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 18686 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18687 { 18688 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 18689 18690 ASSERT(un != NULL); 18691 ASSERT(mutex_owned(SD_MUTEX(un))); 18692 ASSERT(bp != NULL); 18693 ASSERT(xp != NULL); 18694 ASSERT(pktp != NULL); 18695 18696 /* Do not do a reset if selection did not complete */ 18697 /* Note: Should this not just check the bit? */ 18698 if (pktp->pkt_state != STATE_GOT_BUS) { 18699 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18700 sd_reset_target(un, pktp); 18701 } 18702 18703 /* 18704 * If the target was not successfully selected, then set 18705 * SD_RETRIES_FAILFAST to indicate that we lost communication 18706 * with the target, and further retries and/or commands are 18707 * likely to take a long time. 18708 */ 18709 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 18710 flag |= SD_RETRIES_FAILFAST; 18711 } 18712 18713 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18714 18715 sd_retry_command(un, bp, flag, 18716 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18717 } 18718 18719 18720 18721 /* 18722 * Function: sd_pkt_reason_cmd_tran_err 18723 * 18724 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 18725 * 18726 * Context: May be called from interrupt context 18727 */ 18728 18729 static void 18730 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 18731 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18732 { 18733 ASSERT(un != NULL); 18734 ASSERT(mutex_owned(SD_MUTEX(un))); 18735 ASSERT(bp != NULL); 18736 ASSERT(xp != NULL); 18737 ASSERT(pktp != NULL); 18738 18739 /* 18740 * Do not reset if we got a parity error, or if 18741 * selection did not complete. 18742 */ 18743 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18744 /* Note: Should this not just check the bit for pkt_state? */ 18745 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 18746 (pktp->pkt_state != STATE_GOT_BUS)) { 18747 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18748 sd_reset_target(un, pktp); 18749 } 18750 18751 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18752 18753 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18754 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18755 } 18756 18757 18758 18759 /* 18760 * Function: sd_pkt_reason_cmd_reset 18761 * 18762 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
18763 * 18764 * Context: May be called from interrupt context 18765 */ 18766 18767 static void 18768 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 18769 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18770 { 18771 ASSERT(un != NULL); 18772 ASSERT(mutex_owned(SD_MUTEX(un))); 18773 ASSERT(bp != NULL); 18774 ASSERT(xp != NULL); 18775 ASSERT(pktp != NULL); 18776 18777 /* The target may still be running the command, so try to reset. */ 18778 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18779 sd_reset_target(un, pktp); 18780 18781 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18782 18783 /* 18784 * If pkt_reason is CMD_RESET chances are that this pkt got 18785 * reset because another target on this bus caused it. The target 18786 * that caused it should get CMD_TIMEOUT with pkt_statistics 18787 * of STAT_TIMEOUT/STAT_DEV_RESET. 18788 */ 18789 18790 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18791 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18792 } 18793 18794 18795 18796 18797 /* 18798 * Function: sd_pkt_reason_cmd_aborted 18799 * 18800 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 18801 * 18802 * Context: May be called from interrupt context 18803 */ 18804 18805 static void 18806 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 18807 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18808 { 18809 ASSERT(un != NULL); 18810 ASSERT(mutex_owned(SD_MUTEX(un))); 18811 ASSERT(bp != NULL); 18812 ASSERT(xp != NULL); 18813 ASSERT(pktp != NULL); 18814 18815 /* The target may still be running the command, so try to reset. */ 18816 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18817 sd_reset_target(un, pktp); 18818 18819 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18820 18821 /* 18822 * If pkt_reason is CMD_ABORTED chances are that this pkt got 18823 * aborted because another target on this bus caused it. The target 18824 * that caused it should get CMD_TIMEOUT with pkt_statistics 18825 * of STAT_TIMEOUT/STAT_DEV_RESET. 18826 */ 18827 18828 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18829 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18830 } 18831 18832 18833 18834 /* 18835 * Function: sd_pkt_reason_cmd_timeout 18836 * 18837 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 18838 * 18839 * Context: May be called from interrupt context 18840 */ 18841 18842 static void 18843 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 18844 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18845 { 18846 ASSERT(un != NULL); 18847 ASSERT(mutex_owned(SD_MUTEX(un))); 18848 ASSERT(bp != NULL); 18849 ASSERT(xp != NULL); 18850 ASSERT(pktp != NULL); 18851 18852 18853 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18854 sd_reset_target(un, pktp); 18855 18856 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18857 18858 /* 18859 * A command timeout indicates that we could not establish 18860 * communication with the target, so set SD_RETRIES_FAILFAST 18861 * as further retries/commands are likely to take a long time. 18862 */ 18863 sd_retry_command(un, bp, 18864 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 18865 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18866 } 18867 18868 18869 18870 /* 18871 * Function: sd_pkt_reason_cmd_unx_bus_free 18872 * 18873 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
18874 * 18875 * Context: May be called from interrupt context 18876 */ 18877 18878 static void 18879 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 18880 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18881 { 18882 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 18883 18884 ASSERT(un != NULL); 18885 ASSERT(mutex_owned(SD_MUTEX(un))); 18886 ASSERT(bp != NULL); 18887 ASSERT(xp != NULL); 18888 ASSERT(pktp != NULL); 18889 18890 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18891 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18892 18893 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 18894 sd_print_retry_msg : NULL; 18895 18896 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18897 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18898 } 18899 18900 18901 /* 18902 * Function: sd_pkt_reason_cmd_tag_reject 18903 * 18904 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 18905 * 18906 * Context: May be called from interrupt context 18907 */ 18908 18909 static void 18910 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 18911 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18912 { 18913 ASSERT(un != NULL); 18914 ASSERT(mutex_owned(SD_MUTEX(un))); 18915 ASSERT(bp != NULL); 18916 ASSERT(xp != NULL); 18917 ASSERT(pktp != NULL); 18918 18919 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18920 pktp->pkt_flags = 0; 18921 un->un_tagflags = 0; 18922 if (un->un_f_opt_queueing == TRUE) { 18923 un->un_throttle = min(un->un_throttle, 3); 18924 } else { 18925 un->un_throttle = 1; 18926 } 18927 mutex_exit(SD_MUTEX(un)); 18928 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 18929 mutex_enter(SD_MUTEX(un)); 18930 18931 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18932 18933 /* Legacy behavior not to check retry counts here. */ 18934 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 18935 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18936 } 18937 18938 18939 /* 18940 * Function: sd_pkt_reason_default 18941 * 18942 * Description: Default recovery actions for SCSA pkt_reason values that 18943 * do not have more explicit recovery actions. 18944 * 18945 * Context: May be called from interrupt context 18946 */ 18947 18948 static void 18949 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 18950 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18951 { 18952 ASSERT(un != NULL); 18953 ASSERT(mutex_owned(SD_MUTEX(un))); 18954 ASSERT(bp != NULL); 18955 ASSERT(xp != NULL); 18956 ASSERT(pktp != NULL); 18957 18958 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18959 sd_reset_target(un, pktp); 18960 18961 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18962 18963 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18964 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18965 } 18966 18967 18968 18969 /* 18970 * Function: sd_pkt_status_check_condition 18971 * 18972 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
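* (STATUS_CHECK is the CHECK CONDITION status byte: the target is indicating that sense data describing the fault is available.)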
18973 * 18974 * Context: May be called from interrupt context 18975 */ 18976 18977 static void 18978 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 18979 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18980 { 18981 ASSERT(un != NULL); 18982 ASSERT(mutex_owned(SD_MUTEX(un))); 18983 ASSERT(bp != NULL); 18984 ASSERT(xp != NULL); 18985 ASSERT(pktp != NULL); 18986 18987 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 18988 "entry: buf:0x%p xp:0x%p\n", bp, xp); 18989 18990 /* 18991 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 18992 * command will be retried after the request sense). Otherwise, retry 18993 * the command. Note: we are issuing the request sense even though the 18994 * retry limit may have been reached for the failed command. 18995 */ 18996 if (un->un_f_arq_enabled == FALSE) { 18997 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 18998 "no ARQ, sending request sense command\n"); 18999 sd_send_request_sense_command(un, bp, pktp); 19000 } else { 19001 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 19002 "ARQ, retrying request sense command\n"); 19003 #if defined(__i386) || defined(__amd64) 19004 /* 19005 * The SD_RETRY_DELAY value used here needs to be adjusted 19006 * whenever SD_RETRY_DELAY changes in sddef.h 19007 */ 19008 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 19009 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, 19010 NULL); 19011 #else 19012 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 19013 EIO, SD_RETRY_DELAY, NULL); 19014 #endif 19015 } 19016 19017 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 19018 } 19019 19020 19021 /* 19022 * Function: sd_pkt_status_busy 19023 * 19024 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 19025 * 19026 * Context: May be called from interrupt context 19027 */ 19028 19029 static void 19030 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 19031 struct scsi_pkt *pktp) 19032 { 19033 ASSERT(un != NULL); 19034 ASSERT(mutex_owned(SD_MUTEX(un))); 19035 ASSERT(bp != NULL); 19036 ASSERT(xp != NULL); 19037 ASSERT(pktp != NULL); 19038 19039 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19040 "sd_pkt_status_busy: entry\n"); 19041 19042 /* If retries are exhausted, just fail the command. */ 19043 if (xp->xb_retry_count >= un->un_busy_retry_count) { 19044 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 19045 "device busy too long\n"); 19046 sd_return_failed_command(un, bp, EIO); 19047 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19048 "sd_pkt_status_busy: exit\n"); 19049 return; 19050 } 19051 xp->xb_retry_count++; 19052 19053 /* 19054 * Try to reset the target. However, we do not want to perform 19055 * more than one reset if the device continues to fail. The reset 19056 * will be performed when the retry count reaches the reset 19057 * threshold. This threshold should be set such that at least 19058 * one retry is issued before the reset is performed. 19059 */ 19060 if (xp->xb_retry_count == 19061 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 19062 int rval = 0; 19063 mutex_exit(SD_MUTEX(un)); 19064 if (un->un_f_allow_bus_device_reset == TRUE) { 19065 /* 19066 * First try to reset the LUN; if we cannot then 19067 * try to reset the target.
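* The escalation is deliberately ordered LUN, then target, then the whole bus (RESET_ALL below), since each step disrupts progressively more of the system.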
19068 */ 19069 if (un->un_f_lun_reset_enabled == TRUE) { 19070 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19071 "sd_pkt_status_busy: RESET_LUN\n"); 19072 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 19073 } 19074 if (rval == 0) { 19075 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19076 "sd_pkt_status_busy: RESET_TARGET\n"); 19077 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 19078 } 19079 } 19080 if (rval == 0) { 19081 /* 19082 * If the RESET_LUN and/or RESET_TARGET failed, 19083 * try RESET_ALL 19084 */ 19085 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19086 "sd_pkt_status_busy: RESET_ALL\n"); 19087 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 19088 } 19089 mutex_enter(SD_MUTEX(un)); 19090 if (rval == 0) { 19091 /* 19092 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 19093 * At this point we give up & fail the command. 19094 */ 19095 sd_return_failed_command(un, bp, EIO); 19096 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19097 "sd_pkt_status_busy: exit (failed cmd)\n"); 19098 return; 19099 } 19100 } 19101 19102 /* 19103 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 19104 * we have already checked the retry counts above. 19105 */ 19106 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 19107 EIO, un->un_busy_timeout, NULL); 19108 19109 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19110 "sd_pkt_status_busy: exit\n"); 19111 } 19112 19113 19114 /* 19115 * Function: sd_pkt_status_reservation_conflict 19116 * 19117 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 19118 * command status. 19119 * 19120 * Context: May be called from interrupt context 19121 */ 19122 19123 static void 19124 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 19125 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19126 { 19127 ASSERT(un != NULL); 19128 ASSERT(mutex_owned(SD_MUTEX(un))); 19129 ASSERT(bp != NULL); 19130 ASSERT(xp != NULL); 19131 ASSERT(pktp != NULL); 19132 19133 /* 19134 * If the command was PERSISTENT_RESERVATION_[IN|OUT], the reservation 19135 * conflict could have various causes, such as incorrect keys or the 19136 * initiator not being registered or not holding the reservation. So, we return EACCES to the caller. 19137 */ 19138 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 19139 int cmd = SD_GET_PKT_OPCODE(pktp); 19140 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 19141 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 19142 sd_return_failed_command(un, bp, EACCES); 19143 return; 19144 } 19145 } 19146 19147 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 19148 19149 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 19150 if (sd_failfast_enable != 0) { 19151 /* By definition, we must panic here.... */ 19152 sd_panic_for_res_conflict(un); 19153 /*NOTREACHED*/ 19154 } 19155 SD_ERROR(SD_LOG_IO, un, 19156 "sd_handle_resv_conflict: Disk Reserved\n"); 19157 sd_return_failed_command(un, bp, EACCES); 19158 return; 19159 } 19160 19161 /* 19162 * 1147670: retry only if sd_retry_on_reservation_conflict 19163 * property is set (default is 1). Retries will not succeed 19164 * on a disk reserved by another initiator. HA systems 19165 * may reset this via sd.conf to avoid these retries. 19166 * 19167 * Note: The legacy return code for this failure is EIO; however, EACCES 19168 * seems more appropriate for a reservation conflict.
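* (For illustration only: on HA configurations this global is commonly cleared with an /etc/system line such as "set sd:sd_retry_on_reservation_conflict = 0" (ssd: for the fibre variant), in addition to the sd.conf route mentioned above.)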
19169 */ 19170 if (sd_retry_on_reservation_conflict == 0) { 19171 SD_ERROR(SD_LOG_IO, un, 19172 "sd_handle_resv_conflict: Device Reserved\n"); 19173 sd_return_failed_command(un, bp, EIO); 19174 return; 19175 } 19176 19177 /* 19178 * Retry the command if we can. 19179 * 19180 * Note: The legacy return code for this failure is EIO; however, EACCES 19181 * seems more appropriate for a reservation conflict. 19182 */ 19183 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 19184 (clock_t)2, NULL); 19185 } 19186 19187 19188 19189 /* 19190 * Function: sd_pkt_status_qfull 19191 * 19192 * Description: Handle a QUEUE FULL condition from the target. This can 19193 * occur if the HBA does not handle the queue full condition. 19194 * (Basically this means third-party HBAs, as Sun HBAs will 19195 * handle the queue full condition.) Note that if there are 19196 * some commands already in the transport, then the queue full 19197 * has occurred because the queue for this nexus is actually 19198 * full. If there are no commands in the transport, then the 19199 * queue full results from some other initiator or lun 19200 * consuming all the resources at the target. 19201 * 19202 * Context: May be called from interrupt context 19203 */ 19204 19205 static void 19206 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 19207 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19208 { 19209 ASSERT(un != NULL); 19210 ASSERT(mutex_owned(SD_MUTEX(un))); 19211 ASSERT(bp != NULL); 19212 ASSERT(xp != NULL); 19213 ASSERT(pktp != NULL); 19214 19215 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19216 "sd_pkt_status_qfull: entry\n"); 19217 19218 /* 19219 * Just lower the QFULL throttle and retry the command. Note that 19220 * we do not limit the number of retries here. 19221 */ 19222 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 19223 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 19224 SD_RESTART_TIMEOUT, NULL); 19225 19226 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19227 "sd_pkt_status_qfull: exit\n"); 19228 } 19229 19230 19231 /* 19232 * Function: sd_reset_target 19233 * 19234 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 19235 * RESET_TARGET, or RESET_ALL. 19236 * 19237 * Context: May be called under interrupt context. 19238 */ 19239 19240 static void 19241 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 19242 { 19243 int rval = 0; 19244 19245 ASSERT(un != NULL); 19246 ASSERT(mutex_owned(SD_MUTEX(un))); 19247 ASSERT(pktp != NULL); 19248 19249 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 19250 19251 /* 19252 * No need to reset if the transport layer has already done so.
19253 */ 19254 if ((pktp->pkt_statistics & 19255 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 19256 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19257 "sd_reset_target: no reset\n"); 19258 return; 19259 } 19260 19261 mutex_exit(SD_MUTEX(un)); 19262 19263 if (un->un_f_allow_bus_device_reset == TRUE) { 19264 if (un->un_f_lun_reset_enabled == TRUE) { 19265 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19266 "sd_reset_target: RESET_LUN\n"); 19267 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 19268 } 19269 if (rval == 0) { 19270 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19271 "sd_reset_target: RESET_TARGET\n"); 19272 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 19273 } 19274 } 19275 19276 if (rval == 0) { 19277 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19278 "sd_reset_target: RESET_ALL\n"); 19279 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 19280 } 19281 19282 mutex_enter(SD_MUTEX(un)); 19283 19284 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 19285 } 19286 19287 /* 19288 * Function: sd_target_change_task 19289 * 19290 * Description: Handle dynamic target change 19291 * 19292 * Context: Executes in a taskq() thread context 19293 */ 19294 static void 19295 sd_target_change_task(void *arg) 19296 { 19297 struct sd_lun *un = arg; 19298 uint64_t capacity; 19299 diskaddr_t label_cap; 19300 uint_t lbasize; 19301 sd_ssc_t *ssc; 19302 19303 ASSERT(un != NULL); 19304 ASSERT(!mutex_owned(SD_MUTEX(un))); 19305 19306 if ((un->un_f_blockcount_is_valid == FALSE) || 19307 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 19308 return; 19309 } 19310 19311 ssc = sd_ssc_init(un); 19312 19313 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity, 19314 &lbasize, SD_PATH_DIRECT) != 0) { 19315 SD_ERROR(SD_LOG_ERROR, un, 19316 "sd_target_change_task: fail to read capacity\n"); 19317 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19318 goto task_exit; 19319 } 19320 19321 mutex_enter(SD_MUTEX(un)); 19322 if (capacity <= un->un_blockcount) { 19323 mutex_exit(SD_MUTEX(un)); 19324 goto task_exit; 19325 } 19326 19327 sd_update_block_info(un, lbasize, capacity); 19328 mutex_exit(SD_MUTEX(un)); 19329 19330 /* 19331 * If lun is EFI labeled and lun capacity is greater than the 19332 * capacity contained in the label, log a sys event. 
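* (The sysevent gives userland observers a hook for dynamic LUN expansion; see sd_log_lun_expansion_event() just below, which posts an EC_DEV_STATUS/ESC_DEV_DLE event carrying the device path as its payload.)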
19333 */ 19334 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 19335 (void*)SD_PATH_DIRECT) == 0) { 19336 mutex_enter(SD_MUTEX(un)); 19337 if (un->un_f_blockcount_is_valid && 19338 un->un_blockcount > label_cap) { 19339 mutex_exit(SD_MUTEX(un)); 19340 sd_log_lun_expansion_event(un, KM_SLEEP); 19341 } else { 19342 mutex_exit(SD_MUTEX(un)); 19343 } 19344 } 19345 19346 task_exit: 19347 sd_ssc_fini(ssc); 19348 } 19349 19350 /* 19351 * Function: sd_log_lun_expansion_event 19352 * 19353 * Description: Log a LUN expansion sysevent 19354 * 19355 * Context: Never called from interrupt context 19356 */ 19357 static void 19358 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag) 19359 { 19360 int err; 19361 char *path; 19362 nvlist_t *dle_attr_list; 19363 19364 /* Allocate and build sysevent attribute list */ 19365 err = nvlist_alloc(&dle_attr_list, NV_UNIQUE_NAME_TYPE, km_flag); 19366 if (err != 0) { 19367 SD_ERROR(SD_LOG_ERROR, un, 19368 "sd_log_lun_expansion_event: fail to allocate space\n"); 19369 return; 19370 } 19371 19372 path = kmem_alloc(MAXPATHLEN, km_flag); 19373 if (path == NULL) { 19374 nvlist_free(dle_attr_list); 19375 SD_ERROR(SD_LOG_ERROR, un, 19376 "sd_log_lun_expansion_event: fail to allocate space\n"); 19377 return; 19378 } 19379 /* 19380 * Add path attribute to identify the lun. 19381 * We are using minor node 'a' as the sysevent attribute. 19382 */ 19383 (void) snprintf(path, MAXPATHLEN, "/devices"); 19384 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path)); 19385 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path), 19386 ":a"); 19387 19388 err = nvlist_add_string(dle_attr_list, DEV_PHYS_PATH, path); 19389 if (err != 0) { 19390 nvlist_free(dle_attr_list); 19391 kmem_free(path, MAXPATHLEN); 19392 SD_ERROR(SD_LOG_ERROR, un, 19393 "sd_log_lun_expansion_event: fail to add attribute\n"); 19394 return; 19395 } 19396 19397 /* Log dynamic lun expansion sysevent */ 19398 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS, 19399 ESC_DEV_DLE, dle_attr_list, NULL, km_flag); 19400 if (err != DDI_SUCCESS) { 19401 SD_ERROR(SD_LOG_ERROR, un, 19402 "sd_log_lun_expansion_event: fail to log sysevent\n"); 19403 } 19404 19405 nvlist_free(dle_attr_list); 19406 kmem_free(path, MAXPATHLEN); 19407 } 19408 19409 /* 19410 * Function: sd_media_change_task 19411 * 19412 * Description: Recovery action that waits for a CDROM to become available. 19413 * 19414 * Context: Executes in a taskq() thread context 19415 */ 19416 19417 static void 19418 sd_media_change_task(void *arg) 19419 { 19420 struct scsi_pkt *pktp = arg; 19421 struct sd_lun *un; 19422 struct buf *bp; 19423 struct sd_xbuf *xp; 19424 int err = 0; 19425 int retry_count = 0; 19426 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 19427 struct sd_sense_info si; 19428 19429 ASSERT(pktp != NULL); 19430 bp = (struct buf *)pktp->pkt_private; 19431 ASSERT(bp != NULL); 19432 xp = SD_GET_XBUF(bp); 19433 ASSERT(xp != NULL); 19434 un = SD_GET_UN(bp); 19435 ASSERT(un != NULL); 19436 ASSERT(!mutex_owned(SD_MUTEX(un))); 19437 ASSERT(un->un_f_monitor_media_state); 19438 19439 si.ssi_severity = SCSI_ERR_INFO; 19440 si.ssi_pfa_flag = FALSE; 19441 19442 /* 19443 * When a reset is issued on a CDROM, it takes a long time to 19444 * recover. The first few attempts to read the capacity and do other 19445 * unit attention handling fail (with an ASC of 0x4 and an 19446 * ASCQ of 0x1). In that case we want to retry generously, while 19447 * still limiting the retries for genuine failures such as 19448 * no media in the drive.
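* The loop below therefore starts with a budget of SD_UNIT_ATTENTION_RETRY/10 attempts and widens it to the full SD_UNIT_ATTENTION_RETRY once EAGAIN indicates the drive is actually coming ready; at 0.5 second per attempt this bounds the total wait to roughly SD_UNIT_ATTENTION_RETRY/2 seconds.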
19449 */ 19450 while (retry_count++ < retry_limit) { 19451 if ((err = sd_handle_mchange(un)) == 0) { 19452 break; 19453 } 19454 if (err == EAGAIN) { 19455 retry_limit = SD_UNIT_ATTENTION_RETRY; 19456 } 19457 /* Sleep for 0.5 sec. & try again */ 19458 delay(drv_usectohz(500000)); 19459 } 19460 19461 /* 19462 * Dispatch (retry or fail) the original command here, 19463 * along with appropriate console messages.... 19464 * 19465 * Must grab the mutex before calling sd_retry_command, 19466 * sd_print_sense_msg and sd_return_failed_command. 19467 */ 19468 mutex_enter(SD_MUTEX(un)); 19469 if (err != SD_CMD_SUCCESS) { 19470 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19471 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 19472 si.ssi_severity = SCSI_ERR_FATAL; 19473 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 19474 sd_return_failed_command(un, bp, EIO); 19475 } else { 19476 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 19477 &si, EIO, (clock_t)0, NULL); 19478 } 19479 mutex_exit(SD_MUTEX(un)); 19480 } 19481 19482 19483 19484 /* 19485 * Function: sd_handle_mchange 19486 * 19487 * Description: Perform geometry validation & other recovery when CDROM 19488 * has been removed from drive. 19489 * 19490 * Return Code: 0 for success 19491 * errno-type return code of either sd_send_scsi_DOORLOCK() or 19492 * sd_send_scsi_READ_CAPACITY() 19493 * 19494 * Context: Executes in a taskq() thread context 19495 */ 19496 19497 static int 19498 sd_handle_mchange(struct sd_lun *un) 19499 { 19500 uint64_t capacity; 19501 uint32_t lbasize; 19502 int rval; 19503 sd_ssc_t *ssc; 19504 19505 ASSERT(!mutex_owned(SD_MUTEX(un))); 19506 ASSERT(un->un_f_monitor_media_state); 19507 19508 ssc = sd_ssc_init(un); 19509 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 19510 SD_PATH_DIRECT_PRIORITY); 19511 19512 if (rval != 0) 19513 goto failed; 19514 19515 mutex_enter(SD_MUTEX(un)); 19516 sd_update_block_info(un, lbasize, capacity); 19517 19518 if (un->un_errstats != NULL) { 19519 struct sd_errstats *stp = 19520 (struct sd_errstats *)un->un_errstats->ks_data; 19521 stp->sd_capacity.value.ui64 = (uint64_t) 19522 ((uint64_t)un->un_blockcount * 19523 (uint64_t)un->un_tgt_blocksize); 19524 } 19525 19526 /* 19527 * Check if the media in the device is writable or not 19528 */ 19529 if (ISCD(un)) { 19530 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY); 19531 } 19532 19533 /* 19534 * Note: Maybe let the strategy/partitioning chain worry about getting 19535 * valid geometry. 19536 */ 19537 mutex_exit(SD_MUTEX(un)); 19538 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 19539 19540 19541 if (cmlb_validate(un->un_cmlbhandle, 0, 19542 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 19543 sd_ssc_fini(ssc); 19544 return (EIO); 19545 } else { 19546 if (un->un_f_pkstats_enabled) { 19547 sd_set_pstats(un); 19548 SD_TRACE(SD_LOG_IO_PARTITION, un, 19549 "sd_handle_mchange: un:0x%p pstats created and " 19550 "set\n", un); 19551 } 19552 } 19553 19554 /* 19555 * Try to lock the door 19556 */ 19557 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 19558 SD_PATH_DIRECT_PRIORITY); 19559 failed: 19560 if (rval != 0) 19561 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19562 sd_ssc_fini(ssc); 19563 return (rval); 19564 } 19565 19566 19567 /* 19568 * Function: sd_send_scsi_DOORLOCK 19569 * 19570 * Description: Issue the scsi DOOR LOCK command 19571 * 19572 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19573 * structure for this target. 
19574 * flag - SD_REMOVAL_ALLOW 19575 * SD_REMOVAL_PREVENT 19576 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19577 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19578 * to use the USCSI "direct" chain and bypass the normal 19579 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19580 * command is issued as part of an error recovery action. 19581 * 19582 * Return Code: 0 - Success 19583 * errno return code from sd_ssc_send() 19584 * 19585 * Context: Can sleep. 19586 */ 19587 19588 static int 19589 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag) 19590 { 19591 struct scsi_extended_sense sense_buf; 19592 union scsi_cdb cdb; 19593 struct uscsi_cmd ucmd_buf; 19594 int status; 19595 struct sd_lun *un; 19596 19597 ASSERT(ssc != NULL); 19598 un = ssc->ssc_un; 19599 ASSERT(un != NULL); 19600 ASSERT(!mutex_owned(SD_MUTEX(un))); 19601 19602 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 19603 19604 /* already determined doorlock is not supported, fake success */ 19605 if (un->un_f_doorlock_supported == FALSE) { 19606 return (0); 19607 } 19608 19609 /* 19610 * If we are ejecting and see an SD_REMOVAL_PREVENT 19611 * ignore the command so we can complete the eject 19612 * operation. 19613 */ 19614 if (flag == SD_REMOVAL_PREVENT) { 19615 mutex_enter(SD_MUTEX(un)); 19616 if (un->un_f_ejecting == TRUE) { 19617 mutex_exit(SD_MUTEX(un)); 19618 return (EAGAIN); 19619 } 19620 mutex_exit(SD_MUTEX(un)); 19621 } 19622 19623 bzero(&cdb, sizeof (cdb)); 19624 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19625 19626 cdb.scc_cmd = SCMD_DOORLOCK; 19627 cdb.cdb_opaque[4] = (uchar_t)flag; 19628 19629 ucmd_buf.uscsi_cdb = (char *)&cdb; 19630 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19631 ucmd_buf.uscsi_bufaddr = NULL; 19632 ucmd_buf.uscsi_buflen = 0; 19633 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19634 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19635 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19636 ucmd_buf.uscsi_timeout = 15; 19637 19638 SD_TRACE(SD_LOG_IO, un, 19639 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n"); 19640 19641 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19642 UIO_SYSSPACE, path_flag); 19643 19644 if (status == 0) 19645 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19646 19647 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 19648 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19649 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 19650 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19651 19652 /* fake success and skip subsequent doorlock commands */ 19653 un->un_f_doorlock_supported = FALSE; 19654 return (0); 19655 } 19656 19657 return (status); 19658 } 19659 19660 /* 19661 * Function: sd_send_scsi_READ_CAPACITY 19662 * 19663 * Description: This routine uses the scsi READ CAPACITY command to determine 19664 * the device capacity in number of blocks and the device native 19665 * block size. If this function returns a failure, then the 19666 * values in *capp and *lbap are undefined. If the capacity 19667 * returned is 0xffffffff then the lun is too large for a 19668 * normal READ CAPACITY command and the results of a 19669 * READ CAPACITY 16 will be used instead. 19670 * 19671 * Arguments: ssc - ssc contains ptr to soft state struct for the target 19672 * capp - ptr to unsigned 64-bit variable to receive the 19673 * capacity value from the command. 
19674 * lbap - ptr to unsigned 32-bit variable to receive the 19675 * block size value from the command 19676 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19677 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19678 * to use the USCSI "direct" chain and bypass the normal 19679 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19680 * command is issued as part of an error recovery action. 19681 * 19682 * Return Code: 0 - Success 19683 * EIO - IO error 19684 * EACCES - Reservation conflict detected 19685 * EAGAIN - Device is becoming ready 19686 * errno return code from sd_ssc_send() 19687 * 19688 * Context: Can sleep. Blocks until command completes. 19689 */ 19690 19691 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 19692 19693 static int 19694 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap, 19695 int path_flag) 19696 { 19697 struct scsi_extended_sense sense_buf; 19698 struct uscsi_cmd ucmd_buf; 19699 union scsi_cdb cdb; 19700 uint32_t *capacity_buf; 19701 uint64_t capacity; 19702 uint32_t lbasize; 19703 uint32_t pbsize; 19704 int status; 19705 struct sd_lun *un; 19706 19707 ASSERT(ssc != NULL); 19708 19709 un = ssc->ssc_un; 19710 ASSERT(un != NULL); 19711 ASSERT(!mutex_owned(SD_MUTEX(un))); 19712 ASSERT(capp != NULL); 19713 ASSERT(lbap != NULL); 19714 19715 SD_TRACE(SD_LOG_IO, un, 19716 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 19717 19718 /* 19719 * First send a READ_CAPACITY command to the target. 19720 * (This command is mandatory under SCSI-2.) 19721 * 19722 * Set up the CDB for the READ_CAPACITY command. The Partial 19723 * Medium Indicator bit is cleared. The address field must be 19724 * zero if the PMI bit is zero. 19725 */ 19726 bzero(&cdb, sizeof (cdb)); 19727 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19728 19729 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 19730 19731 cdb.scc_cmd = SCMD_READ_CAPACITY; 19732 19733 ucmd_buf.uscsi_cdb = (char *)&cdb; 19734 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19735 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 19736 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 19737 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19738 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19739 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19740 ucmd_buf.uscsi_timeout = 60; 19741 19742 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19743 UIO_SYSSPACE, path_flag); 19744 19745 switch (status) { 19746 case 0: 19747 /* Return failure if we did not get valid capacity data. */ 19748 if (ucmd_buf.uscsi_resid != 0) { 19749 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 19750 "sd_send_scsi_READ_CAPACITY received invalid " 19751 "capacity data"); 19752 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19753 return (EIO); 19754 } 19755 /* 19756 * Read capacity and block size from the READ CAPACITY 10 data. 19757 * This data may be adjusted later due to device specific 19758 * issues. 19759 * 19760 * According to the SCSI spec, the READ CAPACITY 10 19761 * command returns the following: 19762 * 19763 * bytes 0-3: Maximum logical block address available.
19764 * (MSB in byte:0 & LSB in byte:3) 19765 * 19766 * bytes 4-7: Block length in bytes 19767 * (MSB in byte:4 & LSB in byte:7) 19768 * 19769 */ 19770 capacity = BE_32(capacity_buf[0]); 19771 lbasize = BE_32(capacity_buf[1]); 19772 19773 /* 19774 * Done with capacity_buf 19775 */ 19776 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19777 19778 /* 19779 * if the reported capacity is set to all 0xf's, then 19780 * this disk is too large and requires SBC-2 commands. 19781 * Reissue the request using READ CAPACITY 16. 19782 */ 19783 if (capacity == 0xffffffff) { 19784 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19785 status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, 19786 &lbasize, &pbsize, path_flag); 19787 if (status != 0) { 19788 return (status); 19789 } 19790 } 19791 break; /* Success! */ 19792 case EIO: 19793 switch (ucmd_buf.uscsi_status) { 19794 case STATUS_RESERVATION_CONFLICT: 19795 status = EACCES; 19796 break; 19797 case STATUS_CHECK: 19798 /* 19799 * Check condition; look for ASC/ASCQ of 0x04/0x01 19800 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 19801 */ 19802 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19803 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 19804 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 19805 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19806 return (EAGAIN); 19807 } 19808 break; 19809 default: 19810 break; 19811 } 19812 /* FALLTHRU */ 19813 default: 19814 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19815 return (status); 19816 } 19817 19818 /* 19819 * Some ATAPI CD-ROM drives report inaccurate LBA size values 19820 * (2352 and 0 are common) so for these devices always force the value 19821 * to 2048 as required by the ATAPI specs. 19822 */ 19823 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 19824 lbasize = 2048; 19825 } 19826 19827 /* 19828 * Get the maximum LBA value from the READ CAPACITY data. 19829 * Here we assume that the Partial Medium Indicator (PMI) bit 19830 * was cleared when issuing the command. This means that the LBA 19831 * returned from the device is the LBA of the last logical block 19832 * on the logical unit. The actual logical block count will be 19833 * this value plus one. 19834 * 19835 * Currently, for removable media, the capacity is saved in terms 19836 * of un->un_sys_blocksize, so scale the capacity value to reflect this. 19837 */ 19838 if (un->un_f_has_removable_media) 19839 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize); 19840 19841 /* 19842 * Copy the values from the READ CAPACITY command into the space 19843 * provided by the caller. 19844 */ 19845 *capp = capacity; 19846 *lbap = lbasize; 19847 19848 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 19849 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 19850 19851 /* 19852 * Both the lbasize and capacity from the device must be nonzero, 19853 * otherwise we assume that the values are not valid and return 19854 * failure to the caller. (4203735) 19855 */ 19856 if ((capacity == 0) || (lbasize == 0)) { 19857 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 19858 "sd_send_scsi_READ_CAPACITY received invalid value " 19859 "capacity %llu lbasize %d", capacity, lbasize); 19860 return (EIO); 19861 } 19862 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19863 return (0); 19864 } 19865 19866 /* 19867 * Function: sd_send_scsi_READ_CAPACITY_16 19868 * 19869 * Description: This routine uses the scsi READ CAPACITY 16 command to 19870 * determine the device capacity in number of blocks and the 19871 * device native block size. 
 *		If this function returns a failure, then the values
 *		in *capp, *lbap, and *psp are undefined.
 *		This routine should be called by sd_send_scsi_READ_CAPACITY
 *		which will apply any device specific adjustments to capacity
 *		and lbasize.  One exception is that it is also called by
 *		sd_get_media_info_ext.  In that function, there is no need to
 *		adjust the capacity and lbasize.
 *
 *   Arguments: ssc - ssc contains ptr to soft state struct for the target
 *		capp - ptr to unsigned 64-bit variable to receive the
 *			capacity value from the command.
 *		lbap - ptr to unsigned 32-bit variable to receive the
 *			block size value from the command
 *		psp - ptr to unsigned 32-bit variable to receive the
 *			physical block size value from the command
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.  SD_PATH_DIRECT_PRIORITY is used when
 *			this command is issued as part of an error recovery
 *			action.
 *
 * Return Code: 0 - Success
 *		EIO - IO error
 *		EACCES - Reservation conflict detected
 *		EAGAIN - Device is becoming ready
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.  Blocks until command completes.
 */

#define	SD_CAPACITY_16_SIZE	sizeof (struct scsi_capacity_16)

static int
sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
	uint32_t *lbap, uint32_t *psp, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	struct uscsi_cmd	ucmd_buf;
	union scsi_cdb		cdb;
	uint64_t		*capacity16_buf;
	uint64_t		capacity;
	uint32_t		lbasize;
	uint32_t		pbsize;
	uint32_t		lbpb_exp;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);

	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(capp != NULL);
	ASSERT(lbap != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);

	/*
	 * First send a READ_CAPACITY_16 command to the target.
	 *
	 * Set up the CDB for the READ_CAPACITY_16 command.  The Partial
	 * Medium Indicator bit is cleared.  The address field must be
	 * zero if the PMI bit is zero.
	 */
	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));

	capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP4;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)capacity16_buf;
	ucmd_buf.uscsi_buflen	= SD_CAPACITY_16_SIZE;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (sense_buf);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	/*
	 * Read Capacity (16) is a Service Action In command.  One
	 * command byte (0x9E) is overloaded for multiple operations, with
	 * the second CDB byte specifying the desired operation.
	 */
	cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
	cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;

	/*
	 * Fill in allocation length field
	 */
	FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		/* Return failure if we did not get valid capacity data. */
		if (ucmd_buf.uscsi_resid > 20) {
			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
			    "sd_send_scsi_READ_CAPACITY_16 received invalid "
			    "capacity data");
			kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
			return (EIO);
		}

		/*
		 * Read capacity and block size from the READ CAPACITY 16
		 * data.  This data may be adjusted later due to device
		 * specific issues.
		 *
		 * According to the SCSI spec, the READ CAPACITY 16
		 * command returns the following:
		 *
		 *  bytes 0-7: Maximum logical block address available.
		 *		(MSB in byte:0 & LSB in byte:7)
		 *
		 *  bytes 8-11: Block length in bytes
		 *		(MSB in byte:8 & LSB in byte:11)
		 *
		 *  byte 13: LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT
		 */
		capacity = BE_64(capacity16_buf[0]);
		lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
		lbpb_exp = (BE_64(capacity16_buf[1]) >> 16) & 0x0f;

		pbsize = lbasize << lbpb_exp;

		/*
		 * Done with capacity16_buf
		 */
		kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);

		/*
		 * If the reported capacity is set to all 0xf's, then
		 * this disk is too large.  This could only happen with
		 * a device that supports LBAs larger than 64 bits which
		 * are not defined by any current T10 standards.
		 */
		if (capacity == 0xffffffffffffffff) {
			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
			    "disk is too large");
			return (EIO);
		}
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			/*
			 * Check condition; look for ASC/ASCQ of 0x04/0x01
			 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
			 */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
			    (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
				kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
				return (EAGAIN);
			}
			break;
		default:
			break;
		}
		/* FALLTHRU */
	default:
		kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
		return (status);
	}

	*capp = capacity;
	*lbap = lbasize;
	*psp = pbsize;

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
	    "capacity:0x%llx lbasize:0x%x, pbsize: 0x%x\n",
	    capacity, lbasize, pbsize);

	return (0);
}


/*
 *    Function: sd_send_scsi_START_STOP_UNIT
 *
 * Description: Issue a scsi START STOP UNIT command to the target.
 *
 *   Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		structure for this target.
20062 * flag - SD_TARGET_START 20063 * SD_TARGET_STOP 20064 * SD_TARGET_EJECT 20065 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20066 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20067 * to use the USCSI "direct" chain and bypass the normal 20068 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 20069 * command is issued as part of an error recovery action. 20070 * 20071 * Return Code: 0 - Success 20072 * EIO - IO error 20073 * EACCES - Reservation conflict detected 20074 * ENXIO - Not Ready, medium not present 20075 * errno return code from sd_ssc_send() 20076 * 20077 * Context: Can sleep. 20078 */ 20079 20080 static int 20081 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, int path_flag) 20082 { 20083 struct scsi_extended_sense sense_buf; 20084 union scsi_cdb cdb; 20085 struct uscsi_cmd ucmd_buf; 20086 int status; 20087 struct sd_lun *un; 20088 20089 ASSERT(ssc != NULL); 20090 un = ssc->ssc_un; 20091 ASSERT(un != NULL); 20092 ASSERT(!mutex_owned(SD_MUTEX(un))); 20093 20094 SD_TRACE(SD_LOG_IO, un, 20095 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 20096 20097 if (un->un_f_check_start_stop && 20098 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 20099 (un->un_f_start_stop_supported != TRUE)) { 20100 return (0); 20101 } 20102 20103 /* 20104 * If we are performing an eject operation and 20105 * we receive any command other than SD_TARGET_EJECT 20106 * we should immediately return. 20107 */ 20108 if (flag != SD_TARGET_EJECT) { 20109 mutex_enter(SD_MUTEX(un)); 20110 if (un->un_f_ejecting == TRUE) { 20111 mutex_exit(SD_MUTEX(un)); 20112 return (EAGAIN); 20113 } 20114 mutex_exit(SD_MUTEX(un)); 20115 } 20116 20117 bzero(&cdb, sizeof (cdb)); 20118 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20119 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20120 20121 cdb.scc_cmd = SCMD_START_STOP; 20122 cdb.cdb_opaque[4] = (uchar_t)flag; 20123 20124 ucmd_buf.uscsi_cdb = (char *)&cdb; 20125 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20126 ucmd_buf.uscsi_bufaddr = NULL; 20127 ucmd_buf.uscsi_buflen = 0; 20128 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20129 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20130 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20131 ucmd_buf.uscsi_timeout = 200; 20132 20133 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20134 UIO_SYSSPACE, path_flag); 20135 20136 switch (status) { 20137 case 0: 20138 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20139 break; /* Success! */ 20140 case EIO: 20141 switch (ucmd_buf.uscsi_status) { 20142 case STATUS_RESERVATION_CONFLICT: 20143 status = EACCES; 20144 break; 20145 case STATUS_CHECK: 20146 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 20147 switch (scsi_sense_key( 20148 (uint8_t *)&sense_buf)) { 20149 case KEY_ILLEGAL_REQUEST: 20150 status = ENOTSUP; 20151 break; 20152 case KEY_NOT_READY: 20153 if (scsi_sense_asc( 20154 (uint8_t *)&sense_buf) 20155 == 0x3A) { 20156 status = ENXIO; 20157 } 20158 break; 20159 default: 20160 break; 20161 } 20162 } 20163 break; 20164 default: 20165 break; 20166 } 20167 break; 20168 default: 20169 break; 20170 } 20171 20172 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 20173 20174 return (status); 20175 } 20176 20177 20178 /* 20179 * Function: sd_start_stop_unit_callback 20180 * 20181 * Description: timeout(9F) callback to begin recovery process for a 20182 * device that has spun down. 20183 * 20184 * Arguments: arg - pointer to associated softstate struct. 
20185 * 20186 * Context: Executes in a timeout(9F) thread context 20187 */ 20188 20189 static void 20190 sd_start_stop_unit_callback(void *arg) 20191 { 20192 struct sd_lun *un = arg; 20193 ASSERT(un != NULL); 20194 ASSERT(!mutex_owned(SD_MUTEX(un))); 20195 20196 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 20197 20198 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 20199 } 20200 20201 20202 /* 20203 * Function: sd_start_stop_unit_task 20204 * 20205 * Description: Recovery procedure when a drive is spun down. 20206 * 20207 * Arguments: arg - pointer to associated softstate struct. 20208 * 20209 * Context: Executes in a taskq() thread context 20210 */ 20211 20212 static void 20213 sd_start_stop_unit_task(void *arg) 20214 { 20215 struct sd_lun *un = arg; 20216 sd_ssc_t *ssc; 20217 int rval; 20218 20219 ASSERT(un != NULL); 20220 ASSERT(!mutex_owned(SD_MUTEX(un))); 20221 20222 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 20223 20224 /* 20225 * Some unformatted drives report not ready error, no need to 20226 * restart if format has been initiated. 20227 */ 20228 mutex_enter(SD_MUTEX(un)); 20229 if (un->un_f_format_in_progress == TRUE) { 20230 mutex_exit(SD_MUTEX(un)); 20231 return; 20232 } 20233 mutex_exit(SD_MUTEX(un)); 20234 20235 /* 20236 * When a START STOP command is issued from here, it is part of a 20237 * failure recovery operation and must be issued before any other 20238 * commands, including any pending retries. Thus it must be sent 20239 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 20240 * succeeds or not, we will start I/O after the attempt. 20241 */ 20242 ssc = sd_ssc_init(un); 20243 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 20244 SD_PATH_DIRECT_PRIORITY); 20245 if (rval != 0) 20246 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 20247 sd_ssc_fini(ssc); 20248 /* 20249 * The above call blocks until the START_STOP_UNIT command completes. 20250 * Now that it has completed, we must re-try the original IO that 20251 * received the NOT READY condition in the first place. There are 20252 * three possible conditions here: 20253 * 20254 * (1) The original IO is on un_retry_bp. 20255 * (2) The original IO is on the regular wait queue, and un_retry_bp 20256 * is NULL. 20257 * (3) The original IO is on the regular wait queue, and un_retry_bp 20258 * points to some other, unrelated bp. 20259 * 20260 * For each case, we must call sd_start_cmds() with un_retry_bp 20261 * as the argument. If un_retry_bp is NULL, this will initiate 20262 * processing of the regular wait queue. If un_retry_bp is not NULL, 20263 * then this will process the bp on un_retry_bp. That may or may not 20264 * be the original IO, but that does not matter: the important thing 20265 * is to keep the IO processing going at this point. 20266 * 20267 * Note: This is a very specific error recovery sequence associated 20268 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 20269 * serialize the I/O with completion of the spin-up. 
20270 */ 20271 mutex_enter(SD_MUTEX(un)); 20272 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 20273 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 20274 un, un->un_retry_bp); 20275 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 20276 sd_start_cmds(un, un->un_retry_bp); 20277 mutex_exit(SD_MUTEX(un)); 20278 20279 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 20280 } 20281 20282 20283 /* 20284 * Function: sd_send_scsi_INQUIRY 20285 * 20286 * Description: Issue the scsi INQUIRY command. 20287 * 20288 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20289 * structure for this target. 20290 * bufaddr 20291 * buflen 20292 * evpd 20293 * page_code 20294 * page_length 20295 * 20296 * Return Code: 0 - Success 20297 * errno return code from sd_ssc_send() 20298 * 20299 * Context: Can sleep. Does not return until command is completed. 20300 */ 20301 20302 static int 20303 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen, 20304 uchar_t evpd, uchar_t page_code, size_t *residp) 20305 { 20306 union scsi_cdb cdb; 20307 struct uscsi_cmd ucmd_buf; 20308 int status; 20309 struct sd_lun *un; 20310 20311 ASSERT(ssc != NULL); 20312 un = ssc->ssc_un; 20313 ASSERT(un != NULL); 20314 ASSERT(!mutex_owned(SD_MUTEX(un))); 20315 ASSERT(bufaddr != NULL); 20316 20317 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 20318 20319 bzero(&cdb, sizeof (cdb)); 20320 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20321 bzero(bufaddr, buflen); 20322 20323 cdb.scc_cmd = SCMD_INQUIRY; 20324 cdb.cdb_opaque[1] = evpd; 20325 cdb.cdb_opaque[2] = page_code; 20326 FORMG0COUNT(&cdb, buflen); 20327 20328 ucmd_buf.uscsi_cdb = (char *)&cdb; 20329 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20330 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20331 ucmd_buf.uscsi_buflen = buflen; 20332 ucmd_buf.uscsi_rqbuf = NULL; 20333 ucmd_buf.uscsi_rqlen = 0; 20334 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 20335 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 20336 20337 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20338 UIO_SYSSPACE, SD_PATH_DIRECT); 20339 20340 /* 20341 * Only handle status == 0, the upper-level caller 20342 * will put different assessment based on the context. 20343 */ 20344 if (status == 0) 20345 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20346 20347 if ((status == 0) && (residp != NULL)) { 20348 *residp = ucmd_buf.uscsi_resid; 20349 } 20350 20351 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 20352 20353 return (status); 20354 } 20355 20356 20357 /* 20358 * Function: sd_send_scsi_TEST_UNIT_READY 20359 * 20360 * Description: Issue the scsi TEST UNIT READY command. 20361 * This routine can be told to set the flag USCSI_DIAGNOSE to 20362 * prevent retrying failed commands. Use this when the intent 20363 * is either to check for device readiness, to clear a Unit 20364 * Attention, or to clear any outstanding sense data. 20365 * However under specific conditions the expected behavior 20366 * is for retries to bring a device ready, so use the flag 20367 * with caution. 20368 * 20369 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20370 * structure for this target. 20371 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 20372 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 20373 * 0: dont check for media present, do retries on cmd. 
20374 * 20375 * Return Code: 0 - Success 20376 * EIO - IO error 20377 * EACCES - Reservation conflict detected 20378 * ENXIO - Not Ready, medium not present 20379 * errno return code from sd_ssc_send() 20380 * 20381 * Context: Can sleep. Does not return until command is completed. 20382 */ 20383 20384 static int 20385 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag) 20386 { 20387 struct scsi_extended_sense sense_buf; 20388 union scsi_cdb cdb; 20389 struct uscsi_cmd ucmd_buf; 20390 int status; 20391 struct sd_lun *un; 20392 20393 ASSERT(ssc != NULL); 20394 un = ssc->ssc_un; 20395 ASSERT(un != NULL); 20396 ASSERT(!mutex_owned(SD_MUTEX(un))); 20397 20398 SD_TRACE(SD_LOG_IO, un, 20399 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 20400 20401 /* 20402 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 20403 * timeouts when they receive a TUR and the queue is not empty. Check 20404 * the configuration flag set during attach (indicating the drive has 20405 * this firmware bug) and un_ncmds_in_transport before issuing the 20406 * TUR. If there are 20407 * pending commands return success, this is a bit arbitrary but is ok 20408 * for non-removables (i.e. the eliteI disks) and non-clustering 20409 * configurations. 20410 */ 20411 if (un->un_f_cfg_tur_check == TRUE) { 20412 mutex_enter(SD_MUTEX(un)); 20413 if (un->un_ncmds_in_transport != 0) { 20414 mutex_exit(SD_MUTEX(un)); 20415 return (0); 20416 } 20417 mutex_exit(SD_MUTEX(un)); 20418 } 20419 20420 bzero(&cdb, sizeof (cdb)); 20421 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20422 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20423 20424 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 20425 20426 ucmd_buf.uscsi_cdb = (char *)&cdb; 20427 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20428 ucmd_buf.uscsi_bufaddr = NULL; 20429 ucmd_buf.uscsi_buflen = 0; 20430 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20431 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20432 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20433 20434 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 20435 if ((flag & SD_DONT_RETRY_TUR) != 0) { 20436 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 20437 } 20438 ucmd_buf.uscsi_timeout = 60; 20439 20440 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20441 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 20442 SD_PATH_STANDARD)); 20443 20444 switch (status) { 20445 case 0: 20446 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20447 break; /* Success! */ 20448 case EIO: 20449 switch (ucmd_buf.uscsi_status) { 20450 case STATUS_RESERVATION_CONFLICT: 20451 status = EACCES; 20452 break; 20453 case STATUS_CHECK: 20454 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 20455 break; 20456 } 20457 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20458 (scsi_sense_key((uint8_t *)&sense_buf) == 20459 KEY_NOT_READY) && 20460 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 20461 status = ENXIO; 20462 } 20463 break; 20464 default: 20465 break; 20466 } 20467 break; 20468 default: 20469 break; 20470 } 20471 20472 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 20473 20474 return (status); 20475 } 20476 20477 /* 20478 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 20479 * 20480 * Description: Issue the scsi PERSISTENT RESERVE IN command. 20481 * 20482 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20483 * structure for this target. 20484 * 20485 * Return Code: 0 - Success 20486 * EACCES 20487 * ENOTSUP 20488 * errno return code from sd_ssc_send() 20489 * 20490 * Context: Can sleep. 
 */

static int
sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd,
	uint16_t data_len, uchar_t *data_bufp)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	int			no_caller_buf = FALSE;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	if (data_bufp == NULL) {
		/* Allocate a default buf if the caller did not give one */
		ASSERT(data_len == 0);
		data_len  = MHIOC_RESV_KEY_SIZE;
		data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
		no_caller_buf = TRUE;
	}

	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
	cdb.cdb_opaque[1] = usr_cmd;
	FORMG1COUNT(&cdb, data_len);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)data_bufp;
	ucmd_buf.uscsi_buflen	= data_len;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);

		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				status = ENOTSUP;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");

	if (no_caller_buf == TRUE) {
		kmem_free(data_bufp, data_len);
	}

	return (status);
}


/*
 *    Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		multi-host persistent reservation requests (MHIOCGRP_REGISTER,
 *		MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT,
 *		MHIOCGRP_REGISTERANDIGNOREKEY) by sending the SCSI-3 PROUT
 *		commands to the device.
 *
 *   Arguments: ssc - ssc contains un - pointer to soft state struct
 *		for the target.
 *		usr_cmd - SCSI-3 reservation facility command (one of
 *			SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
 *			SD_SCSI3_PREEMPTANDABORT)
 *		usr_bufp - user provided pointer to a register, reserve
 *			descriptor, or preempt and abort structure
 *			(mhioc_register_t, mhioc_resv_desc_t,
 *			mhioc_preemptandabort_t)
 *
 * Return Code: 0 - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.  Does not return until command is completed.
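 *
 *		Illustrative sketch only (hypothetical caller; "mykey" is
 *		a caller-supplied MHIOC_RESV_KEY_SIZE-byte key, not part
 *		of this file):
 *
 *			mhioc_register_t reg;
 *
 *			bzero(&reg, sizeof (reg));
 *			bcopy(mykey, reg.newkey.key, MHIOC_RESV_KEY_SIZE);
 *			rval = sd_send_scsi_PERSISTENT_RESERVE_OUT(ssc,
 *			    SD_SCSI3_REGISTER, (uchar_t *)&reg);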
20598 */ 20599 20600 static int 20601 sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd, 20602 uchar_t *usr_bufp) 20603 { 20604 struct scsi_extended_sense sense_buf; 20605 union scsi_cdb cdb; 20606 struct uscsi_cmd ucmd_buf; 20607 int status; 20608 uchar_t data_len = sizeof (sd_prout_t); 20609 sd_prout_t *prp; 20610 struct sd_lun *un; 20611 20612 ASSERT(ssc != NULL); 20613 un = ssc->ssc_un; 20614 ASSERT(un != NULL); 20615 ASSERT(!mutex_owned(SD_MUTEX(un))); 20616 ASSERT(data_len == 24); /* required by scsi spec */ 20617 20618 SD_TRACE(SD_LOG_IO, un, 20619 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 20620 20621 if (usr_bufp == NULL) { 20622 return (EINVAL); 20623 } 20624 20625 bzero(&cdb, sizeof (cdb)); 20626 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20627 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20628 prp = kmem_zalloc(data_len, KM_SLEEP); 20629 20630 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 20631 cdb.cdb_opaque[1] = usr_cmd; 20632 FORMG1COUNT(&cdb, data_len); 20633 20634 ucmd_buf.uscsi_cdb = (char *)&cdb; 20635 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20636 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 20637 ucmd_buf.uscsi_buflen = data_len; 20638 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20639 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20640 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 20641 ucmd_buf.uscsi_timeout = 60; 20642 20643 switch (usr_cmd) { 20644 case SD_SCSI3_REGISTER: { 20645 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 20646 20647 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20648 bcopy(ptr->newkey.key, prp->service_key, 20649 MHIOC_RESV_KEY_SIZE); 20650 prp->aptpl = ptr->aptpl; 20651 break; 20652 } 20653 case SD_SCSI3_RESERVE: 20654 case SD_SCSI3_RELEASE: { 20655 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 20656 20657 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20658 prp->scope_address = BE_32(ptr->scope_specific_addr); 20659 cdb.cdb_opaque[2] = ptr->type; 20660 break; 20661 } 20662 case SD_SCSI3_PREEMPTANDABORT: { 20663 mhioc_preemptandabort_t *ptr = 20664 (mhioc_preemptandabort_t *)usr_bufp; 20665 20666 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20667 bcopy(ptr->victim_key.key, prp->service_key, 20668 MHIOC_RESV_KEY_SIZE); 20669 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 20670 cdb.cdb_opaque[2] = ptr->resvdesc.type; 20671 ucmd_buf.uscsi_flags |= USCSI_HEAD; 20672 break; 20673 } 20674 case SD_SCSI3_REGISTERANDIGNOREKEY: 20675 { 20676 mhioc_registerandignorekey_t *ptr; 20677 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 20678 bcopy(ptr->newkey.key, 20679 prp->service_key, MHIOC_RESV_KEY_SIZE); 20680 prp->aptpl = ptr->aptpl; 20681 break; 20682 } 20683 default: 20684 ASSERT(FALSE); 20685 break; 20686 } 20687 20688 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20689 UIO_SYSSPACE, SD_PATH_STANDARD); 20690 20691 switch (status) { 20692 case 0: 20693 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20694 break; /* Success! 
*/ 20695 case EIO: 20696 switch (ucmd_buf.uscsi_status) { 20697 case STATUS_RESERVATION_CONFLICT: 20698 status = EACCES; 20699 break; 20700 case STATUS_CHECK: 20701 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20702 (scsi_sense_key((uint8_t *)&sense_buf) == 20703 KEY_ILLEGAL_REQUEST)) { 20704 status = ENOTSUP; 20705 } 20706 break; 20707 default: 20708 break; 20709 } 20710 break; 20711 default: 20712 break; 20713 } 20714 20715 kmem_free(prp, data_len); 20716 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 20717 return (status); 20718 } 20719 20720 20721 /* 20722 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 20723 * 20724 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 20725 * 20726 * Arguments: un - pointer to the target's soft state struct 20727 * dkc - pointer to the callback structure 20728 * 20729 * Return Code: 0 - success 20730 * errno-type error code 20731 * 20732 * Context: kernel thread context only. 20733 * 20734 * _______________________________________________________________ 20735 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 20736 * |FLUSH_VOLATILE| | operation | 20737 * |______________|______________|_________________________________| 20738 * | 0 | NULL | Synchronous flush on both | 20739 * | | | volatile and non-volatile cache | 20740 * |______________|______________|_________________________________| 20741 * | 1 | NULL | Synchronous flush on volatile | 20742 * | | | cache; disk drivers may suppress| 20743 * | | | flush if disk table indicates | 20744 * | | | non-volatile cache | 20745 * |______________|______________|_________________________________| 20746 * | 0 | !NULL | Asynchronous flush on both | 20747 * | | | volatile and non-volatile cache;| 20748 * |______________|______________|_________________________________| 20749 * | 1 | !NULL | Asynchronous flush on volatile | 20750 * | | | cache; disk drivers may suppress| 20751 * | | | flush if disk table indicates | 20752 * | | | non-volatile cache | 20753 * |______________|______________|_________________________________| 20754 * 20755 */ 20756 20757 static int 20758 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 20759 { 20760 struct sd_uscsi_info *uip; 20761 struct uscsi_cmd *uscmd; 20762 union scsi_cdb *cdb; 20763 struct buf *bp; 20764 int rval = 0; 20765 int is_async; 20766 20767 SD_TRACE(SD_LOG_IO, un, 20768 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 20769 20770 ASSERT(un != NULL); 20771 ASSERT(!mutex_owned(SD_MUTEX(un))); 20772 20773 if (dkc == NULL || dkc->dkc_callback == NULL) { 20774 is_async = FALSE; 20775 } else { 20776 is_async = TRUE; 20777 } 20778 20779 mutex_enter(SD_MUTEX(un)); 20780 /* check whether cache flush should be suppressed */ 20781 if (un->un_f_suppress_cache_flush == TRUE) { 20782 mutex_exit(SD_MUTEX(un)); 20783 /* 20784 * suppress the cache flush if the device is told to do 20785 * so by sd.conf or disk table 20786 */ 20787 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 20788 skip the cache flush since suppress_cache_flush is %d!\n", 20789 un->un_f_suppress_cache_flush); 20790 20791 if (is_async == TRUE) { 20792 /* invoke callback for asynchronous flush */ 20793 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 20794 } 20795 return (rval); 20796 } 20797 mutex_exit(SD_MUTEX(un)); 20798 20799 /* 20800 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 20801 * set properly 20802 */ 20803 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 20804 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 20805 20806 
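	/*
	 * A sketch of the resulting CDB, for reference (SYNCHRONIZE
	 * CACHE(10), opcode 0x35): the SYNC_NV bit may be set in byte 1
	 * below; the LBA and block count fields are left zero, which per
	 * SBC requests a flush of the cache for the entire medium.
	 */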
	mutex_enter(SD_MUTEX(un));
	if (dkc != NULL && un->un_f_sync_nv_supported &&
	    (dkc->dkc_flag & FLUSH_VOLATILE)) {
		/*
		 * if the device supports SYNC_NV bit, turn on
		 * the SYNC_NV bit to only flush volatile cache
		 */
		cdb->cdb_un.tag |= SD_SYNC_NV_BIT;
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * First get some memory for the uscsi_cmd struct and cdb
	 * and initialize for SYNCHRONIZE_CACHE cmd.
	 */
	uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP);
	uscmd->uscsi_cdblen = CDB_GROUP1;
	uscmd->uscsi_cdb = (caddr_t)cdb;
	uscmd->uscsi_bufaddr = NULL;
	uscmd->uscsi_buflen = 0;
	uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
	uscmd->uscsi_rqlen = SENSE_LENGTH;
	uscmd->uscsi_rqresid = SENSE_LENGTH;
	uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
	uscmd->uscsi_timeout = sd_io_time;

	/*
	 * Allocate an sd_uscsi_info struct and fill it with the info
	 * needed by sd_initpkt_for_uscsi().  Then put the pointer into
	 * b_private in the buf for sd_initpkt_for_uscsi().  Note that
	 * since we allocate the buf here in this function, we do not
	 * need to preserve the prior contents of b_private.
	 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
	 */
	uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
	uip->ui_flags = SD_PATH_DIRECT;
	uip->ui_cmdp  = uscmd;

	bp = getrbuf(KM_SLEEP);
	bp->b_private = uip;

	/*
	 * Setup buffer to carry uscsi request.
	 */
	bp->b_flags  = B_BUSY;
	bp->b_bcount = 0;
	bp->b_blkno  = 0;

	if (is_async == TRUE) {
		bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone;
		uip->ui_dkc = *dkc;
	}

	bp->b_edev = SD_GET_DEV(un);
	bp->b_dev = cmpdev(bp->b_edev);	/* maybe unnecessary? */

	/*
	 * Unset un_f_sync_cache_required flag
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_f_sync_cache_required = FALSE;
	mutex_exit(SD_MUTEX(un));

	(void) sd_uscsi_strategy(bp);

	/*
	 * If synchronous request, wait for completion.
	 * If async, just return and let the b_iodone callback
	 * clean up.
	 * NOTE: On return, un_ncmds_in_driver will be decremented,
	 * but it was also incremented in sd_uscsi_strategy(), so
	 * we should be ok.
	 */
	if (is_async == FALSE) {
		(void) biowait(bp);
		rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp);
	}

	return (rval);
}


static int
sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp)
{
	struct sd_uscsi_info	*uip;
	struct uscsi_cmd	*uscmd;
	uint8_t			*sense_buf;
	struct sd_lun		*un;
	int			status;
	union scsi_cdb		*cdb;

	uip = (struct sd_uscsi_info *)(bp->b_private);
	ASSERT(uip != NULL);

	uscmd = uip->ui_cmdp;
	ASSERT(uscmd != NULL);

	sense_buf = (uint8_t *)uscmd->uscsi_rqbuf;
	ASSERT(sense_buf != NULL);

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	ASSERT(un != NULL);

	cdb = (union scsi_cdb *)uscmd->uscsi_cdb;

	status = geterror(bp);
	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (uscmd->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			/* Ignore reservation conflict */
			status = 0;
			goto done;

		case STATUS_CHECK:
			if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key(sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				/* Ignore Illegal Request error */
				if (cdb->cdb_un.tag&SD_SYNC_NV_BIT) {
					mutex_enter(SD_MUTEX(un));
					un->un_f_sync_nv_supported = FALSE;
					mutex_exit(SD_MUTEX(un));
					status = 0;
					SD_TRACE(SD_LOG_IO, un,
					    "un_f_sync_nv_supported \
					    is set to false.\n");
					goto done;
				}

				mutex_enter(SD_MUTEX(un));
				un->un_f_sync_cache_supported = FALSE;
				mutex_exit(SD_MUTEX(un));
				SD_TRACE(SD_LOG_IO, un,
				    "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \
				    un_f_sync_cache_supported set to false \
				    with asc = %x, ascq = %x\n",
				    scsi_sense_asc(sense_buf),
				    scsi_sense_ascq(sense_buf));
				status = ENOTSUP;
				goto done;
			}
			break;
		default:
			break;
		}
		/* FALLTHRU */
	default:
		/*
		 * Turn on the un_f_sync_cache_required flag
		 * since the SYNC CACHE command failed
		 */
		mutex_enter(SD_MUTEX(un));
		un->un_f_sync_cache_required = TRUE;
		mutex_exit(SD_MUTEX(un));

		/*
		 * Don't log an error message if this device
		 * has removable media.
		 */
		if (!un->un_f_has_removable_media) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "SYNCHRONIZE CACHE command failed (%d)\n", status);
		}
		break;
	}

done:
	if (uip->ui_dkc.dkc_callback != NULL) {
		(*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
	}

	ASSERT((bp->b_flags & B_REMAPPED) == 0);
	freerbuf(bp);
	kmem_free(uip, sizeof (struct sd_uscsi_info));
	kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
	kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
	kmem_free(uscmd, sizeof (struct uscsi_cmd));

	return (status);
}


/*
 *    Function: sd_send_scsi_GET_CONFIGURATION
 *
 * Description: Issues the get configuration command to the device.
 *		Called from sd_check_for_writable_cd & sd_get_media_info;
 *		the caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN
 *
 *   Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		structure for this target.
 *		ucmdbuf - uscsi command buffer filled in by this routine
 *		rqbuf - buffer for the request sense data
 *		rqbuflen - size of the request sense buffer
 *		bufaddr - buffer to receive the configuration data
 *		buflen - size of the buffer at bufaddr
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.  Does not return until command is completed.
 *
 */

static int
sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf,
	uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
	int path_flag)
{
	char	cdb[CDB_GROUP1];
	int	status;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT(ucmdbuf != NULL);
	ASSERT(rqbuf != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);

	bzero(cdb, sizeof (cdb));
	bzero(ucmdbuf, sizeof (struct uscsi_cmd));
	bzero(rqbuf, rqbuflen);
	bzero(bufaddr, buflen);

	/*
	 * Set up cdb field for the get configuration command.
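	 *
	 * For reference (per MMC): the opcode is 0x46; the 0x02 in byte 1
	 * (the RT field) requests a single feature descriptor, and byte 8
	 * carries the low byte of the allocation length.  Only the
	 * profile header (SD_PROFILE_HEADER_LEN bytes) is requested here.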
21040 */ 21041 cdb[0] = SCMD_GET_CONFIGURATION; 21042 cdb[1] = 0x02; /* Requested Type */ 21043 cdb[8] = SD_PROFILE_HEADER_LEN; 21044 ucmdbuf->uscsi_cdb = cdb; 21045 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 21046 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 21047 ucmdbuf->uscsi_buflen = buflen; 21048 ucmdbuf->uscsi_timeout = sd_io_time; 21049 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 21050 ucmdbuf->uscsi_rqlen = rqbuflen; 21051 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 21052 21053 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 21054 UIO_SYSSPACE, path_flag); 21055 21056 switch (status) { 21057 case 0: 21058 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21059 break; /* Success! */ 21060 case EIO: 21061 switch (ucmdbuf->uscsi_status) { 21062 case STATUS_RESERVATION_CONFLICT: 21063 status = EACCES; 21064 break; 21065 default: 21066 break; 21067 } 21068 break; 21069 default: 21070 break; 21071 } 21072 21073 if (status == 0) { 21074 SD_DUMP_MEMORY(un, SD_LOG_IO, 21075 "sd_send_scsi_GET_CONFIGURATION: data", 21076 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 21077 } 21078 21079 SD_TRACE(SD_LOG_IO, un, 21080 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 21081 21082 return (status); 21083 } 21084 21085 /* 21086 * Function: sd_send_scsi_feature_GET_CONFIGURATION 21087 * 21088 * Description: Issues the get configuration command to the device to 21089 * retrieve a specific feature. Called from 21090 * sd_check_for_writable_cd & sd_set_mmc_caps. 21091 * Arguments: ssc 21092 * ucmdbuf 21093 * rqbuf 21094 * rqbuflen 21095 * bufaddr 21096 * buflen 21097 * feature 21098 * 21099 * Return Code: 0 - Success 21100 * errno return code from sd_ssc_send() 21101 * 21102 * Context: Can sleep. Does not return until command is completed. 21103 * 21104 */ 21105 static int 21106 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 21107 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 21108 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 21109 { 21110 char cdb[CDB_GROUP1]; 21111 int status; 21112 struct sd_lun *un; 21113 21114 ASSERT(ssc != NULL); 21115 un = ssc->ssc_un; 21116 ASSERT(un != NULL); 21117 ASSERT(!mutex_owned(SD_MUTEX(un))); 21118 ASSERT(bufaddr != NULL); 21119 ASSERT(ucmdbuf != NULL); 21120 ASSERT(rqbuf != NULL); 21121 21122 SD_TRACE(SD_LOG_IO, un, 21123 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 21124 21125 bzero(cdb, sizeof (cdb)); 21126 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 21127 bzero(rqbuf, rqbuflen); 21128 bzero(bufaddr, buflen); 21129 21130 /* 21131 * Set up cdb field for the get configuration command. 21132 */ 21133 cdb[0] = SCMD_GET_CONFIGURATION; 21134 cdb[1] = 0x02; /* Requested Type */ 21135 cdb[3] = feature; 21136 cdb[8] = buflen; 21137 ucmdbuf->uscsi_cdb = cdb; 21138 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 21139 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 21140 ucmdbuf->uscsi_buflen = buflen; 21141 ucmdbuf->uscsi_timeout = sd_io_time; 21142 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 21143 ucmdbuf->uscsi_rqlen = rqbuflen; 21144 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 21145 21146 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 21147 UIO_SYSSPACE, path_flag); 21148 21149 switch (status) { 21150 case 0: 21151 21152 break; /* Success! 
*/
	case EIO:
		switch (ucmdbuf->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO,
		    "sd_send_scsi_feature_GET_CONFIGURATION: data",
		    (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");

	return (status);
}


/*
 *    Function: sd_send_scsi_MODE_SENSE
 *
 * Description: Utility function for issuing a scsi MODE SENSE command.
 *		Note: This routine uses a consistent implementation for Group0,
 *		Group1, and Group2 commands across all platforms.  ATAPI devices
 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
 *
 *   Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		structure for this target.
 *		cdbsize - size of the CDB to be used (CDB_GROUP0 (6 byte) or
 *			CDB_GROUP[1|2] (10 byte)).
 *		bufaddr - buffer for page data retrieved from the target.
 *		buflen - size of page to be retrieved.
 *		page_code - page code of data to be retrieved from the target.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.  Does not return until command is completed.
 */

static int
sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
	size_t buflen, uchar_t page_code, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	int			headlen;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	bzero(bufaddr, buflen);

	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SENSE;
		cdb.cdb_opaque[2] = page_code;
		FORMG0COUNT(&cdb, buflen);
		headlen = MODE_HEADER_LENGTH;
	} else {
		cdb.scc_cmd = SCMD_MODE_SENSE_G1;
		cdb.cdb_opaque[2] = page_code;
		FORMG1COUNT(&cdb, buflen);
		headlen = MODE_HEADER_LENGTH_GRP2;
	}

	ASSERT(headlen <= buflen);
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen	= buflen;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		/*
		 * sr_check_wp() uses the 0x3f page code and checks the
		 * header of the mode page to determine whether the target
		 * device is write-protected.  But some USB devices return
		 * 0 bytes for the 0x3f page code.  For this case, make sure
		 * that at least the mode page header is returned.
		 */
		if (buflen - ucmd_buf.uscsi_resid <  headlen) {
			status = EIO;
			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
			    "mode page header is not returned");
		}
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");

	return (status);
}


/*
 *    Function: sd_send_scsi_MODE_SELECT
 *
 * Description: Utility function for issuing a scsi MODE SELECT command.
 *		Note: This routine uses a consistent implementation for Group0,
 *		Group1, and Group2 commands across all platforms.  ATAPI devices
 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
 *
 *   Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		structure for this target.
 *		cdbsize - size of the CDB to be used (CDB_GROUP0 (6 byte) or
 *			CDB_GROUP[1|2] (10 byte)).
 *		bufaddr - buffer holding the page data to be sent to the
 *			target.
 *		buflen - size of page to be transferred.
 *		save_page - boolean to determine if the SP bit should be set.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 *     Context: Can sleep.  Does not return until command is completed.
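 *
 *		Illustrative sketch only (hypothetical caller; "pagebuf"
 *		and "pagelen" are assumed to hold a mode header plus page
 *		data previously fetched with sd_send_scsi_MODE_SENSE):
 *
 *			rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1,
 *			    pagebuf, pagelen, SD_SAVE_PAGE,
 *			    SD_PATH_DIRECT);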
21321 */ 21322 21323 static int 21324 sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 21325 size_t buflen, uchar_t save_page, int path_flag) 21326 { 21327 struct scsi_extended_sense sense_buf; 21328 union scsi_cdb cdb; 21329 struct uscsi_cmd ucmd_buf; 21330 int status; 21331 struct sd_lun *un; 21332 21333 ASSERT(ssc != NULL); 21334 un = ssc->ssc_un; 21335 ASSERT(un != NULL); 21336 ASSERT(!mutex_owned(SD_MUTEX(un))); 21337 ASSERT(bufaddr != NULL); 21338 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 21339 (cdbsize == CDB_GROUP2)); 21340 21341 SD_TRACE(SD_LOG_IO, un, 21342 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un); 21343 21344 bzero(&cdb, sizeof (cdb)); 21345 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21346 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21347 21348 /* Set the PF bit for many third party drives */ 21349 cdb.cdb_opaque[1] = 0x10; 21350 21351 /* Set the savepage(SP) bit if given */ 21352 if (save_page == SD_SAVE_PAGE) { 21353 cdb.cdb_opaque[1] |= 0x01; 21354 } 21355 21356 if (cdbsize == CDB_GROUP0) { 21357 cdb.scc_cmd = SCMD_MODE_SELECT; 21358 FORMG0COUNT(&cdb, buflen); 21359 } else { 21360 cdb.scc_cmd = SCMD_MODE_SELECT_G1; 21361 FORMG1COUNT(&cdb, buflen); 21362 } 21363 21364 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21365 21366 ucmd_buf.uscsi_cdb = (char *)&cdb; 21367 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21368 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21369 ucmd_buf.uscsi_buflen = buflen; 21370 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21371 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21372 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 21373 ucmd_buf.uscsi_timeout = 60; 21374 21375 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21376 UIO_SYSSPACE, path_flag); 21377 21378 switch (status) { 21379 case 0: 21380 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21381 break; /* Success! */ 21382 case EIO: 21383 switch (ucmd_buf.uscsi_status) { 21384 case STATUS_RESERVATION_CONFLICT: 21385 status = EACCES; 21386 break; 21387 default: 21388 break; 21389 } 21390 break; 21391 default: 21392 break; 21393 } 21394 21395 if (status == 0) { 21396 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data", 21397 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21398 } 21399 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n"); 21400 21401 return (status); 21402 } 21403 21404 21405 /* 21406 * Function: sd_send_scsi_RDWR 21407 * 21408 * Description: Issue a scsi READ or WRITE command with the given parameters. 21409 * 21410 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21411 * structure for this target. 21412 * cmd: SCMD_READ or SCMD_WRITE 21413 * bufaddr: Address of caller's buffer to receive the RDWR data 21414 * buflen: Length of caller's buffer receive the RDWR data. 21415 * start_block: Block number for the start of the RDWR operation. 21416 * (Assumes target-native block size.) 21417 * residp: Pointer to variable to receive the redisual of the 21418 * RDWR operation (may be NULL of no residual requested). 21419 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 21420 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 21421 * to use the USCSI "direct" chain and bypass the normal 21422 * command waitq. 21423 * 21424 * Return Code: 0 - Success 21425 * errno return code from sd_ssc_send() 21426 * 21427 * Context: Can sleep. Does not return until command is completed. 
21428 */ 21429 21430 static int 21431 sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 21432 size_t buflen, daddr_t start_block, int path_flag) 21433 { 21434 struct scsi_extended_sense sense_buf; 21435 union scsi_cdb cdb; 21436 struct uscsi_cmd ucmd_buf; 21437 uint32_t block_count; 21438 int status; 21439 int cdbsize; 21440 uchar_t flag; 21441 struct sd_lun *un; 21442 21443 ASSERT(ssc != NULL); 21444 un = ssc->ssc_un; 21445 ASSERT(un != NULL); 21446 ASSERT(!mutex_owned(SD_MUTEX(un))); 21447 ASSERT(bufaddr != NULL); 21448 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 21449 21450 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 21451 21452 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 21453 return (EINVAL); 21454 } 21455 21456 mutex_enter(SD_MUTEX(un)); 21457 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 21458 mutex_exit(SD_MUTEX(un)); 21459 21460 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 21461 21462 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 21463 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 21464 bufaddr, buflen, start_block, block_count); 21465 21466 bzero(&cdb, sizeof (cdb)); 21467 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21468 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21469 21470 /* Compute CDB size to use */ 21471 if (start_block > 0xffffffff) 21472 cdbsize = CDB_GROUP4; 21473 else if ((start_block & 0xFFE00000) || 21474 (un->un_f_cfg_is_atapi == TRUE)) 21475 cdbsize = CDB_GROUP1; 21476 else 21477 cdbsize = CDB_GROUP0; 21478 21479 switch (cdbsize) { 21480 case CDB_GROUP0: /* 6-byte CDBs */ 21481 cdb.scc_cmd = cmd; 21482 FORMG0ADDR(&cdb, start_block); 21483 FORMG0COUNT(&cdb, block_count); 21484 break; 21485 case CDB_GROUP1: /* 10-byte CDBs */ 21486 cdb.scc_cmd = cmd | SCMD_GROUP1; 21487 FORMG1ADDR(&cdb, start_block); 21488 FORMG1COUNT(&cdb, block_count); 21489 break; 21490 case CDB_GROUP4: /* 16-byte CDBs */ 21491 cdb.scc_cmd = cmd | SCMD_GROUP4; 21492 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 21493 FORMG4COUNT(&cdb, block_count); 21494 break; 21495 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 21496 default: 21497 /* All others reserved */ 21498 return (EINVAL); 21499 } 21500 21501 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 21502 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21503 21504 ucmd_buf.uscsi_cdb = (char *)&cdb; 21505 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21506 ucmd_buf.uscsi_bufaddr = bufaddr; 21507 ucmd_buf.uscsi_buflen = buflen; 21508 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21509 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21510 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 21511 ucmd_buf.uscsi_timeout = 60; 21512 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21513 UIO_SYSSPACE, path_flag); 21514 21515 switch (status) { 21516 case 0: 21517 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21518 break; /* Success! */ 21519 case EIO: 21520 switch (ucmd_buf.uscsi_status) { 21521 case STATUS_RESERVATION_CONFLICT: 21522 status = EACCES; 21523 break; 21524 default: 21525 break; 21526 } 21527 break; 21528 default: 21529 break; 21530 } 21531 21532 if (status == 0) { 21533 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 21534 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21535 } 21536 21537 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 21538 21539 return (status); 21540 } 21541 21542 21543 /* 21544 * Function: sd_send_scsi_LOG_SENSE 21545 * 21546 * Description: Issue a scsi LOG_SENSE command with the given parameters. 
21547 * 21548 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21549 * structure for this target. 21550 * 21551 * Return Code: 0 - Success 21552 * errno return code from sd_ssc_send() 21553 * 21554 * Context: Can sleep. Does not return until command is completed. 21555 */ 21556 21557 static int 21558 sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen, 21559 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 21560 int path_flag) 21561 21562 { 21563 struct scsi_extended_sense sense_buf; 21564 union scsi_cdb cdb; 21565 struct uscsi_cmd ucmd_buf; 21566 int status; 21567 struct sd_lun *un; 21568 21569 ASSERT(ssc != NULL); 21570 un = ssc->ssc_un; 21571 ASSERT(un != NULL); 21572 ASSERT(!mutex_owned(SD_MUTEX(un))); 21573 21574 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 21575 21576 bzero(&cdb, sizeof (cdb)); 21577 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21578 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21579 21580 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 21581 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 21582 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 21583 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 21584 FORMG1COUNT(&cdb, buflen); 21585 21586 ucmd_buf.uscsi_cdb = (char *)&cdb; 21587 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 21588 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21589 ucmd_buf.uscsi_buflen = buflen; 21590 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21591 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21592 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 21593 ucmd_buf.uscsi_timeout = 60; 21594 21595 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21596 UIO_SYSSPACE, path_flag); 21597 21598 switch (status) { 21599 case 0: 21600 break; 21601 case EIO: 21602 switch (ucmd_buf.uscsi_status) { 21603 case STATUS_RESERVATION_CONFLICT: 21604 status = EACCES; 21605 break; 21606 case STATUS_CHECK: 21607 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 21608 (scsi_sense_key((uint8_t *)&sense_buf) == 21609 KEY_ILLEGAL_REQUEST) && 21610 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 21611 /* 21612 * ASC 0x24: INVALID FIELD IN CDB 21613 */ 21614 switch (page_code) { 21615 case START_STOP_CYCLE_PAGE: 21616 /* 21617 * The start stop cycle counter is 21618 * implemented as page 0x31 in earlier 21619 * generation disks. In new generation 21620 * disks the start stop cycle counter is 21621 * implemented as page 0xE. To properly 21622 * handle this case if an attempt for 21623 * log page 0xE is made and fails we 21624 * will try again using page 0x31. 21625 * 21626 * Network storage BU committed to 21627 * maintain the page 0x31 for this 21628 * purpose and will not have any other 21629 * page implemented with page code 0x31 21630 * until all disks transition to the 21631 * standard page. 
21632 */ 21633 mutex_enter(SD_MUTEX(un)); 21634 un->un_start_stop_cycle_page = 21635 START_STOP_CYCLE_VU_PAGE; 21636 cdb.cdb_opaque[2] = 21637 (char)(page_control << 6) | 21638 un->un_start_stop_cycle_page; 21639 mutex_exit(SD_MUTEX(un)); 21640 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 21641 status = sd_ssc_send( 21642 ssc, &ucmd_buf, FKIOCTL, 21643 UIO_SYSSPACE, path_flag); 21644 21645 break; 21646 case TEMPERATURE_PAGE: 21647 status = ENOTTY; 21648 break; 21649 default: 21650 break; 21651 } 21652 } 21653 break; 21654 default: 21655 break; 21656 } 21657 break; 21658 default: 21659 break; 21660 } 21661 21662 if (status == 0) { 21663 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21664 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 21665 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21666 } 21667 21668 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 21669 21670 return (status); 21671 } 21672 21673 21674 /* 21675 * Function: sdioctl 21676 * 21677 * Description: Driver's ioctl(9e) entry point function. 21678 * 21679 * Arguments: dev - device number 21680 * cmd - ioctl operation to be performed 21681 * arg - user argument, contains data to be set or reference 21682 * parameter for get 21683 * flag - bit flag, indicating open settings, 32/64 bit type 21684 * cred_p - user credential pointer 21685 * rval_p - calling process return value (OPT) 21686 * 21687 * Return Code: EINVAL 21688 * ENOTTY 21689 * ENXIO 21690 * EIO 21691 * EFAULT 21692 * ENOTSUP 21693 * EPERM 21694 * 21695 * Context: Called from the device switch at normal priority. 21696 */ 21697 21698 static int 21699 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 21700 { 21701 struct sd_lun *un = NULL; 21702 int err = 0; 21703 int i = 0; 21704 cred_t *cr; 21705 int tmprval = EINVAL; 21706 boolean_t is_valid; 21707 sd_ssc_t *ssc; 21708 21709 /* 21710 * All device accesses go thru sdstrategy where we check on suspend 21711 * status 21712 */ 21713 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21714 return (ENXIO); 21715 } 21716 21717 ASSERT(!mutex_owned(SD_MUTEX(un))); 21718 21719 /* Initialize sd_ssc_t for internal uscsi commands */ 21720 ssc = sd_ssc_init(un); 21721 21722 is_valid = SD_IS_VALID_LABEL(un); 21723 21724 /* 21725 * Moved this wait from sd_uscsi_strategy to here for 21726 * reasons of deadlock prevention. Internal driver commands, 21727 * specifically those to change a devices power level, result 21728 * in a call to sd_uscsi_strategy. 21729 */ 21730 mutex_enter(SD_MUTEX(un)); 21731 while ((un->un_state == SD_STATE_SUSPENDED) || 21732 (un->un_state == SD_STATE_PM_CHANGING)) { 21733 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 21734 } 21735 /* 21736 * Twiddling the counter here protects commands from now 21737 * through to the top of sd_uscsi_strategy. Without the 21738 * counter inc. a power down, for example, could get in 21739 * after the above check for state is made and before 21740 * execution gets to the top of sd_uscsi_strategy. 21741 * That would cause problems. 
21742 */ 21743 un->un_ncmds_in_driver++; 21744 21745 if (!is_valid && 21746 (flag & (FNDELAY | FNONBLOCK))) { 21747 switch (cmd) { 21748 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 21749 case DKIOCGVTOC: 21750 case DKIOCGEXTVTOC: 21751 case DKIOCGAPART: 21752 case DKIOCPARTINFO: 21753 case DKIOCEXTPARTINFO: 21754 case DKIOCSGEOM: 21755 case DKIOCSAPART: 21756 case DKIOCGETEFI: 21757 case DKIOCPARTITION: 21758 case DKIOCSVTOC: 21759 case DKIOCSEXTVTOC: 21760 case DKIOCSETEFI: 21761 case DKIOCGMBOOT: 21762 case DKIOCSMBOOT: 21763 case DKIOCG_PHYGEOM: 21764 case DKIOCG_VIRTGEOM: 21765 /* let cmlb handle it */ 21766 goto skip_ready_valid; 21767 21768 case CDROMPAUSE: 21769 case CDROMRESUME: 21770 case CDROMPLAYMSF: 21771 case CDROMPLAYTRKIND: 21772 case CDROMREADTOCHDR: 21773 case CDROMREADTOCENTRY: 21774 case CDROMSTOP: 21775 case CDROMSTART: 21776 case CDROMVOLCTRL: 21777 case CDROMSUBCHNL: 21778 case CDROMREADMODE2: 21779 case CDROMREADMODE1: 21780 case CDROMREADOFFSET: 21781 case CDROMSBLKMODE: 21782 case CDROMGBLKMODE: 21783 case CDROMGDRVSPEED: 21784 case CDROMSDRVSPEED: 21785 case CDROMCDDA: 21786 case CDROMCDXA: 21787 case CDROMSUBCODE: 21788 if (!ISCD(un)) { 21789 un->un_ncmds_in_driver--; 21790 ASSERT(un->un_ncmds_in_driver >= 0); 21791 mutex_exit(SD_MUTEX(un)); 21792 err = ENOTTY; 21793 goto done_without_assess; 21794 } 21795 break; 21796 case FDEJECT: 21797 case DKIOCEJECT: 21798 case CDROMEJECT: 21799 if (!un->un_f_eject_media_supported) { 21800 un->un_ncmds_in_driver--; 21801 ASSERT(un->un_ncmds_in_driver >= 0); 21802 mutex_exit(SD_MUTEX(un)); 21803 err = ENOTTY; 21804 goto done_without_assess; 21805 } 21806 break; 21807 case DKIOCFLUSHWRITECACHE: 21808 mutex_exit(SD_MUTEX(un)); 21809 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 21810 if (err != 0) { 21811 mutex_enter(SD_MUTEX(un)); 21812 un->un_ncmds_in_driver--; 21813 ASSERT(un->un_ncmds_in_driver >= 0); 21814 mutex_exit(SD_MUTEX(un)); 21815 err = EIO; 21816 goto done_quick_assess; 21817 } 21818 mutex_enter(SD_MUTEX(un)); 21819 /* FALLTHROUGH */ 21820 case DKIOCREMOVABLE: 21821 case DKIOCHOTPLUGGABLE: 21822 case DKIOCINFO: 21823 case DKIOCGMEDIAINFO: 21824 case DKIOCGMEDIAINFOEXT: 21825 case MHIOCENFAILFAST: 21826 case MHIOCSTATUS: 21827 case MHIOCTKOWN: 21828 case MHIOCRELEASE: 21829 case MHIOCGRP_INKEYS: 21830 case MHIOCGRP_INRESV: 21831 case MHIOCGRP_REGISTER: 21832 case MHIOCGRP_RESERVE: 21833 case MHIOCGRP_PREEMPTANDABORT: 21834 case MHIOCGRP_REGISTERANDIGNOREKEY: 21835 case CDROMCLOSETRAY: 21836 case USCSICMD: 21837 goto skip_ready_valid; 21838 default: 21839 break; 21840 } 21841 21842 mutex_exit(SD_MUTEX(un)); 21843 err = sd_ready_and_valid(ssc, SDPART(dev)); 21844 mutex_enter(SD_MUTEX(un)); 21845 21846 if (err != SD_READY_VALID) { 21847 switch (cmd) { 21848 case DKIOCSTATE: 21849 case CDROMGDRVSPEED: 21850 case CDROMSDRVSPEED: 21851 case FDEJECT: /* for eject command */ 21852 case DKIOCEJECT: 21853 case CDROMEJECT: 21854 case DKIOCREMOVABLE: 21855 case DKIOCHOTPLUGGABLE: 21856 break; 21857 default: 21858 if (un->un_f_has_removable_media) { 21859 err = ENXIO; 21860 } else { 21861 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 21862 if (err == SD_RESERVED_BY_OTHERS) { 21863 err = EACCES; 21864 } else { 21865 err = EIO; 21866 } 21867 } 21868 un->un_ncmds_in_driver--; 21869 ASSERT(un->un_ncmds_in_driver >= 0); 21870 mutex_exit(SD_MUTEX(un)); 21871 21872 goto done_without_assess; 21873 } 21874 } 21875 } 21876 21877 skip_ready_valid: 21878 mutex_exit(SD_MUTEX(un)); 21879 21880 switch (cmd) { 21881 case DKIOCINFO: 21882 
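
/*
 * Illustrative user-level usage of this entry point (a hedged sketch, not
 * part of the driver; the device path and error handling are hypothetical):
 *
 *	#include <sys/dkio.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY);
 *	struct dk_cinfo cinfo;
 *	if (fd >= 0 && ioctl(fd, DKIOCINFO, &cinfo) == 0)
 *		(void) printf("ctype=%d unit=%d\n",
 *		    cinfo.dki_ctype, cinfo.dki_unit);
 *
 * The request is routed through the switch statement below; commands that
 * reach the hardware go through an sd_ssc_t initialized at function entry.
 */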

static int
sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p)
{
	struct sd_lun	*un = NULL;
	int		err = 0;
	int		i = 0;
	cred_t		*cr;
	int		tmprval = EINVAL;
	boolean_t	is_valid;
	sd_ssc_t	*ssc;

	/*
	 * All device accesses go thru sdstrategy where we check on suspend
	 * status
	 */
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/* Initialize sd_ssc_t for internal uscsi commands */
	ssc = sd_ssc_init(un);

	is_valid = SD_IS_VALID_LABEL(un);

	/*
	 * Moved this wait from sd_uscsi_strategy to here for
	 * reasons of deadlock prevention. Internal driver commands,
	 * specifically those to change a device's power level, result
	 * in a call to sd_uscsi_strategy.
	 */
	mutex_enter(SD_MUTEX(un));
	while ((un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}
	/*
	 * Twiddling the counter here protects commands from now
	 * through to the top of sd_uscsi_strategy. Without the
	 * counter inc. a power down, for example, could get in
	 * after the above check for state is made and before
	 * execution gets to the top of sd_uscsi_strategy.
	 * That would cause problems.
	 */
	un->un_ncmds_in_driver++;

	if (!is_valid &&
	    (flag & (FNDELAY | FNONBLOCK))) {
		switch (cmd) {
		case DKIOCGGEOM:	/* SD_PATH_DIRECT */
		case DKIOCGVTOC:
		case DKIOCGEXTVTOC:
		case DKIOCGAPART:
		case DKIOCPARTINFO:
		case DKIOCEXTPARTINFO:
		case DKIOCSGEOM:
		case DKIOCSAPART:
		case DKIOCGETEFI:
		case DKIOCPARTITION:
		case DKIOCSVTOC:
		case DKIOCSEXTVTOC:
		case DKIOCSETEFI:
		case DKIOCGMBOOT:
		case DKIOCSMBOOT:
		case DKIOCG_PHYGEOM:
		case DKIOCG_VIRTGEOM:
			/* let cmlb handle it */
			goto skip_ready_valid;

		case CDROMPAUSE:
		case CDROMRESUME:
		case CDROMPLAYMSF:
		case CDROMPLAYTRKIND:
		case CDROMREADTOCHDR:
		case CDROMREADTOCENTRY:
		case CDROMSTOP:
		case CDROMSTART:
		case CDROMVOLCTRL:
		case CDROMSUBCHNL:
		case CDROMREADMODE2:
		case CDROMREADMODE1:
		case CDROMREADOFFSET:
		case CDROMSBLKMODE:
		case CDROMGBLKMODE:
		case CDROMGDRVSPEED:
		case CDROMSDRVSPEED:
		case CDROMCDDA:
		case CDROMCDXA:
		case CDROMSUBCODE:
			if (!ISCD(un)) {
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));
				err = ENOTTY;
				goto done_without_assess;
			}
			break;
		case FDEJECT:
		case DKIOCEJECT:
		case CDROMEJECT:
			if (!un->un_f_eject_media_supported) {
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));
				err = ENOTTY;
				goto done_without_assess;
			}
			break;
		case DKIOCFLUSHWRITECACHE:
			mutex_exit(SD_MUTEX(un));
			err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
			if (err != 0) {
				mutex_enter(SD_MUTEX(un));
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));
				err = EIO;
				goto done_quick_assess;
			}
			mutex_enter(SD_MUTEX(un));
			/* FALLTHROUGH */
		case DKIOCREMOVABLE:
		case DKIOCHOTPLUGGABLE:
		case DKIOCINFO:
		case DKIOCGMEDIAINFO:
		case DKIOCGMEDIAINFOEXT:
		case MHIOCENFAILFAST:
		case MHIOCSTATUS:
		case MHIOCTKOWN:
		case MHIOCRELEASE:
		case MHIOCGRP_INKEYS:
		case MHIOCGRP_INRESV:
		case MHIOCGRP_REGISTER:
		case MHIOCGRP_RESERVE:
		case MHIOCGRP_PREEMPTANDABORT:
		case MHIOCGRP_REGISTERANDIGNOREKEY:
		case CDROMCLOSETRAY:
		case USCSICMD:
			goto skip_ready_valid;
		default:
			break;
		}

		mutex_exit(SD_MUTEX(un));
		err = sd_ready_and_valid(ssc, SDPART(dev));
		mutex_enter(SD_MUTEX(un));

		if (err != SD_READY_VALID) {
			switch (cmd) {
			case DKIOCSTATE:
			case CDROMGDRVSPEED:
			case CDROMSDRVSPEED:
			case FDEJECT:	/* for eject command */
			case DKIOCEJECT:
			case CDROMEJECT:
			case DKIOCREMOVABLE:
			case DKIOCHOTPLUGGABLE:
				break;
			default:
				if (un->un_f_has_removable_media) {
					err = ENXIO;
				} else {
					/*
					 * Do not map
					 * SD_RESERVED_BY_OTHERS to EIO
					 */
					if (err == SD_RESERVED_BY_OTHERS) {
						err = EACCES;
					} else {
						err = EIO;
					}
				}
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));

				goto done_without_assess;
			}
		}
	}

skip_ready_valid:
	mutex_exit(SD_MUTEX(un));

	switch (cmd) {
	case DKIOCINFO:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n");
		err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag);
		break;

	case DKIOCGMEDIAINFO:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n");
		err = sd_get_media_info(dev, (caddr_t)arg, flag);
		break;

	case DKIOCGMEDIAINFOEXT:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFOEXT\n");
		err = sd_get_media_info_ext(dev, (caddr_t)arg, flag);
		break;

	case DKIOCGGEOM:
	case DKIOCGVTOC:
	case DKIOCGEXTVTOC:
	case DKIOCGAPART:
	case DKIOCPARTINFO:
	case DKIOCEXTPARTINFO:
	case DKIOCSGEOM:
	case DKIOCSAPART:
	case DKIOCGETEFI:
	case DKIOCPARTITION:
	case DKIOCSVTOC:
	case DKIOCSEXTVTOC:
	case DKIOCSETEFI:
	case DKIOCGMBOOT:
	case DKIOCSMBOOT:
	case DKIOCG_PHYGEOM:
	case DKIOCG_VIRTGEOM:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd);

		/* TUR should spin up */

		if (un->un_f_has_removable_media)
			err = sd_send_scsi_TEST_UNIT_READY(ssc,
			    SD_CHECK_FOR_MEDIA);
		else
			err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);

		if (err != 0)
			goto done_with_assess;

		err = cmlb_ioctl(un->un_cmlbhandle, dev,
		    cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT);

		if ((err == 0) &&
		    ((cmd == DKIOCSETEFI) ||
		    ((un->un_f_pkstats_enabled) &&
		    (cmd == DKIOCSAPART || cmd == DKIOCSVTOC ||
		    cmd == DKIOCSEXTVTOC)))) {

			tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT,
			    (void *)SD_PATH_DIRECT);
			if ((tmprval == 0) && un->un_f_pkstats_enabled) {
				sd_set_pstats(un);
				SD_TRACE(SD_LOG_IO_PARTITION, un,
				    "sd_ioctl: un:0x%p pstats created and "
				    "set\n", un);
			}
		}

		if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) ||
		    ((cmd == DKIOCSETEFI) && (tmprval == 0))) {

			mutex_enter(SD_MUTEX(un));
			if (un->un_f_devid_supported &&
			    (un->un_f_opt_fab_devid == TRUE)) {
				if (un->un_devid == NULL) {
					sd_register_devid(ssc, SD_DEVINFO(un),
					    SD_TARGET_IS_UNRESERVED);
				} else {
					/*
					 * The device id for this disk
					 * has been fabricated. The
					 * device id must be preserved
					 * by writing it back out to
					 * disk.
					 */
					if (sd_write_deviceid(ssc) != 0) {
						ddi_devid_free(un->un_devid);
						un->un_devid = NULL;
					}
				}
			}
			mutex_exit(SD_MUTEX(un));
		}

		break;

	case DKIOCLOCK:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n");
		err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
		    SD_PATH_STANDARD);
		goto done_with_assess;

	case DKIOCUNLOCK:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n");
		err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
		    SD_PATH_STANDARD);
		goto done_with_assess;

	case DKIOCSTATE: {
		enum dkio_state	state;
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n");

		if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = sd_check_media(dev, state);
			if (err == 0) {
				if (ddi_copyout(&un->un_mediastate, (void *)arg,
				    sizeof (int), flag) != 0)
					err = EFAULT;
			}
		}
		break;
	}
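	/*
	 * A hedged sketch of how DKIOCSTATE is typically consumed from user
	 * level (for example by a media-monitoring daemon); each call blocks
	 * in sd_check_media() until the state differs from the one passed in:
	 *
	 *	enum dkio_state state = DKIO_NONE;
	 *	while (ioctl(fd, DKIOCSTATE, &state) == 0) {
	 *		if (state == DKIO_INSERTED)
	 *			;	(re)read the label, mount, etc.
	 *		else if (state == DKIO_EJECTED)
	 *			;	invalidate any cached state
	 *	}
	 */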
	case DKIOCREMOVABLE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n");
		i = un->un_f_has_removable_media ? 1 : 0;
		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = 0;
		}
		break;

	case DKIOCHOTPLUGGABLE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n");
		i = un->un_f_is_hotpluggable ? 1 : 0;
		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = 0;
		}
		break;

	case DKIOCGTEMPERATURE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n");
		err = sd_dkio_get_temp(dev, (caddr_t)arg, flag);
		break;

	case MHIOCENFAILFAST:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag);
		}
		break;

	case MHIOCTKOWN:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag);
		}
		break;

	case MHIOCRELEASE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_release(dev);
		}
		break;

	case MHIOCSTATUS:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n");
		if ((err = drv_priv(cred_p)) == 0) {
			switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) {
			case 0:
				err = 0;
				break;
			case EACCES:
				*rval_p = 1;
				err = 0;
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				break;
			default:
				err = EIO;
				goto done_with_assess;
			}
		}
		break;

	case MHIOCQRESERVE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_reserve_release(dev, SD_RESERVE);
		}
		break;

	case MHIOCREREGISTERDEVID:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n");
		if (drv_priv(cred_p) == EPERM) {
			err = EPERM;
		} else if (!un->un_f_devid_supported) {
			err = ENOTTY;
		} else {
			err = sd_mhdioc_register_devid(dev);
		}
		break;

	case MHIOCGRP_INKEYS:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n");
		if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else {
				err = sd_mhdioc_inkeys(dev, (caddr_t)arg,
				    flag);
			}
		}
		break;

	case MHIOCGRP_INRESV:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n");
		if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else {
				err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag);
			}
		}
		break;

	case MHIOCGRP_REGISTER:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_register_t reg;
				if (ddi_copyin((void *)arg, &reg,
				    sizeof (mhioc_register_t), flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_REGISTER,
					    (uchar_t *)&reg);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case MHIOCGRP_RESERVE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_resv_desc_t resv_desc;
				if (ddi_copyin((void *)arg, &resv_desc,
				    sizeof (mhioc_resv_desc_t), flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_RESERVE,
					    (uchar_t *)&resv_desc);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case MHIOCGRP_PREEMPTANDABORT:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_preemptandabort_t preempt_abort;
				if (ddi_copyin((void *)arg, &preempt_abort,
				    sizeof (mhioc_preemptandabort_t),
				    flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_PREEMPTANDABORT,
					    (uchar_t *)&preempt_abort);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case MHIOCGRP_REGISTERANDIGNOREKEY:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != NULL) {
				mhioc_registerandignorekey_t r_and_i;
				if (ddi_copyin((void *)arg, (void *)&r_and_i,
				    sizeof (mhioc_registerandignorekey_t),
				    flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_REGISTERANDIGNOREKEY,
					    (uchar_t *)&r_and_i);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case USCSICMD:
		SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n");
		cr = ddi_get_cred();
		if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) {
			err = EPERM;
		} else {
			enum uio_seg	uioseg;

			uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE :
			    UIO_USERSPACE;
			if (un->un_f_format_in_progress == TRUE) {
				err = EAGAIN;
				break;
			}

			err = sd_ssc_send(ssc,
			    (struct uscsi_cmd *)arg,
			    flag, uioseg, SD_PATH_STANDARD);
			if (err != 0)
				goto done_with_assess;
			else
				sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		}
		break;
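	/*
	 * USCSICMD passes a caller-built CDB straight to sd_ssc_send(). A
	 * hedged user-level sketch (requires privileges; fields other than
	 * those shown are left zeroed):
	 *
	 *	struct uscsi_cmd ucmd;
	 *	union scsi_cdb cdb;
	 *	bzero(&ucmd, sizeof (ucmd));
	 *	bzero(&cdb, sizeof (cdb));
	 *	cdb.scc_cmd = SCMD_TEST_UNIT_READY;
	 *	ucmd.uscsi_cdb = (caddr_t)&cdb;
	 *	ucmd.uscsi_cdblen = CDB_GROUP0;
	 *	ucmd.uscsi_flags = USCSI_SILENT;
	 *	(void) ioctl(fd, USCSICMD, &ucmd);
	 */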
	case CDROMPAUSE:
	case CDROMRESUME:
		SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_pause_resume(dev, cmd);
		}
		break;

	case CDROMPLAYMSF:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_play_msf(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMPLAYTRKIND:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n");
#if defined(__i386) || defined(__amd64)
		/*
		 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead
		 */
		if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
#else
		if (!ISCD(un)) {
#endif
			err = ENOTTY;
		} else {
			err = sr_play_trkind(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADTOCHDR:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_tochdr(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADTOCENTRY:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_tocentry(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSTOP:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_STOP,
			    SD_PATH_STANDARD);
			goto done_with_assess;
		}
		break;

	case CDROMSTART:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START,
			    SD_PATH_STANDARD);
			goto done_with_assess;
		}
		break;

	case CDROMCLOSETRAY:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_CLOSE,
			    SD_PATH_STANDARD);
			goto done_with_assess;
		}
		break;

	case FDEJECT:	/* for eject command */
	case DKIOCEJECT:
	case CDROMEJECT:
		SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
		if (!un->un_f_eject_media_supported) {
			err = ENOTTY;
		} else {
			err = sr_eject(dev);
		}
		break;

	case CDROMVOLCTRL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSUBCHNL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_subchannel(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE2:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_cfg_is_atapi == TRUE) {
			/*
			 * If the drive supports READ CD, use that instead of
			 * switching the LBA size via a MODE SELECT
			 * Block Descriptor
			 */
			err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
		} else {
			err = sr_read_mode2(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE1:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_mode1(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADOFFSET:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_sony_session_offset(dev, (caddr_t)arg,
			    flag);
		}
		break;

	case CDROMSBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
		/*
		 * There is no means of changing block size in case of atapi
		 * drives, thus return ENOTTY if drive type is atapi
		 */
		if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {

			/*
			 * MMC Devices do not support changing the
			 * logical block size
			 *
			 * Note: EINVAL is being returned instead of ENOTTY to
			 * maintain consistency with the original mmc
			 * driver update.
			 */
			err = EINVAL;
		} else {
			mutex_enter(SD_MUTEX(un));
			if ((!(un->un_exclopen & (1<<SDPART(dev)))) ||
			    (un->un_ncmds_in_transport > 0)) {
				mutex_exit(SD_MUTEX(un));
				err = EINVAL;
			} else {
				mutex_exit(SD_MUTEX(un));
				err = sr_change_blkmode(dev, cmd, arg, flag);
			}
		}
		break;

	case CDROMGBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if ((un->un_f_cfg_is_atapi != FALSE) &&
		    (un->un_f_blockcount_is_valid != FALSE)) {
			/*
			 * Drive is an ATAPI drive so return target block
			 * size for ATAPI drives since we cannot change the
			 * blocksize on ATAPI drives. Used primarily to detect
			 * if an ATAPI cdrom is present.
			 */
			if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
			    sizeof (int), flag) != 0) {
				err = EFAULT;
			} else {
				err = 0;
			}

		} else {
			/*
			 * Drive supports changing block sizes via a Mode
			 * Select.
			 */
			err = sr_change_blkmode(dev, cmd, arg, flag);
		}
		break;

	case CDROMGDRVSPEED:
	case CDROMSDRVSPEED:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {
			/*
			 * Note: In the future the driver implementation
			 * for getting and
			 * setting cd speed should entail:
			 * 1) If non-mmc try the Toshiba mode page
			 *    (sr_change_speed)
			 * 2) If mmc but no support for Real Time Streaming try
			 *    the SET CD SPEED (0xBB) command
			 *    (sr_atapi_change_speed)
			 * 3) If mmc and support for Real Time Streaming
			 *    try the GET PERFORMANCE and SET STREAMING
			 *    commands (not yet implemented, 4380808)
			 */
			/*
			 * As per recent MMC spec, CD-ROM speed is variable
			 * and changes with LBA. Since there is no such
			 * thing as drive speed now, fail this ioctl.
			 *
			 * Note: EINVAL is returned for consistency with the
			 * original implementation, which included support for
			 * getting the drive speed of mmc devices but not
			 * setting the drive speed. Thus EINVAL would be
			 * returned if a set request was made for an mmc
			 * device. We no longer support get or set speed for
			 * mmc but need to remain consistent with regard
			 * to the error code returned.
			 */
			err = EINVAL;
		} else if (un->un_f_cfg_is_atapi == TRUE) {
			err = sr_atapi_change_speed(dev, cmd, arg, flag);
		} else {
			err = sr_change_speed(dev, cmd, arg, flag);
		}
		break;

	case CDROMCDDA:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_cdda(dev, (void *)arg, flag);
		}
		break;

	case CDROMCDXA:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_cdxa(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSUBCODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_all_subcodes(dev, (caddr_t)arg, flag);
		}
		break;


#ifdef SDDEBUG
	/* RESET/ABORTS testing ioctls */
	case DKIOCRESET: {
		int	reset_level;

		if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) {
			err = EFAULT;
		} else {
			SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: "
			    "reset_level = 0x%x\n", reset_level);
			if (scsi_reset(SD_ADDRESS(un), reset_level)) {
				err = 0;
			} else {
				err = EIO;
			}
		}
		break;
	}

	case DKIOCABORT:
		SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n");
		if (scsi_abort(SD_ADDRESS(un), NULL)) {
			err = 0;
		} else {
			err = EIO;
		}
		break;
#endif

#ifdef SD_FAULT_INJECTION
	/* SDIOC FaultInjection testing ioctls */
	case SDIOCSTART:
	case SDIOCSTOP:
	case SDIOCINSERTPKT:
	case SDIOCINSERTXB:
	case SDIOCINSERTUN:
	case SDIOCINSERTARQ:
	case SDIOCPUSH:
	case SDIOCRETRIEVE:
	case SDIOCRUN:
		SD_INFO(SD_LOG_SDTEST, un, "sdioctl: "
		    "SDIOC detected cmd:0x%X:\n", cmd);
		/* call error generator */
		sd_faultinjection_ioctl(cmd, arg, un);
		err = 0;
		break;

#endif /* SD_FAULT_INJECTION */
	case DKIOCFLUSHWRITECACHE:
		{
			struct dk_callback *dkc = (struct dk_callback *)arg;

			mutex_enter(SD_MUTEX(un));
			if (!un->un_f_sync_cache_supported ||
			    !un->un_f_write_cache_enabled) {
				err = un->un_f_sync_cache_supported ?
				    0 : ENOTSUP;
				mutex_exit(SD_MUTEX(un));
				if ((flag & FKIOCTL) && dkc != NULL &&
				    dkc->dkc_callback != NULL) {
					(*dkc->dkc_callback)(dkc->dkc_cookie,
					    err);
					/*
					 * Did callback and reported error.
					 * Since we did a callback, ioctl
					 * should return 0.
					 */
					err = 0;
				}
				break;
			}
			mutex_exit(SD_MUTEX(un));

			if ((flag & FKIOCTL) && dkc != NULL &&
			    dkc->dkc_callback != NULL) {
				/* async SYNC CACHE request */
				err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
			} else {
				/* synchronous SYNC CACHE request */
				err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
			}
		}
		break;

	case DKIOCGETWCE: {

		int wce;

		if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) {
			break;
		}

		if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) {
			err = EFAULT;
		}
		break;
	}
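	/*
	 * DKIOCGETWCE above and DKIOCSETWCE below operate on a single int:
	 * zero means the write cache is (to be) disabled, non-zero enabled.
	 * A hedged user-level sketch that disables the cache only if it is
	 * currently enabled:
	 *
	 *	int wce;
	 *	if (ioctl(fd, DKIOCGETWCE, &wce) == 0 && wce != 0) {
	 *		wce = 0;
	 *		(void) ioctl(fd, DKIOCSETWCE, &wce);
	 *	}
	 */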
	case DKIOCSETWCE: {

		int wce, sync_supported;

		if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) {
			err = EFAULT;
			break;
		}

		/*
		 * Synchronize multiple threads trying to enable
		 * or disable the cache via the un_f_wcc_cv
		 * condition variable.
		 */
		mutex_enter(SD_MUTEX(un));

		/*
		 * Don't allow the cache to be enabled if the
		 * config file has it disabled.
		 */
		if (un->un_f_opt_disable_cache && wce) {
			mutex_exit(SD_MUTEX(un));
			err = EINVAL;
			break;
		}

		/*
		 * Wait for write cache change in progress
		 * bit to be clear before proceeding.
		 */
		while (un->un_f_wcc_inprog)
			cv_wait(&un->un_wcc_cv, SD_MUTEX(un));

		un->un_f_wcc_inprog = 1;

		if (un->un_f_write_cache_enabled && wce == 0) {
			/*
			 * Disable the write cache. Don't clear
			 * un_f_write_cache_enabled until after
			 * the mode select and flush are complete.
			 */
			sync_supported = un->un_f_sync_cache_supported;

			/*
			 * If cache flush is suppressed, we assume that the
			 * controller firmware will take care of managing the
			 * write cache for us: no need to explicitly
			 * disable it.
			 */
			if (!un->un_f_suppress_cache_flush) {
				mutex_exit(SD_MUTEX(un));
				if ((err = sd_cache_control(ssc,
				    SD_CACHE_NOCHANGE,
				    SD_CACHE_DISABLE)) == 0 &&
				    sync_supported) {
					err = sd_send_scsi_SYNCHRONIZE_CACHE(un,
					    NULL);
				}
			} else {
				mutex_exit(SD_MUTEX(un));
			}

			mutex_enter(SD_MUTEX(un));
			if (err == 0) {
				un->un_f_write_cache_enabled = 0;
			}

		} else if (!un->un_f_write_cache_enabled && wce != 0) {
			/*
			 * Set un_f_write_cache_enabled first, so there is
			 * no window where the cache is enabled, but the
			 * bit says it isn't.
			 */
			un->un_f_write_cache_enabled = 1;

			/*
			 * If cache flush is suppressed, we assume that the
			 * controller firmware will take care of managing the
			 * write cache for us: no need to explicitly
			 * enable it.
			 */
			if (!un->un_f_suppress_cache_flush) {
				mutex_exit(SD_MUTEX(un));
				err = sd_cache_control(ssc, SD_CACHE_NOCHANGE,
				    SD_CACHE_ENABLE);
			} else {
				mutex_exit(SD_MUTEX(un));
			}

			mutex_enter(SD_MUTEX(un));

			if (err) {
				un->un_f_write_cache_enabled = 0;
			}
		}

		un->un_f_wcc_inprog = 0;
		cv_broadcast(&un->un_wcc_cv);
		mutex_exit(SD_MUTEX(un));
		break;
	}

	default:
		err = ENOTTY;
		break;
	}
	mutex_enter(SD_MUTEX(un));
	un->un_ncmds_in_driver--;
	ASSERT(un->un_ncmds_in_driver >= 0);
	mutex_exit(SD_MUTEX(un));


done_without_assess:
	sd_ssc_fini(ssc);

	SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
	return (err);

done_with_assess:
	mutex_enter(SD_MUTEX(un));
	un->un_ncmds_in_driver--;
	ASSERT(un->un_ncmds_in_driver >= 0);
	mutex_exit(SD_MUTEX(un));

done_quick_assess:
	if (err != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	/* Uninitialize sd_ssc_t pointer */
	sd_ssc_fini(ssc);

	SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
	return (err);
}


/*
 * Function: sd_dkio_ctrl_info
 *
 * Description: This routine is the driver entry point for handling controller
 *		information ioctl requests (DKIOCINFO).
 *
 * Arguments: dev - the device number
 *		arg - pointer to user provided dk_cinfo structure
 *			specifying the controller type and attributes.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 */

static int
sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un = NULL;
	struct dk_cinfo	*info;
	dev_info_t	*pdip;
	int		lun, tgt;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	info = (struct dk_cinfo *)
	    kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP);

	switch (un->un_ctype) {
	case CTYPE_CDROM:
		info->dki_ctype = DKC_CDROM;
		break;
	default:
		info->dki_ctype = DKC_SCSI_CCS;
		break;
	}
	pdip = ddi_get_parent(SD_DEVINFO(un));
	info->dki_cnum = ddi_get_instance(pdip);
	if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) {
		(void) strcpy(info->dki_cname, ddi_get_name(pdip));
	} else {
		(void) strncpy(info->dki_cname, ddi_node_name(pdip),
		    DK_DEVLEN - 1);
	}

	lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0);
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0);

	/* Unit Information */
	info->dki_unit = ddi_get_instance(SD_DEVINFO(un));
	info->dki_slave = ((tgt << 3) | lun);
	(void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)),
	    DK_DEVLEN - 1);
	info->dki_flags = DKI_FMTVOL;
	info->dki_partition = SDPART(dev);

	/* Max Transfer size of this device in blocks */
	info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize;
	info->dki_addr = 0;
	info->dki_space = 0;
	info->dki_prio = 0;
	info->dki_vec = 0;

	if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) {
		kmem_free(info, sizeof (struct dk_cinfo));
		return (EFAULT);
	} else {
		kmem_free(info, sizeof (struct dk_cinfo));
		return (0);
	}
}


/*
 * Function: sd_get_media_info
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests for the media type or command set profile used by the
 *		drive to operate on the media (DKIOCGMEDIAINFO).
 *
 * Arguments: dev - the device number
 *		arg - pointer to user provided dk_minfo structure
 *			specifying the media type, logical block size and
 *			drive capacity.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EACCES
 *		EFAULT
 *		ENXIO
 *		EIO
 */

static int
sd_get_media_info(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	com;
	struct scsi_inquiry	*sinq;
	struct dk_minfo		media_info;
	u_longlong_t		media_capacity;
	uint64_t		capacity;
	uint_t			lbasize;
	uchar_t			*out_data;
	uchar_t			*rqbuf;
	int			rval = 0;
	int			rtn;
	sd_ssc_t		*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n");

	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	/* Issue a TUR to determine if the drive is ready with media present */
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA);
	if (rval == ENXIO) {
		goto done;
	} else if (rval != 0) {
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	/* Now get configuration data */
	if (ISCD(un)) {
		media_info.dki_media_type = DK_CDROM;

		/* Allow SCMD_GET_CONFIGURATION to MMC devices only */
		if (un->un_f_mmc_cap == TRUE) {
			rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf,
			    SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
			    SD_PATH_STANDARD);

			if (rtn) {
				/*
				 * We ignore all failures for CD and need to
				 * put the assessment before processing code
				 * to avoid missing assessment for FMA.
				 */
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				/*
				 * Failed for other than an illegal request
				 * or command not supported
				 */
				if ((com.uscsi_status == STATUS_CHECK) &&
				    (com.uscsi_rqstatus == STATUS_GOOD)) {
					if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) ||
					    (rqbuf[12] != 0x20)) {
						rval = EIO;
						goto no_assessment;
					}
				}
			} else {
				/*
				 * The GET CONFIGURATION command succeeded
				 * so set the media type according to the
				 * returned data
				 */
				media_info.dki_media_type = out_data[6];
				media_info.dki_media_type <<= 8;
				media_info.dki_media_type |= out_data[7];
			}
		}
	} else {
		/*
		 * The profile list is not available, so we attempt to identify
		 * the media type based on the inquiry data
		 */
		sinq = un->un_sd->sd_inq;
		if ((sinq->inq_dtype == DTYPE_DIRECT) ||
		    (sinq->inq_dtype == DTYPE_OPTICAL)) {
			/* This is a direct access device or optical disk */
			media_info.dki_media_type = DK_FIXED_DISK;

			if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) ||
			    (bcmp(sinq->inq_vid, "iomega", 6) == 0)) {
				if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) {
					media_info.dki_media_type = DK_ZIP;
				} else if (
				    (bcmp(sinq->inq_pid, "jaz", 3) == 0)) {
					media_info.dki_media_type = DK_JAZ;
				}
			}
		} else {
			/*
			 * Not a CD, direct access or optical disk so return
			 * unknown media
			 */
			media_info.dki_media_type = DK_UNKNOWN;
		}
	}

	/* Now read the capacity so we can provide the lbasize and capacity */
	rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
	    SD_PATH_DIRECT);
	switch (rval) {
	case 0:
		break;
	case EACCES:
		rval = EACCES;
		goto done;
	default:
		rval = EIO;
		goto done;
	}

	/*
	 * If lun is expanded dynamically, update the un structure.
	 */
	mutex_enter(SD_MUTEX(un));
	if ((un->un_f_blockcount_is_valid == TRUE) &&
	    (un->un_f_tgt_blocksize_is_valid == TRUE) &&
	    (capacity > un->un_blockcount)) {
		sd_update_block_info(un, lbasize, capacity);
	}
	mutex_exit(SD_MUTEX(un));

	media_info.dki_lbsize = lbasize;
	media_capacity = capacity;

	/*
	 * sd_send_scsi_READ_CAPACITY() reports capacity in
	 * un->un_sys_blocksize chunks. So we need to convert it into
	 * cap.lbasize chunks.
	 */
	media_capacity *= un->un_sys_blocksize;
	media_capacity /= lbasize;
	media_info.dki_capacity = media_capacity;

	if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) {
		rval = EFAULT;
		/*
		 * Keep the goto so that any code added below this copyout
		 * in the future is still skipped on failure.
		 */
		goto no_assessment;
	}
done:
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
no_assessment:
	sd_ssc_fini(ssc);
	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
	kmem_free(rqbuf, SENSE_LENGTH);
	return (rval);
}
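
/*
 * A worked example of the capacity conversion above (numbers illustrative):
 * if READ CAPACITY reports capacity = 1000000 in un_sys_blocksize (512-byte)
 * units and the media's lbasize is 2048, then
 *
 *	media_capacity = 1000000 * 512 / 2048 = 250000
 *
 * so dki_capacity is expressed in dki_lbsize units, as DKIOCGMEDIAINFO
 * consumers expect.
 */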

/*
 * Function: sd_get_media_info_ext
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests for the media type or command set profile used by the
 *		drive to operate on the media (DKIOCGMEDIAINFOEXT). The
 *		difference between this ioctl and DKIOCGMEDIAINFO is that the
 *		return value of this ioctl contains both the logical block
 *		size and the physical block size.
 *
 *
 * Arguments: dev - the device number
 *		arg - pointer to user provided dk_minfo_ext structure
 *			specifying the media type, logical block size,
 *			physical block size and disk capacity.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EACCES
 *		EFAULT
 *		ENXIO
 *		EIO
 */

static int
sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	com;
	struct scsi_inquiry	*sinq;
	struct dk_minfo_ext	media_info_ext;
	u_longlong_t		media_capacity;
	uint64_t		capacity;
	uint_t			lbasize;
	uint_t			pbsize;
	uchar_t			*out_data;
	uchar_t			*rqbuf;
	int			rval = 0;
	int			rtn;
	sd_ssc_t		*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info_ext: entry\n");

	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
	ssc = sd_ssc_init(un);

	/* Issue a TUR to determine if the drive is ready with media present */
	rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA);
	if (rval == ENXIO) {
		goto done;
	} else if (rval != 0) {
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	/* Now get configuration data */
	if (ISCD(un)) {
		media_info_ext.dki_media_type = DK_CDROM;

		/* Allow SCMD_GET_CONFIGURATION to MMC devices only */
		if (un->un_f_mmc_cap == TRUE) {
			rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf,
			    SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
			    SD_PATH_STANDARD);

			if (rtn) {
				/*
				 * We ignore all failures for CD and need to
				 * put the assessment before processing code
				 * to avoid missing assessment for FMA.
				 */
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				/*
				 * Failed for other than an illegal request
				 * or command not supported
				 */
				if ((com.uscsi_status == STATUS_CHECK) &&
				    (com.uscsi_rqstatus == STATUS_GOOD)) {
					if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) ||
					    (rqbuf[12] != 0x20)) {
						rval = EIO;
						goto no_assessment;
					}
				}
			} else {
				/*
				 * The GET CONFIGURATION command succeeded
				 * so set the media type according to the
				 * returned data
				 */
				media_info_ext.dki_media_type = out_data[6];
				media_info_ext.dki_media_type <<= 8;
				media_info_ext.dki_media_type |= out_data[7];
			}
		}
	} else {
		/*
		 * The profile list is not available, so we attempt to identify
		 * the media type based on the inquiry data
		 */
		sinq = un->un_sd->sd_inq;
		if ((sinq->inq_dtype == DTYPE_DIRECT) ||
		    (sinq->inq_dtype == DTYPE_OPTICAL)) {
			/* This is a direct access device or optical disk */
			media_info_ext.dki_media_type = DK_FIXED_DISK;

			if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) ||
			    (bcmp(sinq->inq_vid, "iomega", 6) == 0)) {
				if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) {
					media_info_ext.dki_media_type = DK_ZIP;
				} else if (
				    (bcmp(sinq->inq_pid, "jaz", 3) == 0)) {
					media_info_ext.dki_media_type = DK_JAZ;
				}
			}
		} else {
			/*
			 * Not a CD, direct access or optical disk so return
			 * unknown media
			 */
			media_info_ext.dki_media_type = DK_UNKNOWN;
		}
	}

	/*
	 * Now read the capacity so we can provide the lbasize,
	 * pbsize and capacity.
	 */
	rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize, &pbsize,
	    SD_PATH_DIRECT);

	if (rval != 0) {
		rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
		    SD_PATH_DIRECT);

		switch (rval) {
		case 0:
			pbsize = lbasize;
			media_capacity = capacity;
			/*
			 * sd_send_scsi_READ_CAPACITY() reports capacity in
			 * un->un_sys_blocksize chunks. So we need to convert
			 * it into cap.lbsize chunks.
			 */
			if (un->un_f_has_removable_media) {
				media_capacity *= un->un_sys_blocksize;
				media_capacity /= lbasize;
			}
			break;
		case EACCES:
			rval = EACCES;
			goto done;
		default:
			rval = EIO;
			goto done;
		}
	} else {
		media_capacity = capacity;
	}

	/*
	 * If lun is expanded dynamically, update the un structure.
	 */
	mutex_enter(SD_MUTEX(un));
	if ((un->un_f_blockcount_is_valid == TRUE) &&
	    (un->un_f_tgt_blocksize_is_valid == TRUE) &&
	    (capacity > un->un_blockcount)) {
		sd_update_block_info(un, lbasize, capacity);
	}
	mutex_exit(SD_MUTEX(un));

	media_info_ext.dki_lbsize = lbasize;
	media_info_ext.dki_capacity = media_capacity;
	media_info_ext.dki_pbsize = pbsize;

	if (ddi_copyout(&media_info_ext, arg, sizeof (struct dk_minfo_ext),
	    flag)) {
		rval = EFAULT;
		goto no_assessment;
	}
done:
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
no_assessment:
	sd_ssc_fini(ssc);
	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
	kmem_free(rqbuf, SENSE_LENGTH);
	return (rval);
}

/*
 * Function: sd_check_media
 *
 * Description: This utility routine implements the functionality for the
 *		DKIOCSTATE ioctl. This ioctl blocks the user thread until the
 *		driver state changes from that specified by the user
 *		(inserted or ejected). For example, if the user specifies
 *		DKIO_EJECTED and the current media state is inserted this
 *		routine will immediately return DKIO_INSERTED. However, if the
 *		current media state is not inserted the user thread will be
 *		blocked until the drive state changes. If DKIO_NONE is specified
 *		the user thread will block until a drive state change occurs.
 *
 * Arguments: dev - the device number
 *		state - user pointer to a dkio_state, updated with the current
 *			drive state at return.
 *
 * Return Code: ENXIO
 *		EIO
 *		EAGAIN
 *		EINTR
 */

static int
sd_check_media(dev_t dev, enum dkio_state state)
{
	struct sd_lun		*un = NULL;
	enum dkio_state		prev_state;
	opaque_t		token = NULL;
	int			rval = 0;
	sd_ssc_t		*ssc;
	dev_t			sub_dev;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * sub_dev is used when submitting request to scsi watch.
	 * All submissions are unified to use same device number.
	 */
	sub_dev = sd_make_device(SD_DEVINFO(un));

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n");

	ssc = sd_ssc_init(un);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: "
	    "state=%x, mediastate=%x\n", state, un->un_mediastate);

	prev_state = un->un_mediastate;

	/* is there anything to do? */
	if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) {
		/*
		 * submit the request to the scsi_watch service;
		 * scsi_media_watch_cb() does the real work
		 */
		mutex_exit(SD_MUTEX(un));

		/*
		 * This change handles the case where a scsi watch request is
		 * added to a device that is powered down. To accomplish this
		 * we power up the device before adding the scsi watch request,
		 * since the scsi watch sends a TUR directly to the device
		 * which the device cannot handle if it is powered down.
		 */
		if (sd_pm_entry(un) != DDI_SUCCESS) {
			mutex_enter(SD_MUTEX(un));
			goto done;
		}

		token = scsi_watch_request_submit(SD_SCSI_DEVP(un),
		    sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
		    (caddr_t)sub_dev);

		sd_pm_exit(un);

		mutex_enter(SD_MUTEX(un));
		if (token == NULL) {
			rval = EAGAIN;
			goto done;
		}

		/*
		 * This is a special case IOCTL that doesn't return
		 * until the media state changes. Routine sdpower
		 * knows about and handles this so don't count it
		 * as an active cmd in the driver, which would
		 * keep the device busy to the pm framework.
		 * If the count isn't decremented the device can't
		 * be powered down.
		 */
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);

		/*
		 * if a prior request had been made, this will be the same
		 * token, as scsi_watch was designed that way.
		 */
		un->un_swr_token = token;
		un->un_specified_mediastate = state;

		/*
		 * now wait for media change
		 * we will not be signalled unless mediastate == state but it
		 * is still better to test for this condition, since there
		 * is a 2 sec cv_broadcast delay when mediastate ==
		 * DKIO_INSERTED
		 */
		SD_TRACE(SD_LOG_COMMON, un,
		    "sd_check_media: waiting for media state change\n");
		while (un->un_mediastate == state) {
			if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) {
				SD_TRACE(SD_LOG_COMMON, un,
				    "sd_check_media: waiting for media state "
				    "was interrupted\n");
				un->un_ncmds_in_driver++;
				rval = EINTR;
				goto done;
			}
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_check_media: received signal, state=%x\n",
			    un->un_mediastate);
		}
		/*
		 * Inc the counter to indicate the device once again
		 * has an active outstanding cmd.
		 */
		un->un_ncmds_in_driver++;
	}

	/* invalidate geometry */
	if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) {
		sr_ejected(un);
	}

	if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) {
		uint64_t	capacity;
		uint_t		lbasize;

		SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n");
		mutex_exit(SD_MUTEX(un));
		/*
		 * Since the following routines use SD_PATH_DIRECT, we must
		 * call PM directly before the upcoming disk accesses. This
		 * may cause the disk to be powered/spun up.
		 */

		if (sd_pm_entry(un) == DDI_SUCCESS) {
			rval = sd_send_scsi_READ_CAPACITY(ssc,
			    &capacity, &lbasize, SD_PATH_DIRECT);
			if (rval != 0) {
				sd_pm_exit(un);
				if (rval == EIO)
					sd_ssc_assessment(ssc,
					    SD_FMT_STATUS_CHECK);
				else
					sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				mutex_enter(SD_MUTEX(un));
				goto done;
			}
		} else {
			rval = EIO;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}
		mutex_enter(SD_MUTEX(un));

		sd_update_block_info(un, lbasize, capacity);

		/*
		 * Check if the media in the device is writable or not
		 */
		if (ISCD(un)) {
			sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
		}

		mutex_exit(SD_MUTEX(un));
		cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
		if ((cmlb_validate(un->un_cmlbhandle, 0,
		    (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) {
			sd_set_pstats(un);
			SD_TRACE(SD_LOG_IO_PARTITION, un,
			    "sd_check_media: un:0x%p pstats created and "
			    "set\n", un);
		}

		rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
		    SD_PATH_DIRECT);

		sd_pm_exit(un);

		if (rval != 0) {
			if (rval == EIO)
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			else
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		mutex_enter(SD_MUTEX(un));
	}
done:
	sd_ssc_fini(ssc);
	un->un_f_watcht_stopped = FALSE;
	if (token != NULL && un->un_swr_token != NULL) {
		/*
		 * Use of this local token and the mutex ensures that we avoid
		 * some race conditions associated with terminating the
		 * scsi watch.
		 */
		token = un->un_swr_token;
		mutex_exit(SD_MUTEX(un));
		(void) scsi_watch_request_terminate(token,
		    SCSI_WATCH_TERMINATE_WAIT);
		if (scsi_watch_get_ref_count(token) == 0) {
			mutex_enter(SD_MUTEX(un));
			un->un_swr_token = (opaque_t)NULL;
		} else {
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * Update the capacity kstat value, if no media previously
	 * (capacity kstat is 0) and a media has been inserted
	 * (un_f_blockcount_is_valid == TRUE)
	 */
	if (un->un_errstats) {
		struct sd_errstats *stp = NULL;

		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		if ((stp->sd_capacity.value.ui64 == 0) &&
		    (un->un_f_blockcount_is_valid == TRUE)) {
			stp->sd_capacity.value.ui64 =
			    (uint64_t)((uint64_t)un->un_blockcount *
			    un->un_sys_blocksize);
		}
	}
	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n");
	return (rval);
}


/*
 * Function: sd_delayed_cv_broadcast
 *
 * Description: Delayed cv_broadcast to allow for target to recover from media
 *		insertion.
 *
 * Arguments: arg - driver soft state (unit) structure
 */

static void
sd_delayed_cv_broadcast(void *arg)
{
	struct sd_lun *un = arg;

	SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n");

	mutex_enter(SD_MUTEX(un));
	un->un_dcvb_timeid = NULL;
	cv_broadcast(&un->un_state_cv);
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_media_watch_cb
 *
 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This
 *		routine processes the TUR sense data and updates the driver
 *		state if a transition has occurred. The user thread
 *		(sd_check_media) is then signalled.
 *
 * Arguments: arg - the device 'dev_t' is used for context to discriminate
 *			among multiple watches that share this callback function
 *		resultp - scsi watch facility result packet containing scsi
 *			packet, status byte and sense data
 *
 * Return Code: 0 for success, -1 for failure
 */

static int
sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
	struct sd_lun		*un;
	struct scsi_status	*statusp = resultp->statusp;
	uint8_t			*sensep = (uint8_t *)resultp->sensep;
	enum dkio_state		state = DKIO_NONE;
	dev_t			dev = (dev_t)arg;
	uchar_t			actual_sense_length;
	uint8_t			skey, asc, ascq;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (-1);
	}
	actual_sense_length = resultp->actual_sense_length;

	mutex_enter(SD_MUTEX(un));
	SD_TRACE(SD_LOG_COMMON, un,
	    "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
	    *((char *)statusp), (void *)sensep, actual_sense_length);

	if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
		un->un_mediastate = DKIO_DEV_GONE;
		cv_broadcast(&un->un_state_cv);
		mutex_exit(SD_MUTEX(un));

		return (0);
	}
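	/*
	 * Summary of the sense-data decisions made below (a sketch; the
	 * values are the KEY/ASC/ASCQ codes tested in the code):
	 *
	 *	06/28/xx (unit attention, media change)	-> DKIO_INSERTED
	 *	02/06/00 (no reference position found)	-> DKIO_INSERTED
	 *	02/3A/xx (medium not present)		-> DKIO_EJECTED
	 *	02/04/02,07,08 (becoming ready, busy)	-> DKIO_INSERTED
	 *	00/00/00 (no sense)			-> ignored
	 *	good status, command complete		-> DKIO_INSERTED
	 */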
	/*
	 * If there was a check condition, then sensep points to valid sense
	 * data. If status was not a check condition but a reservation or busy
	 * status, then the new state is DKIO_NONE.
	 */
	if (sensep != NULL) {
		skey = scsi_sense_key(sensep);
		asc = scsi_sense_asc(sensep);
		ascq = scsi_sense_ascq(sensep);

		SD_INFO(SD_LOG_COMMON, un,
		    "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
		    skey, asc, ascq);
		/* This routine only uses up to 13 bytes of sense data. */
		if (actual_sense_length >= 13) {
			if (skey == KEY_UNIT_ATTENTION) {
				if (asc == 0x28) {
					state = DKIO_INSERTED;
				}
			} else if (skey == KEY_NOT_READY) {
				/*
				 * Sense data of 02/06/00 means that the
				 * drive could not read the media (No
				 * reference position found). In this case
				 * to prevent a hang on the DKIOCSTATE IOCTL
				 * we set the media state to DKIO_INSERTED.
				 */
				if (asc == 0x06 && ascq == 0x00)
					state = DKIO_INSERTED;

				/*
				 * Sense data of 02/04/02 means that the host
				 * should send a start command. Explicitly
				 * leave the media state as is
				 * (inserted) as the media is inserted
				 * and host has stopped device for PM
				 * reasons. The next real read/write
				 * to this media will bring the
				 * device to the right state good for
				 * media access.
				 */
				if (asc == 0x3a) {
					state = DKIO_EJECTED;
				} else {
					/*
					 * If the drive is busy with an
					 * operation or long write, keep the
					 * media in an inserted state.
					 */

					if ((asc == 0x04) &&
					    ((ascq == 0x02) ||
					    (ascq == 0x07) ||
					    (ascq == 0x08))) {
						state = DKIO_INSERTED;
					}
				}
			} else if (skey == KEY_NO_SENSE) {
				if ((asc == 0x00) && (ascq == 0x00)) {
					/*
					 * Sense Data 00/00/00 does not provide
					 * any information about the state of
					 * the media. Ignore it.
					 */
					mutex_exit(SD_MUTEX(un));
					return (0);
				}
			}
		}
	} else if ((*((char *)statusp) == STATUS_GOOD) &&
	    (resultp->pkt->pkt_reason == CMD_CMPLT)) {
		state = DKIO_INSERTED;
	}

	SD_TRACE(SD_LOG_COMMON, un,
	    "sd_media_watch_cb: state=%x, specified=%x\n",
	    state, un->un_specified_mediastate);

	/*
	 * now signal the waiting thread if this is *not* the specified state;
	 * delay the signal if the state is DKIO_INSERTED to allow the target
	 * to recover
	 */
	if (state != un->un_specified_mediastate) {
		un->un_mediastate = state;
		if (state == DKIO_INSERTED) {
			/*
			 * delay the signal to give the drive a chance
			 * to do what it apparently needs to do
			 */
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_media_watch_cb: delayed cv_broadcast\n");
			if (un->un_dcvb_timeid == NULL) {
				un->un_dcvb_timeid =
				    timeout(sd_delayed_cv_broadcast, un,
				    drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
			}
		} else {
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_media_watch_cb: immediate cv_broadcast\n");
			cv_broadcast(&un->un_state_cv);
		}
	}
	mutex_exit(SD_MUTEX(un));
	return (0);
}


/*
 * Function: sd_dkio_get_temp
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to get the disk temperature.
 *
 * Arguments: dev - the device number
 *		arg - pointer to user provided dk_temperature structure.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 *		EAGAIN
 */

static int
sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct dk_temperature	*dktemp = NULL;
	uchar_t			*temperature_page;
	int			rval = 0;
	int			path_flag = SD_PATH_STANDARD;
	sd_ssc_t		*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ssc = sd_ssc_init(un);
	dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP);

	/* copyin the disk temp argument to get the user flags */
	if (ddi_copyin((void *)arg, dktemp,
	    sizeof (struct dk_temperature), flag) != 0) {
		rval = EFAULT;
		goto done;
	}

	/* Initialize the temperature to invalid. */
	dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
	dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;

	/*
	 * Note: Investigate removing the "bypass pm" semantic.
	 * Can we just bypass PM always?
	 */
	if (dktemp->dkt_flags & DKT_BYPASS_PM) {
		path_flag = SD_PATH_DIRECT;
		ASSERT(!mutex_owned(&un->un_pm_mutex));
		mutex_enter(&un->un_pm_mutex);
		if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
			/*
			 * If DKT_BYPASS_PM is set, and the drive happens to be
			 * in low power mode, we cannot wake it up; we need to
			 * return EAGAIN.
			 */
			mutex_exit(&un->un_pm_mutex);
			rval = EAGAIN;
			goto done;
		} else {
			/*
			 * Indicate to PM the device is busy. This is required
			 * to avoid a race - i.e. the ioctl is issuing a
			 * command and the pm framework brings down the device
			 * to low power mode (possible power cut-off on some
			 * platforms).
			 */
			mutex_exit(&un->un_pm_mutex);
			if (sd_pm_entry(un) != DDI_SUCCESS) {
				rval = EAGAIN;
				goto done;
			}
		}
	}

	temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);

	rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page,
	    TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag);
	if (rval != 0)
		goto done2;

	/*
	 * For the current temperature verify that the parameter length is 0x02
	 * and the parameter code is 0x00
	 */
	if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) &&
	    (temperature_page[5] == 0x00)) {
		if (temperature_page[9] == 0xFF) {
			dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
		} else {
			dktemp->dkt_cur_temp = (short)(temperature_page[9]);
		}
	}

	/*
	 * For the reference temperature verify that the parameter
	 * length is 0x02 and the parameter code is 0x01
	 */
	if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) &&
	    (temperature_page[11] == 0x01)) {
		if (temperature_page[15] == 0xFF) {
			dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
		} else {
			dktemp->dkt_ref_temp = (short)(temperature_page[15]);
		}
	}

	/* Do the copyout regardless of the temperature commands status. */
	if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature),
	    flag) != 0) {
		rval = EFAULT;
		goto done1;
	}

done2:
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
done1:
	if (path_flag == SD_PATH_DIRECT) {
		sd_pm_exit(un);
	}

	kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE);
done:
	sd_ssc_fini(ssc);
	if (dktemp != NULL) {
		kmem_free(dktemp, sizeof (struct dk_temperature));
	}

	return (rval);
}
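
/*
 * For reference, the byte offsets tested in sd_dkio_get_temp() correspond
 * to the conventional layout of the temperature log page (a sketch; see the
 * SCSI SPC specification for the authoritative definition):
 *
 *	bytes 0-3:	page header (page code, reserved, page length)
 *	bytes 4-5:	parameter code 0x0000 (current temperature)
 *	byte 7:		parameter length (0x02)
 *	byte 9:		current temperature (0xFF == unknown)
 *	bytes 10-11:	parameter code 0x0001 (reference temperature)
 *	byte 13:	parameter length (0x02)
 *	byte 15:	reference temperature (0xFF == unknown)
 */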
23799 */ 23800 uint8_t *sensep; 23801 int senlen; 23802 23803 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 23804 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 23805 ssc->ssc_uscsi_cmd->uscsi_rqresid); 23806 23807 if (senlen > 0 && 23808 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 23809 sd_ssc_assessment(ssc, 23810 SD_FMT_IGNORE_COMPROMISE); 23811 } else { 23812 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23813 } 23814 } else { 23815 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23816 } 23817 23818 SD_ERROR(SD_LOG_COMMON, un, 23819 "sd_log_page_supported: failed log page retrieval\n"); 23820 kmem_free(log_page_data, 0xFF); 23821 return (-1); 23822 } 23823 23824 log_size = log_page_data[3]; 23825 23826 /* 23827 * The list of supported log pages start from the fourth byte. Check 23828 * until we run out of log pages or a match is found. 23829 */ 23830 for (i = 4; (i < (log_size + 4)) && !match; i++) { 23831 if (log_page_data[i] == log_page) { 23832 match++; 23833 } 23834 } 23835 kmem_free(log_page_data, 0xFF); 23836 return (match); 23837 } 23838 23839 23840 /* 23841 * Function: sd_mhdioc_failfast 23842 * 23843 * Description: This routine is the driver entry point for handling ioctl 23844 * requests to enable/disable the multihost failfast option. 23845 * (MHIOCENFAILFAST) 23846 * 23847 * Arguments: dev - the device number 23848 * arg - user specified probing interval. 23849 * flag - this argument is a pass through to ddi_copyxxx() 23850 * directly from the mode argument of ioctl(). 23851 * 23852 * Return Code: 0 23853 * EFAULT 23854 * ENXIO 23855 */ 23856 23857 static int 23858 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 23859 { 23860 struct sd_lun *un = NULL; 23861 int mh_time; 23862 int rval = 0; 23863 23864 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23865 return (ENXIO); 23866 } 23867 23868 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 23869 return (EFAULT); 23870 23871 if (mh_time) { 23872 mutex_enter(SD_MUTEX(un)); 23873 un->un_resvd_status |= SD_FAILFAST; 23874 mutex_exit(SD_MUTEX(un)); 23875 /* 23876 * If mh_time is INT_MAX, then this ioctl is being used for 23877 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 23878 */ 23879 if (mh_time != INT_MAX) { 23880 rval = sd_check_mhd(dev, mh_time); 23881 } 23882 } else { 23883 (void) sd_check_mhd(dev, 0); 23884 mutex_enter(SD_MUTEX(un)); 23885 un->un_resvd_status &= ~SD_FAILFAST; 23886 mutex_exit(SD_MUTEX(un)); 23887 } 23888 return (rval); 23889 } 23890 23891 23892 /* 23893 * Function: sd_mhdioc_takeown 23894 * 23895 * Description: This routine is the driver entry point for handling ioctl 23896 * requests to forcefully acquire exclusive access rights to the 23897 * multihost disk (MHIOCTKOWN). 23898 * 23899 * Arguments: dev - the device number 23900 * arg - user provided structure specifying the delay 23901 * parameters in milliseconds 23902 * flag - this argument is a pass through to ddi_copyxxx() 23903 * directly from the mode argument of ioctl(). 
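 *
 *		A hedged userland sketch of the clustering sequence this
 *		supports (the fd and delay values are illustrative; struct
 *		mhioctkown comes from <sys/mhd.h>):
 *
 *			struct mhioctkown t;
 *			int probe = 2000;		(msecs)
 *
 *			bzero(&t, sizeof (t));
 *			t.min_ownership_delay = 6000;	(msecs)
 *			t.max_ownership_delay = 30000;	(msecs)
 *			if (ioctl(fd, MHIOCTKOWN, &t) == 0)
 *				(void) ioctl(fd, MHIOCENFAILFAST, &probe);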
23904 * 23905 * Return Code: 0 23906 * EFAULT 23907 * ENXIO 23908 */ 23909 23910 static int 23911 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 23912 { 23913 struct sd_lun *un = NULL; 23914 struct mhioctkown *tkown = NULL; 23915 int rval = 0; 23916 23917 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23918 return (ENXIO); 23919 } 23920 23921 if (arg != NULL) { 23922 tkown = (struct mhioctkown *) 23923 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 23924 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 23925 if (rval != 0) { 23926 rval = EFAULT; 23927 goto error; 23928 } 23929 } 23930 23931 rval = sd_take_ownership(dev, tkown); 23932 mutex_enter(SD_MUTEX(un)); 23933 if (rval == 0) { 23934 un->un_resvd_status |= SD_RESERVE; 23935 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 23936 sd_reinstate_resv_delay = 23937 tkown->reinstate_resv_delay * 1000; 23938 } else { 23939 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 23940 } 23941 /* 23942 * Give the scsi_watch routine interval set by 23943 * the MHIOCENFAILFAST ioctl precedence here. 23944 */ 23945 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 23946 mutex_exit(SD_MUTEX(un)); 23947 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 23948 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23949 "sd_mhdioc_takeown : %d\n", 23950 sd_reinstate_resv_delay); 23951 } else { 23952 mutex_exit(SD_MUTEX(un)); 23953 } 23954 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 23955 sd_mhd_reset_notify_cb, (caddr_t)un); 23956 } else { 23957 un->un_resvd_status &= ~SD_RESERVE; 23958 mutex_exit(SD_MUTEX(un)); 23959 } 23960 23961 error: 23962 if (tkown != NULL) { 23963 kmem_free(tkown, sizeof (struct mhioctkown)); 23964 } 23965 return (rval); 23966 } 23967 23968 23969 /* 23970 * Function: sd_mhdioc_release 23971 * 23972 * Description: This routine is the driver entry point for handling ioctl 23973 * requests to release exclusive access rights to the multihost 23974 * disk (MHIOCRELEASE). 23975 * 23976 * Arguments: dev - the device number 23977 * 23978 * Return Code: 0 23979 * ENXIO 23980 */ 23981 23982 static int 23983 sd_mhdioc_release(dev_t dev) 23984 { 23985 struct sd_lun *un = NULL; 23986 timeout_id_t resvd_timeid_save; 23987 int resvd_status_save; 23988 int rval = 0; 23989 23990 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23991 return (ENXIO); 23992 } 23993 23994 mutex_enter(SD_MUTEX(un)); 23995 resvd_status_save = un->un_resvd_status; 23996 un->un_resvd_status &= 23997 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 23998 if (un->un_resvd_timeid) { 23999 resvd_timeid_save = un->un_resvd_timeid; 24000 un->un_resvd_timeid = NULL; 24001 mutex_exit(SD_MUTEX(un)); 24002 (void) untimeout(resvd_timeid_save); 24003 } else { 24004 mutex_exit(SD_MUTEX(un)); 24005 } 24006 24007 /* 24008 * destroy any pending timeout thread that may be attempting to 24009 * reinstate reservation on this device. 
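 * The ordering below matters: the un_resvd_timeid timeout is cancelled
 * above, any queued reclaim request is flushed next, and only then is
 * the RELEASE itself issued.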
24010 */ 24011 sd_rmv_resv_reclaim_req(dev); 24012 24013 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 24014 mutex_enter(SD_MUTEX(un)); 24015 if ((un->un_mhd_token) && 24016 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 24017 mutex_exit(SD_MUTEX(un)); 24018 (void) sd_check_mhd(dev, 0); 24019 } else { 24020 mutex_exit(SD_MUTEX(un)); 24021 } 24022 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 24023 sd_mhd_reset_notify_cb, (caddr_t)un); 24024 } else { 24025 /* 24026 * sd_mhd_watch_cb will restart the resvd recover timeout thread 24027 */ 24028 mutex_enter(SD_MUTEX(un)); 24029 un->un_resvd_status = resvd_status_save; 24030 mutex_exit(SD_MUTEX(un)); 24031 } 24032 return (rval); 24033 } 24034 24035 24036 /* 24037 * Function: sd_mhdioc_register_devid 24038 * 24039 * Description: This routine is the driver entry point for handling ioctl 24040 * requests to register the device id (MHIOCREREGISTERDEVID). 24041 * 24042 * Note: The implementation for this ioctl has been updated to 24043 * be consistent with the original PSARC case (1999/357) 24044 * (4375899, 4241671, 4220005) 24045 * 24046 * Arguments: dev - the device number 24047 * 24048 * Return Code: 0 24049 * ENXIO 24050 */ 24051 24052 static int 24053 sd_mhdioc_register_devid(dev_t dev) 24054 { 24055 struct sd_lun *un = NULL; 24056 int rval = 0; 24057 sd_ssc_t *ssc; 24058 24059 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24060 return (ENXIO); 24061 } 24062 24063 ASSERT(!mutex_owned(SD_MUTEX(un))); 24064 24065 mutex_enter(SD_MUTEX(un)); 24066 24067 /* If a devid already exists, de-register it */ 24068 if (un->un_devid != NULL) { 24069 ddi_devid_unregister(SD_DEVINFO(un)); 24070 /* 24071 * After unregister devid, needs to free devid memory 24072 */ 24073 ddi_devid_free(un->un_devid); 24074 un->un_devid = NULL; 24075 } 24076 24077 /* Check for reservation conflict */ 24078 mutex_exit(SD_MUTEX(un)); 24079 ssc = sd_ssc_init(un); 24080 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 24081 mutex_enter(SD_MUTEX(un)); 24082 24083 switch (rval) { 24084 case 0: 24085 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 24086 break; 24087 case EACCES: 24088 break; 24089 default: 24090 rval = EIO; 24091 } 24092 24093 mutex_exit(SD_MUTEX(un)); 24094 if (rval != 0) { 24095 if (rval == EIO) 24096 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 24097 else 24098 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24099 } 24100 sd_ssc_fini(ssc); 24101 return (rval); 24102 } 24103 24104 24105 /* 24106 * Function: sd_mhdioc_inkeys 24107 * 24108 * Description: This routine is the driver entry point for handling ioctl 24109 * requests to issue the SCSI-3 Persistent In Read Keys command 24110 * to the device (MHIOCGRP_INKEYS). 24111 * 24112 * Arguments: dev - the device number 24113 * arg - user provided in_keys structure 24114 * flag - this argument is a pass through to ddi_copyxxx() 24115 * directly from the mode argument of ioctl(). 
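 *
 *		A hedged userland sketch (mhioc_inkeys_t and
 *		mhioc_key_list_t come from <sys/mhd.h>; list sizing and
 *		error handling are elided):
 *
 *			mhioc_key_list_t kl;
 *			mhioc_inkeys_t k;
 *
 *			bzero(&kl, sizeof (kl));
 *			bzero(&k, sizeof (k));
 *			k.li = &kl;
 *			if (ioctl(fd, MHIOCGRP_INKEYS, &k) == 0)
 *				... k.generation is now valid ...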
 *
 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
 *		ENXIO
 *		EFAULT
 */

static int
sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un;
	mhioc_inkeys_t	inkeys;
	int		rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct mhioc_inkeys32	inkeys32;

		if (ddi_copyin(arg, &inkeys32,
		    sizeof (struct mhioc_inkeys32), flag) != 0) {
			return (EFAULT);
		}
		inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		inkeys32.generation = inkeys.generation;
		if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}
	rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
	if (rval != 0) {
		return (rval);
	}
	if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}

#endif /* _MULTI_DATAMODEL */

	return (rval);
}


/*
 * Function: sd_mhdioc_inresv
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to issue the SCSI-3 Persistent In Read Reservations
 *		command to the device (MHIOCGRP_INRESV).
 *
 * Arguments: dev - the device number
 *		arg - user provided in_resv structure
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
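 *
 *		The flow mirrors sd_mhdioc_inkeys() above, with
 *		mhioc_inresvs_t and mhioc_resv_desc_list_t standing in
 *		for the key-list types.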
24200 * 24201 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 24202 * ENXIO 24203 * EFAULT 24204 */ 24205 24206 static int 24207 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 24208 { 24209 struct sd_lun *un; 24210 mhioc_inresvs_t inresvs; 24211 int rval = 0; 24212 24213 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24214 return (ENXIO); 24215 } 24216 24217 #ifdef _MULTI_DATAMODEL 24218 24219 switch (ddi_model_convert_from(flag & FMODELS)) { 24220 case DDI_MODEL_ILP32: { 24221 struct mhioc_inresvs32 inresvs32; 24222 24223 if (ddi_copyin(arg, &inresvs32, 24224 sizeof (struct mhioc_inresvs32), flag) != 0) { 24225 return (EFAULT); 24226 } 24227 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 24228 if ((rval = sd_persistent_reservation_in_read_resv(un, 24229 &inresvs, flag)) != 0) { 24230 return (rval); 24231 } 24232 inresvs32.generation = inresvs.generation; 24233 if (ddi_copyout(&inresvs32, arg, 24234 sizeof (struct mhioc_inresvs32), flag) != 0) { 24235 return (EFAULT); 24236 } 24237 break; 24238 } 24239 case DDI_MODEL_NONE: 24240 if (ddi_copyin(arg, &inresvs, 24241 sizeof (mhioc_inresvs_t), flag) != 0) { 24242 return (EFAULT); 24243 } 24244 if ((rval = sd_persistent_reservation_in_read_resv(un, 24245 &inresvs, flag)) != 0) { 24246 return (rval); 24247 } 24248 if (ddi_copyout(&inresvs, arg, 24249 sizeof (mhioc_inresvs_t), flag) != 0) { 24250 return (EFAULT); 24251 } 24252 break; 24253 } 24254 24255 #else /* ! _MULTI_DATAMODEL */ 24256 24257 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 24258 return (EFAULT); 24259 } 24260 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 24261 if (rval != 0) { 24262 return (rval); 24263 } 24264 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 24265 return (EFAULT); 24266 } 24267 24268 #endif /* ! _MULTI_DATAMODEL */ 24269 24270 return (rval); 24271 } 24272 24273 24274 /* 24275 * The following routines support the clustering functionality described below 24276 * and implement lost reservation reclaim functionality. 24277 * 24278 * Clustering 24279 * ---------- 24280 * The clustering code uses two different, independent forms of SCSI 24281 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 24282 * Persistent Group Reservations. For any particular disk, it will use either 24283 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 24284 * 24285 * SCSI-2 24286 * The cluster software takes ownership of a multi-hosted disk by issuing the 24287 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 24288 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 24289 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 24290 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 24291 * driver. The meaning of failfast is that if the driver (on this host) ever 24292 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 24293 * it should immediately panic the host. The motivation for this ioctl is that 24294 * if this host does encounter reservation conflict, the underlying cause is 24295 * that some other host of the cluster has decided that this host is no longer 24296 * in the cluster and has seized control of the disks for itself. Since this 24297 * host is no longer in the cluster, it ought to panic itself. 
The
 * MHIOCENFAILFAST ioctl does two things:
 *	(a) it sets a flag that will cause any returned RESERVATION_CONFLICT
 *	error to panic the host
 *	(b) it sets up a periodic timer to test whether this host still has
 *	"access" (in that no other host has reserved the device): if the
 *	periodic timer gets RESERVATION_CONFLICT, the host is panicked. The
 *	purpose of that periodic timer is to handle scenarios where the host
 *	is otherwise temporarily quiescent, temporarily doing no real i/o.
 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another
 * host, by issuing a SCSI Bus Device Reset. It will then issue a SCSI
 * Reserve for the device itself.
 *
 * SCSI-3 PGR
 * A direct semantic implementation of the SCSI-3 Persistent Reservation
 * facility is supported through the shared multihost disk ioctls
 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
 * MHIOCGRP_PREEMPTANDABORT)
 *
 * Reservation Reclaim:
 * --------------------
 * To support the lost reservation reclaim operations this driver creates a
 * single thread to handle reinstating reservations on all devices that have
 * lost reservations. sd_resv_reclaim_requests are logged for all devices
 * that have LOST RESERVATIONS when the scsi watch facility calls back
 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the
 * requests to regain the lost reservations.
 */

/*
 * Function: sd_check_mhd()
 *
 * Description: This function sets up and submits a scsi watch request or
 *		terminates an existing watch request. This routine is used in
 *		support of reservation reclaim.
 *
 * Arguments: dev - the device 'dev_t' is used for context to discriminate
 *		among multiple watches that share the callback function
 *		interval - the number of milliseconds specifying the watch
 *			interval for issuing TEST UNIT READY commands. If
 *			set to 0 the watch should be terminated. If the
 *			interval is set to 0 and if the device is required
 *			to hold reservation while disabling failfast, the
 *			watch is restarted with an interval of
 *			reinstate_resv_delay.
 *
 * Return Code: 0 - Successful submit/terminate of scsi watch request
 *		ENXIO - Indicates an invalid device was specified
 *		EAGAIN - Unable to submit the scsi watch request
 */

static int
sd_check_mhd(dev_t dev, int interval)
{
	struct sd_lun	*un;
	opaque_t	token;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* is this a watch termination request? */
	if (interval == 0) {
		mutex_enter(SD_MUTEX(un));
		/* if there is an existing watch task then terminate it */
		if (un->un_mhd_token) {
			token = un->un_mhd_token;
			un->un_mhd_token = NULL;
			mutex_exit(SD_MUTEX(un));
			(void) scsi_watch_request_terminate(token,
			    SCSI_WATCH_TERMINATE_ALL_WAIT);
			mutex_enter(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Note: If we return here we don't check for the
			 * failfast case. This is the original legacy
			 * implementation but perhaps we should be checking
			 * the failfast case.
24376 */ 24377 return (0); 24378 } 24379 /* 24380 * If the device is required to hold reservation while 24381 * disabling failfast, we need to restart the scsi_watch 24382 * routine with an interval of reinstate_resv_delay. 24383 */ 24384 if (un->un_resvd_status & SD_RESERVE) { 24385 interval = sd_reinstate_resv_delay/1000; 24386 } else { 24387 /* no failfast so bail */ 24388 mutex_exit(SD_MUTEX(un)); 24389 return (0); 24390 } 24391 mutex_exit(SD_MUTEX(un)); 24392 } 24393 24394 /* 24395 * adjust minimum time interval to 1 second, 24396 * and convert from msecs to usecs 24397 */ 24398 if (interval > 0 && interval < 1000) { 24399 interval = 1000; 24400 } 24401 interval *= 1000; 24402 24403 /* 24404 * submit the request to the scsi_watch service 24405 */ 24406 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 24407 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 24408 if (token == NULL) { 24409 return (EAGAIN); 24410 } 24411 24412 /* 24413 * save token for termination later on 24414 */ 24415 mutex_enter(SD_MUTEX(un)); 24416 un->un_mhd_token = token; 24417 mutex_exit(SD_MUTEX(un)); 24418 return (0); 24419 } 24420 24421 24422 /* 24423 * Function: sd_mhd_watch_cb() 24424 * 24425 * Description: This function is the call back function used by the scsi watch 24426 * facility. The scsi watch facility sends the "Test Unit Ready" 24427 * and processes the status. If applicable (i.e. a "Unit Attention" 24428 * status and automatic "Request Sense" not used) the scsi watch 24429 * facility will send a "Request Sense" and retrieve the sense data 24430 * to be passed to this callback function. In either case the 24431 * automatic "Request Sense" or the facility submitting one, this 24432 * callback is passed the status and sense data. 24433 * 24434 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24435 * among multiple watches that share this callback function 24436 * resultp - scsi watch facility result packet containing scsi 24437 * packet, status byte and sense data 24438 * 24439 * Return Code: 0 - continue the watch task 24440 * non-zero - terminate the watch task 24441 */ 24442 24443 static int 24444 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 24445 { 24446 struct sd_lun *un; 24447 struct scsi_status *statusp; 24448 uint8_t *sensep; 24449 struct scsi_pkt *pkt; 24450 uchar_t actual_sense_length; 24451 dev_t dev = (dev_t)arg; 24452 24453 ASSERT(resultp != NULL); 24454 statusp = resultp->statusp; 24455 sensep = (uint8_t *)resultp->sensep; 24456 pkt = resultp->pkt; 24457 actual_sense_length = resultp->actual_sense_length; 24458 24459 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24460 return (ENXIO); 24461 } 24462 24463 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24464 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 24465 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 24466 24467 /* Begin processing of the status and/or sense data */ 24468 if (pkt->pkt_reason != CMD_CMPLT) { 24469 /* Handle the incomplete packet */ 24470 sd_mhd_watch_incomplete(un, pkt); 24471 return (0); 24472 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 24473 if (*((unsigned char *)statusp) 24474 == STATUS_RESERVATION_CONFLICT) { 24475 /* 24476 * Handle a reservation conflict by panicking if 24477 * configured for failfast or by logging the conflict 24478 * and updating the reservation status 24479 */ 24480 mutex_enter(SD_MUTEX(un)); 24481 if ((un->un_resvd_status & SD_FAILFAST) && 24482 (sd_failfast_enable)) { 24483 
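				/*
				 * Failfast was armed by MHIOCENFAILFAST and
				 * another host now holds the reservation:
				 * panic, per the clustering contract
				 * described above.
				 */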
sd_panic_for_res_conflict(un); 24484 /*NOTREACHED*/ 24485 } 24486 SD_INFO(SD_LOG_IOCTL_MHD, un, 24487 "sd_mhd_watch_cb: Reservation Conflict\n"); 24488 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 24489 mutex_exit(SD_MUTEX(un)); 24490 } 24491 } 24492 24493 if (sensep != NULL) { 24494 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 24495 mutex_enter(SD_MUTEX(un)); 24496 if ((scsi_sense_asc(sensep) == 24497 SD_SCSI_RESET_SENSE_CODE) && 24498 (un->un_resvd_status & SD_RESERVE)) { 24499 /* 24500 * The additional sense code indicates a power 24501 * on or bus device reset has occurred; update 24502 * the reservation status. 24503 */ 24504 un->un_resvd_status |= 24505 (SD_LOST_RESERVE | SD_WANT_RESERVE); 24506 SD_INFO(SD_LOG_IOCTL_MHD, un, 24507 "sd_mhd_watch_cb: Lost Reservation\n"); 24508 } 24509 } else { 24510 return (0); 24511 } 24512 } else { 24513 mutex_enter(SD_MUTEX(un)); 24514 } 24515 24516 if ((un->un_resvd_status & SD_RESERVE) && 24517 (un->un_resvd_status & SD_LOST_RESERVE)) { 24518 if (un->un_resvd_status & SD_WANT_RESERVE) { 24519 /* 24520 * A reset occurred in between the last probe and this 24521 * one so if a timeout is pending cancel it. 24522 */ 24523 if (un->un_resvd_timeid) { 24524 timeout_id_t temp_id = un->un_resvd_timeid; 24525 un->un_resvd_timeid = NULL; 24526 mutex_exit(SD_MUTEX(un)); 24527 (void) untimeout(temp_id); 24528 mutex_enter(SD_MUTEX(un)); 24529 } 24530 un->un_resvd_status &= ~SD_WANT_RESERVE; 24531 } 24532 if (un->un_resvd_timeid == 0) { 24533 /* Schedule a timeout to handle the lost reservation */ 24534 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 24535 (void *)dev, 24536 drv_usectohz(sd_reinstate_resv_delay)); 24537 } 24538 } 24539 mutex_exit(SD_MUTEX(un)); 24540 return (0); 24541 } 24542 24543 24544 /* 24545 * Function: sd_mhd_watch_incomplete() 24546 * 24547 * Description: This function is used to find out why a scsi pkt sent by the 24548 * scsi watch facility was not completed. Under some scenarios this 24549 * routine will return. Otherwise it will send a bus reset to see 24550 * if the drive is still online. 24551 * 24552 * Arguments: un - driver soft state (unit) structure 24553 * pkt - incomplete scsi pkt 24554 */ 24555 24556 static void 24557 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 24558 { 24559 int be_chatty; 24560 int perr; 24561 24562 ASSERT(pkt != NULL); 24563 ASSERT(un != NULL); 24564 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 24565 perr = (pkt->pkt_statistics & STAT_PERR); 24566 24567 mutex_enter(SD_MUTEX(un)); 24568 if (un->un_state == SD_STATE_DUMPING) { 24569 mutex_exit(SD_MUTEX(un)); 24570 return; 24571 } 24572 24573 switch (pkt->pkt_reason) { 24574 case CMD_UNX_BUS_FREE: 24575 /* 24576 * If we had a parity error that caused the target to drop BSY*, 24577 * don't be chatty about it. 24578 */ 24579 if (perr && be_chatty) { 24580 be_chatty = 0; 24581 } 24582 break; 24583 case CMD_TAG_REJECT: 24584 /* 24585 * The SCSI-2 spec states that a tag reject will be sent by the 24586 * target if tagged queuing is not supported. A tag reject may 24587 * also be sent during certain initialization periods or to 24588 * control internal resources. For the latter case the target 24589 * may also return Queue Full. 24590 * 24591 * If this driver receives a tag reject from a target that is 24592 * going through an init period or controlling internal 24593 * resources tagged queuing will be disabled. 
This is a less
		 * than optimal behavior but the driver is unable to determine
		 * the target state and assumes tagged queueing is not
		 * supported.
		 */
		pkt->pkt_flags = 0;
		un->un_tagflags = 0;

		if (un->un_f_opt_queueing == TRUE) {
			un->un_throttle = min(un->un_throttle, 3);
		} else {
			un->un_throttle = 1;
		}
		mutex_exit(SD_MUTEX(un));
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
		mutex_enter(SD_MUTEX(un));
		break;
	case CMD_INCOMPLETE:
		/*
		 * The transport stopped with an abnormal state, fallthrough
		 * and reset the target and/or bus unless selection did not
		 * complete (indicated by STATE_GOT_BUS) in which case we
		 * don't want to go through a target/bus reset
		 */
		if (pkt->pkt_state == STATE_GOT_BUS) {
			break;
		}
		/*FALLTHROUGH*/

	case CMD_TIMEOUT:
	default:
		/*
		 * The lun may still be running the command, so a lun reset
		 * should be attempted. If the lun reset fails or cannot be
		 * issued, then try a target reset. Lastly try a bus reset.
		 */
		if ((pkt->pkt_statistics &
		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
			int reset_retval = 0;
			mutex_exit(SD_MUTEX(un));
			if (un->un_f_allow_bus_device_reset == TRUE) {
				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			}
			mutex_enter(SD_MUTEX(un));
		}
		break;
	}

	/* A device/bus reset has occurred; update the reservation status. */
	if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET))) {
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: Lost Reservation\n");
		}
	}

	/*
	 * The disk has been turned off; update the device state.
	 *
	 * Note: Should we be offlining the disk here?
	 */
	if (pkt->pkt_state == STATE_GOT_BUS) {
		SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
		    "Disk not responding to selection\n");
		if (un->un_state != SD_STATE_OFFLINE) {
			New_state(un, SD_STATE_OFFLINE);
		}
	} else if (be_chatty) {
		/*
		 * suppress messages if they are all the same pkt_reason;
		 * with TQ, many (up to 256) are returned with the same
		 * pkt_reason
		 */
		if (pkt->pkt_reason != un->un_last_pkt_reason) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: "
			    "SCSI transport failed: reason '%s'\n",
			    scsi_rname(pkt->pkt_reason));
		}
	}
	un->un_last_pkt_reason = pkt->pkt_reason;
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_sname()
 *
 * Description: This is a simple little routine to return a string containing
 *		a printable description of command status byte for use in
 *		logging.
 *
 * Arguments: status - pointer to a status byte
 *
 * Return Code: char * - string containing status description.
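 *
 *		For example, sd_mhd_watch_cb() above logs
 *		sd_sname(*((unsigned char *)statusp)) for each watch
 *		completion.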
24702 */ 24703 24704 static char * 24705 sd_sname(uchar_t status) 24706 { 24707 switch (status & STATUS_MASK) { 24708 case STATUS_GOOD: 24709 return ("good status"); 24710 case STATUS_CHECK: 24711 return ("check condition"); 24712 case STATUS_MET: 24713 return ("condition met"); 24714 case STATUS_BUSY: 24715 return ("busy"); 24716 case STATUS_INTERMEDIATE: 24717 return ("intermediate"); 24718 case STATUS_INTERMEDIATE_MET: 24719 return ("intermediate - condition met"); 24720 case STATUS_RESERVATION_CONFLICT: 24721 return ("reservation_conflict"); 24722 case STATUS_TERMINATED: 24723 return ("command terminated"); 24724 case STATUS_QFULL: 24725 return ("queue full"); 24726 default: 24727 return ("<unknown status>"); 24728 } 24729 } 24730 24731 24732 /* 24733 * Function: sd_mhd_resvd_recover() 24734 * 24735 * Description: This function adds a reservation entry to the 24736 * sd_resv_reclaim_request list and signals the reservation 24737 * reclaim thread that there is work pending. If the reservation 24738 * reclaim thread has not been previously created this function 24739 * will kick it off. 24740 * 24741 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24742 * among multiple watches that share this callback function 24743 * 24744 * Context: This routine is called by timeout() and is run in interrupt 24745 * context. It must not sleep or call other functions which may 24746 * sleep. 24747 */ 24748 24749 static void 24750 sd_mhd_resvd_recover(void *arg) 24751 { 24752 dev_t dev = (dev_t)arg; 24753 struct sd_lun *un; 24754 struct sd_thr_request *sd_treq = NULL; 24755 struct sd_thr_request *sd_cur = NULL; 24756 struct sd_thr_request *sd_prev = NULL; 24757 int already_there = 0; 24758 24759 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24760 return; 24761 } 24762 24763 mutex_enter(SD_MUTEX(un)); 24764 un->un_resvd_timeid = NULL; 24765 if (un->un_resvd_status & SD_WANT_RESERVE) { 24766 /* 24767 * There was a reset so don't issue the reserve, allow the 24768 * sd_mhd_watch_cb callback function to notice this and 24769 * reschedule the timeout for reservation. 24770 */ 24771 mutex_exit(SD_MUTEX(un)); 24772 return; 24773 } 24774 mutex_exit(SD_MUTEX(un)); 24775 24776 /* 24777 * Add this device to the sd_resv_reclaim_request list and the 24778 * sd_resv_reclaim_thread should take care of the rest. 24779 * 24780 * Note: We can't sleep in this context so if the memory allocation 24781 * fails allow the sd_mhd_watch_cb callback function to notice this and 24782 * reschedule the timeout for reservation. 
(4378460)
	 */
	sd_treq = (struct sd_thr_request *)
	    kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP);
	if (sd_treq == NULL) {
		return;
	}

	sd_treq->sd_thr_req_next = NULL;
	sd_treq->dev = dev;
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_req_head == NULL) {
		sd_tr.srq_thr_req_head = sd_treq;
	} else {
		sd_cur = sd_prev = sd_tr.srq_thr_req_head;
		for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) {
			if (sd_cur->dev == dev) {
				/*
				 * already in Queue so don't log
				 * another request for the device
				 */
				already_there = 1;
				break;
			}
			sd_prev = sd_cur;
		}
		if (!already_there) {
			SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: "
			    "logging request for %lx\n", dev);
			sd_prev->sd_thr_req_next = sd_treq;
		} else {
			kmem_free(sd_treq, sizeof (struct sd_thr_request));
		}
	}

	/*
	 * Create a kernel thread to do the reservation reclaim and free up
	 * this timeout context. We cannot block in this context while we
	 * go away to do the reservation reclaim.
	 */
	if (sd_tr.srq_resv_reclaim_thread == NULL)
		sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
		    sd_resv_reclaim_thread, NULL,
		    0, &p0, TS_RUN, v.v_maxsyspri - 2);

	/* Tell the reservation reclaim thread that it has work to do */
	cv_signal(&sd_tr.srq_resv_reclaim_cv);
	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
}

/*
 * Function: sd_resv_reclaim_thread()
 *
 * Description: This function implements the reservation reclaim operations
 *
 * Arguments: none; the thread scans the global sd_tr request queue rather
 *		than taking a per-watch argument.
 */

static void
sd_resv_reclaim_thread()
{
	struct sd_lun		*un;
	struct sd_thr_request	*sd_mhreq;

	/* Wait for work */
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_req_head == NULL) {
		cv_wait(&sd_tr.srq_resv_reclaim_cv,
		    &sd_tr.srq_resv_reclaim_mutex);
	}

	/* Loop while we have work */
	while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
		un = ddi_get_soft_state(sd_state,
		    SDUNIT(sd_tr.srq_thr_cur_req->dev));
		if (un == NULL) {
			/*
			 * softstate structure is NULL so just
			 * dequeue the request and continue
			 */
			sd_tr.srq_thr_req_head =
			    sd_tr.srq_thr_cur_req->sd_thr_req_next;
			kmem_free(sd_tr.srq_thr_cur_req,
			    sizeof (struct sd_thr_request));
			continue;
		}

		/* dequeue the request */
		sd_mhreq = sd_tr.srq_thr_cur_req;
		sd_tr.srq_thr_req_head =
		    sd_tr.srq_thr_cur_req->sd_thr_req_next;
		mutex_exit(&sd_tr.srq_resv_reclaim_mutex);

		/*
		 * Reclaim reservation only if SD_RESERVE is still set. There
		 * may have been a call to MHIOCRELEASE before we got here.
		 */
		mutex_enter(SD_MUTEX(un));
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			/*
			 * Note: The SD_LOST_RESERVE flag is cleared before
			 * reclaiming the reservation.
If this is done after the 24885 * call to sd_reserve_release a reservation loss in the 24886 * window between pkt completion of reserve cmd and 24887 * mutex_enter below may not be recognized 24888 */ 24889 un->un_resvd_status &= ~SD_LOST_RESERVE; 24890 mutex_exit(SD_MUTEX(un)); 24891 24892 if (sd_reserve_release(sd_mhreq->dev, 24893 SD_RESERVE) == 0) { 24894 mutex_enter(SD_MUTEX(un)); 24895 un->un_resvd_status |= SD_RESERVE; 24896 mutex_exit(SD_MUTEX(un)); 24897 SD_INFO(SD_LOG_IOCTL_MHD, un, 24898 "sd_resv_reclaim_thread: " 24899 "Reservation Recovered\n"); 24900 } else { 24901 mutex_enter(SD_MUTEX(un)); 24902 un->un_resvd_status |= SD_LOST_RESERVE; 24903 mutex_exit(SD_MUTEX(un)); 24904 SD_INFO(SD_LOG_IOCTL_MHD, un, 24905 "sd_resv_reclaim_thread: Failed " 24906 "Reservation Recovery\n"); 24907 } 24908 } else { 24909 mutex_exit(SD_MUTEX(un)); 24910 } 24911 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24912 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 24913 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24914 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 24915 /* 24916 * wakeup the destroy thread if anyone is waiting on 24917 * us to complete. 24918 */ 24919 cv_signal(&sd_tr.srq_inprocess_cv); 24920 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24921 "sd_resv_reclaim_thread: cv_signalling current request \n"); 24922 } 24923 24924 /* 24925 * cleanup the sd_tr structure now that this thread will not exist 24926 */ 24927 ASSERT(sd_tr.srq_thr_req_head == NULL); 24928 ASSERT(sd_tr.srq_thr_cur_req == NULL); 24929 sd_tr.srq_resv_reclaim_thread = NULL; 24930 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24931 thread_exit(); 24932 } 24933 24934 24935 /* 24936 * Function: sd_rmv_resv_reclaim_req() 24937 * 24938 * Description: This function removes any pending reservation reclaim requests 24939 * for the specified device. 24940 * 24941 * Arguments: dev - the device 'dev_t' 24942 */ 24943 24944 static void 24945 sd_rmv_resv_reclaim_req(dev_t dev) 24946 { 24947 struct sd_thr_request *sd_mhreq; 24948 struct sd_thr_request *sd_prev; 24949 24950 /* Remove a reservation reclaim request from the list */ 24951 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24952 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 24953 /* 24954 * We are attempting to reinstate reservation for 24955 * this device. We wait for sd_reserve_release() 24956 * to return before we return. 24957 */ 24958 cv_wait(&sd_tr.srq_inprocess_cv, 24959 &sd_tr.srq_resv_reclaim_mutex); 24960 } else { 24961 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 24962 if (sd_mhreq && sd_mhreq->dev == dev) { 24963 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 24964 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24965 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24966 return; 24967 } 24968 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 24969 if (sd_mhreq && sd_mhreq->dev == dev) { 24970 break; 24971 } 24972 sd_prev = sd_mhreq; 24973 } 24974 if (sd_mhreq != NULL) { 24975 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 24976 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24977 } 24978 } 24979 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24980 } 24981 24982 24983 /* 24984 * Function: sd_mhd_reset_notify_cb() 24985 * 24986 * Description: This is a call back function for scsi_reset_notify. This 24987 * function updates the softstate reserved status and logs the 24988 * reset. The driver scsi watch facility callback function 24989 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 24990 * will reclaim the reservation. 
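 *		The callback is registered with scsi_reset_notify()
 *		(SCSI_RESET_NOTIFY) in sd_mhdioc_takeown() and cancelled
 *		(SCSI_RESET_CANCEL) in sd_mhdioc_release() above.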
 *
 * Arguments: arg - driver soft state (unit) structure
 */

static void
sd_mhd_reset_notify_cb(caddr_t arg)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	mutex_enter(SD_MUTEX(un));
	if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
		un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
		SD_INFO(SD_LOG_IOCTL_MHD, un,
		    "sd_mhd_reset_notify_cb: Lost Reservation\n");
	}
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_take_ownership()
 *
 * Description: This routine implements an algorithm to achieve a stable
 *		reservation on disks which don't implement priority reserve,
 *		and makes sure that other hosts lose re-reservation attempts.
 *		This algorithm consists of a loop that keeps issuing the
 *		RESERVE for some period of time (min_ownership_delay, default
 *		6 seconds). During that loop, it looks to see if there has
 *		been a bus device reset or bus reset (both of which cause an
 *		existing reservation to be lost). If the reservation is lost,
 *		it issues RESERVE until a period of min_ownership_delay with
 *		no resets has gone by, or until max_ownership_delay has
 *		expired. This loop ensures that the host really did manage to
 *		reserve the device, in spite of resets. The looping for
 *		min_ownership_delay (default six seconds) is important to
 *		early generation clustering products, Solstice HA 1.x and
 *		Sun Cluster 2.x. Those products use an MHIOCENFAILFAST
 *		periodic timer of two seconds. By having MHIOCTKOWN issue
 *		Reserves in a loop for six seconds, and having
 *		MHIOCENFAILFAST poll every two seconds, the idea is that by
 *		the time the MHIOCTKOWN ioctl returns, the other host (if
 *		any) will have already noticed, via the MHIOCENFAILFAST
 *		polling, that it no longer "owns" the disk and will have
 *		panicked itself. Thus, the host issuing the MHIOCTKOWN is
 *		assured (with timing dependencies) that by the time it
 *		actually starts to use the disk for real work, the old owner
 *		is no longer accessing it.
 *
 *		min_ownership_delay is the minimum amount of time for which
 *		the disk must be reserved continuously devoid of resets
 *		before the MHIOCTKOWN ioctl will return success.
 *
 *		max_ownership_delay indicates the amount of time by which
 *		the take ownership should succeed or time out with an error.
 *
 * Arguments: dev - the device 'dev_t'
 *		*p - struct containing timing info.
 *
 * Return Code: 0 for success or error code
 */

static int
sd_take_ownership(dev_t dev, struct mhioctkown *p)
{
	struct sd_lun	*un;
	int		rval;
	int		err;
	int		reservation_count = 0;
	int		min_ownership_delay =  6000000; /* in usec */
	int		max_ownership_delay = 30000000; /* in usec */
	clock_t		start_time;	/* starting time of this algorithm */
	clock_t		end_time;	/* time limit for giving up */
	clock_t		ownership_time;	/* time limit for stable ownership */
	clock_t		current_time;
	clock_t		previous_current_time;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * Attempt a device reservation. A priority reservation is requested.
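	 * (SD_PRIORITY_RESERVE allows sd_reserve_release() to "break" an
	 * existing reservation with a LUN, target or bus reset; see that
	 * routine below.)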
25071 */ 25072 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 25073 != SD_SUCCESS) { 25074 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25075 "sd_take_ownership: return(1)=%d\n", rval); 25076 return (rval); 25077 } 25078 25079 /* Update the softstate reserved status to indicate the reservation */ 25080 mutex_enter(SD_MUTEX(un)); 25081 un->un_resvd_status |= SD_RESERVE; 25082 un->un_resvd_status &= 25083 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 25084 mutex_exit(SD_MUTEX(un)); 25085 25086 if (p != NULL) { 25087 if (p->min_ownership_delay != 0) { 25088 min_ownership_delay = p->min_ownership_delay * 1000; 25089 } 25090 if (p->max_ownership_delay != 0) { 25091 max_ownership_delay = p->max_ownership_delay * 1000; 25092 } 25093 } 25094 SD_INFO(SD_LOG_IOCTL_MHD, un, 25095 "sd_take_ownership: min, max delays: %d, %d\n", 25096 min_ownership_delay, max_ownership_delay); 25097 25098 start_time = ddi_get_lbolt(); 25099 current_time = start_time; 25100 ownership_time = current_time + drv_usectohz(min_ownership_delay); 25101 end_time = start_time + drv_usectohz(max_ownership_delay); 25102 25103 while (current_time - end_time < 0) { 25104 delay(drv_usectohz(500000)); 25105 25106 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 25107 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 25108 mutex_enter(SD_MUTEX(un)); 25109 rval = (un->un_resvd_status & 25110 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 25111 mutex_exit(SD_MUTEX(un)); 25112 break; 25113 } 25114 } 25115 previous_current_time = current_time; 25116 current_time = ddi_get_lbolt(); 25117 mutex_enter(SD_MUTEX(un)); 25118 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 25119 ownership_time = ddi_get_lbolt() + 25120 drv_usectohz(min_ownership_delay); 25121 reservation_count = 0; 25122 } else { 25123 reservation_count++; 25124 } 25125 un->un_resvd_status |= SD_RESERVE; 25126 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 25127 mutex_exit(SD_MUTEX(un)); 25128 25129 SD_INFO(SD_LOG_IOCTL_MHD, un, 25130 "sd_take_ownership: ticks for loop iteration=%ld, " 25131 "reservation=%s\n", (current_time - previous_current_time), 25132 reservation_count ? 
"ok" : "reclaimed"); 25133 25134 if (current_time - ownership_time >= 0 && 25135 reservation_count >= 4) { 25136 rval = 0; /* Achieved a stable ownership */ 25137 break; 25138 } 25139 if (current_time - end_time >= 0) { 25140 rval = EACCES; /* No ownership in max possible time */ 25141 break; 25142 } 25143 } 25144 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25145 "sd_take_ownership: return(2)=%d\n", rval); 25146 return (rval); 25147 } 25148 25149 25150 /* 25151 * Function: sd_reserve_release() 25152 * 25153 * Description: This function builds and sends scsi RESERVE, RELEASE, and 25154 * PRIORITY RESERVE commands based on a user specified command type 25155 * 25156 * Arguments: dev - the device 'dev_t' 25157 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 25158 * SD_RESERVE, SD_RELEASE 25159 * 25160 * Return Code: 0 or Error Code 25161 */ 25162 25163 static int 25164 sd_reserve_release(dev_t dev, int cmd) 25165 { 25166 struct uscsi_cmd *com = NULL; 25167 struct sd_lun *un = NULL; 25168 char cdb[CDB_GROUP0]; 25169 int rval; 25170 25171 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 25172 (cmd == SD_PRIORITY_RESERVE)); 25173 25174 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25175 return (ENXIO); 25176 } 25177 25178 /* instantiate and initialize the command and cdb */ 25179 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25180 bzero(cdb, CDB_GROUP0); 25181 com->uscsi_flags = USCSI_SILENT; 25182 com->uscsi_timeout = un->un_reserve_release_time; 25183 com->uscsi_cdblen = CDB_GROUP0; 25184 com->uscsi_cdb = cdb; 25185 if (cmd == SD_RELEASE) { 25186 cdb[0] = SCMD_RELEASE; 25187 } else { 25188 cdb[0] = SCMD_RESERVE; 25189 } 25190 25191 /* Send the command. */ 25192 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25193 SD_PATH_STANDARD); 25194 25195 /* 25196 * "break" a reservation that is held by another host, by issuing a 25197 * reset if priority reserve is desired, and we could not get the 25198 * device. 25199 */ 25200 if ((cmd == SD_PRIORITY_RESERVE) && 25201 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25202 /* 25203 * First try to reset the LUN. If we cannot, then try a target 25204 * reset, followed by a bus reset if the target reset fails. 25205 */ 25206 int reset_retval = 0; 25207 if (un->un_f_lun_reset_enabled == TRUE) { 25208 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 25209 } 25210 if (reset_retval == 0) { 25211 /* The LUN reset either failed or was not issued */ 25212 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25213 } 25214 if ((reset_retval == 0) && 25215 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 25216 rval = EIO; 25217 kmem_free(com, sizeof (*com)); 25218 return (rval); 25219 } 25220 25221 bzero(com, sizeof (struct uscsi_cmd)); 25222 com->uscsi_flags = USCSI_SILENT; 25223 com->uscsi_cdb = cdb; 25224 com->uscsi_cdblen = CDB_GROUP0; 25225 com->uscsi_timeout = 5; 25226 25227 /* 25228 * Reissue the last reserve command, this time without request 25229 * sense. Assume that it is just a regular reserve command. 25230 */ 25231 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25232 SD_PATH_STANDARD); 25233 } 25234 25235 /* Return an error if still getting a reservation conflict. 
*/
	if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
		rval = EACCES;
	}

	kmem_free(com, sizeof (*com));
	return (rval);
}


#define	SD_NDUMP_RETRIES	12
/*
 * System Crash Dump routine
 */

static int
sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	int		partition;
	int		i;
	int		err;
	struct sd_lun	*un;
	struct scsi_pkt *wr_pktp;
	struct buf	*wr_bp;
	struct buf	wr_buf;
	daddr_t		tgt_byte_offset; /* rmw - byte offset for target */
	daddr_t		tgt_blkno;	/* rmw - blkno for target */
	size_t		tgt_byte_count; /* rmw - # of bytes to xfer */
	size_t		tgt_nblk;	/* rmw - # of tgt blks to xfer */
	size_t		io_start_offset;
	int		doing_rmw = FALSE;
	int		rval;
	ssize_t		dma_resid;
	daddr_t		oblkno;
	diskaddr_t	nblks = 0;
	diskaddr_t	start_block;

	instance = SDUNIT(dev);
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    !SD_IS_VALID_LABEL(un) || ISCD(un)) {
		return (ENXIO);
	}

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))

	SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");

	partition = SDPART(dev);
	SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);

	if (!(NOT_DEVBSIZE(un))) {
		int secmask = 0;
		int blknomask = 0;

		blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
		secmask = un->un_tgt_blocksize - 1;

		if (blkno & blknomask) {
			SD_TRACE(SD_LOG_DUMP, un,
			    "sddump: dump start block not modulo %d\n",
			    un->un_tgt_blocksize);
			return (EINVAL);
		}

		if ((nblk * DEV_BSIZE) & secmask) {
			SD_TRACE(SD_LOG_DUMP, un,
			    "sddump: dump length not modulo %d\n",
			    un->un_tgt_blocksize);
			return (EINVAL);
		}

	}

	/* Validate blocks to dump against the partition size. */

	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
	    &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);

	if (NOT_DEVBSIZE(un)) {
		if ((blkno + nblk) > nblks) {
			SD_TRACE(SD_LOG_DUMP, un,
			    "sddump: dump range larger than partition: "
			    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
			    blkno, nblk, nblks);
			return (EINVAL);
		}
	} else {
		if (((blkno / (un->un_tgt_blocksize / DEV_BSIZE)) +
		    (nblk / (un->un_tgt_blocksize / DEV_BSIZE))) > nblks) {
			SD_TRACE(SD_LOG_DUMP, un,
			    "sddump: dump range larger than partition: "
			    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
			    blkno, nblk, nblks);
			return (EINVAL);
		}
	}

	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		struct scsi_pkt *start_pktp;

		mutex_exit(&un->un_pm_mutex);

		/*
		 * use pm framework to power on HBA 1st
		 */
		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);

		/*
		 * Dump no longer uses sdpower to power on a device; it's
		 * done in-line here so it can be run in polled mode.
		 */

		SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");

		start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
		    CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);

		if (start_pktp == NULL) {
			/* We were not given a SCSI packet, fail.
*/
			return (EIO);
		}
		bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
		start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
		start_pktp->pkt_cdbp[4] = SD_TARGET_START;
		start_pktp->pkt_flags = FLAG_NOINTR;

		mutex_enter(SD_MUTEX(un));
		SD_FILL_SCSI1_LUN(un, start_pktp);
		mutex_exit(SD_MUTEX(un));
		/*
		 * Scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD.
		 */
		if (sd_scsi_poll(un, start_pktp) != 0) {
			scsi_destroy_pkt(start_pktp);
			return (EIO);
		}
		scsi_destroy_pkt(start_pktp);
		(void) sd_ddi_pm_resume(un);
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	mutex_enter(SD_MUTEX(un));
	un->un_throttle = 0;

	/*
	 * The first time through, reset the specific target device.
	 * However, when cpr calls sddump we know that sd is in a
	 * good state so no bus reset is required.
	 * Clear sense data via Request Sense cmd.
	 * In sddump we don't care about allow_bus_device_reset anymore
	 */

	if ((un->un_state != SD_STATE_SUSPENDED) &&
	    (un->un_state != SD_STATE_DUMPING)) {

		New_state(un, SD_STATE_DUMPING);

		if (un->un_f_is_fibre == FALSE) {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Attempt a bus reset for parallel scsi.
			 *
			 * Note: A bus reset is required because on some host
			 *	 systems (i.e. E420R) a bus device reset is
			 *	 insufficient to reset the state of the target.
			 *
			 * Note: Don't issue the reset for fibre-channel,
			 *	 because this tends to hang the bus (loop) for
			 *	 too long while everyone is logging out and in
			 *	 and the deadman timer for dumping will fire
			 *	 before the dump is complete.
			 */
			if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
				mutex_enter(SD_MUTEX(un));
				Restore_state(un);
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}

			/* Delay to give the device some recovery time. */
			drv_usecwait(10000);

			if (sd_send_polled_RQS(un) == SD_FAILURE) {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: sd_send_polled_RQS failed\n");
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * Convert the partition-relative block number to a
	 * disk physical block number.
	 */
	if (NOT_DEVBSIZE(un)) {
		blkno += start_block;
	} else {
		blkno = blkno / (un->un_tgt_blocksize / DEV_BSIZE);
		blkno += start_block;
	}

	SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);


	/*
	 * Check if the device has a non-512 block size.
	 */
	wr_bp = NULL;
	if (NOT_DEVBSIZE(un)) {
		tgt_byte_offset = blkno * un->un_sys_blocksize;
		tgt_byte_count = nblk * un->un_sys_blocksize;
		if ((tgt_byte_offset % un->un_tgt_blocksize) ||
		    (tgt_byte_count % un->un_tgt_blocksize)) {
			doing_rmw = TRUE;
			/*
			 * Calculate the block number and number of blocks
			 * in terms of the media block size.
			 */
			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
			tgt_nblk =
			    ((tgt_byte_offset + tgt_byte_count +
			    (un->un_tgt_blocksize - 1)) /
			    un->un_tgt_blocksize) - tgt_blkno;

			/*
			 * Invoke the routine which is going to do read part
			 * of read-modify-write.
			 * Note that this routine returns a pointer to
			 * a valid bp in wr_bp.
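			 *
			 * A worked example of the arithmetic above, assuming
			 * a 512-byte system block size, a 2048-byte target
			 * block size, blkno = 3 and nblk = 2:
			 *
			 *	tgt_byte_offset = 3 * 512 = 1536
			 *	tgt_byte_count  = 2 * 512 = 1024
			 *	tgt_blkno  = 1536 / 2048 = 0
			 *	tgt_nblk   = ((1536 + 1024 + 2047) / 2048) - 0
			 *		   = 2
			 *
			 * Two full 2K media blocks are read, modified at
			 * byte offset 1536 (io_start_offset below), and
			 * written back.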
25468 */ 25469 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 25470 &wr_bp); 25471 if (err) { 25472 mutex_exit(SD_MUTEX(un)); 25473 return (err); 25474 } 25475 /* 25476 * Offset is being calculated as - 25477 * (original block # * system block size) - 25478 * (new block # * target block size) 25479 */ 25480 io_start_offset = 25481 ((uint64_t)(blkno * un->un_sys_blocksize)) - 25482 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 25483 25484 ASSERT((io_start_offset >= 0) && 25485 (io_start_offset < un->un_tgt_blocksize)); 25486 /* 25487 * Do the modify portion of read modify write. 25488 */ 25489 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 25490 (size_t)nblk * un->un_sys_blocksize); 25491 } else { 25492 doing_rmw = FALSE; 25493 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 25494 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 25495 } 25496 25497 /* Convert blkno and nblk to target blocks */ 25498 blkno = tgt_blkno; 25499 nblk = tgt_nblk; 25500 } else { 25501 wr_bp = &wr_buf; 25502 bzero(wr_bp, sizeof (struct buf)); 25503 wr_bp->b_flags = B_BUSY; 25504 wr_bp->b_un.b_addr = addr; 25505 wr_bp->b_bcount = nblk << DEV_BSHIFT; 25506 wr_bp->b_resid = 0; 25507 } 25508 25509 mutex_exit(SD_MUTEX(un)); 25510 25511 /* 25512 * Obtain a SCSI packet for the write command. 25513 * It should be safe to call the allocator here without 25514 * worrying about being locked for DVMA mapping because 25515 * the address we're passed is already a DVMA mapping 25516 * 25517 * We are also not going to worry about semaphore ownership 25518 * in the dump buffer. Dumping is single threaded at present. 25519 */ 25520 25521 wr_pktp = NULL; 25522 25523 dma_resid = wr_bp->b_bcount; 25524 oblkno = blkno; 25525 25526 if (!(NOT_DEVBSIZE(un))) { 25527 nblk = nblk / (un->un_tgt_blocksize / DEV_BSIZE); 25528 } 25529 25530 while (dma_resid != 0) { 25531 25532 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 25533 wr_bp->b_flags &= ~B_ERROR; 25534 25535 if (un->un_partial_dma_supported == 1) { 25536 blkno = oblkno + 25537 ((wr_bp->b_bcount - dma_resid) / 25538 un->un_tgt_blocksize); 25539 nblk = dma_resid / un->un_tgt_blocksize; 25540 25541 if (wr_pktp) { 25542 /* 25543 * Partial DMA transfers after initial transfer 25544 */ 25545 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 25546 blkno, nblk); 25547 } else { 25548 /* Initial transfer */ 25549 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 25550 un->un_pkt_flags, NULL_FUNC, NULL, 25551 blkno, nblk); 25552 } 25553 } else { 25554 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 25555 0, NULL_FUNC, NULL, blkno, nblk); 25556 } 25557 25558 if (rval == 0) { 25559 /* We were given a SCSI packet, continue. 
*/ 25560 break; 25561 } 25562 25563 if (i == 0) { 25564 if (wr_bp->b_flags & B_ERROR) { 25565 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25566 "no resources for dumping; " 25567 "error code: 0x%x, retrying", 25568 geterror(wr_bp)); 25569 } else { 25570 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25571 "no resources for dumping; retrying"); 25572 } 25573 } else if (i != (SD_NDUMP_RETRIES - 1)) { 25574 if (wr_bp->b_flags & B_ERROR) { 25575 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25576 "no resources for dumping; error code: " 25577 "0x%x, retrying\n", geterror(wr_bp)); 25578 } 25579 } else { 25580 if (wr_bp->b_flags & B_ERROR) { 25581 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25582 "no resources for dumping; " 25583 "error code: 0x%x, retries failed, " 25584 "giving up.\n", geterror(wr_bp)); 25585 } else { 25586 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25587 "no resources for dumping; " 25588 "retries failed, giving up.\n"); 25589 } 25590 mutex_enter(SD_MUTEX(un)); 25591 Restore_state(un); 25592 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 25593 mutex_exit(SD_MUTEX(un)); 25594 scsi_free_consistent_buf(wr_bp); 25595 } else { 25596 mutex_exit(SD_MUTEX(un)); 25597 } 25598 return (EIO); 25599 } 25600 drv_usecwait(10000); 25601 } 25602 25603 if (un->un_partial_dma_supported == 1) { 25604 /* 25605 * save the resid from PARTIAL_DMA 25606 */ 25607 dma_resid = wr_pktp->pkt_resid; 25608 if (dma_resid != 0) 25609 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 25610 wr_pktp->pkt_resid = 0; 25611 } else { 25612 dma_resid = 0; 25613 } 25614 25615 /* SunBug 1222170 */ 25616 wr_pktp->pkt_flags = FLAG_NOINTR; 25617 25618 err = EIO; 25619 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 25620 25621 /* 25622 * Scsi_poll returns 0 (success) if the command completes and 25623 * the status block is STATUS_GOOD. We should only check 25624 * errors if this condition is not true. Even then we should 25625 * send our own request sense packet only if we have a check 25626 * condition and auto request sense has not been performed by 25627 * the hba. 25628 */ 25629 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 25630 25631 if ((sd_scsi_poll(un, wr_pktp) == 0) && 25632 (wr_pktp->pkt_resid == 0)) { 25633 err = SD_SUCCESS; 25634 break; 25635 } 25636 25637 /* 25638 * Check CMD_DEV_GONE 1st, give up if device is gone. 
25639 */ 25640 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 25641 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25642 "Error while dumping state...Device is gone\n"); 25643 break; 25644 } 25645 25646 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 25647 SD_INFO(SD_LOG_DUMP, un, 25648 "sddump: write failed with CHECK, try # %d\n", i); 25649 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 25650 (void) sd_send_polled_RQS(un); 25651 } 25652 25653 continue; 25654 } 25655 25656 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 25657 int reset_retval = 0; 25658 25659 SD_INFO(SD_LOG_DUMP, un, 25660 "sddump: write failed with BUSY, try # %d\n", i); 25661 25662 if (un->un_f_lun_reset_enabled == TRUE) { 25663 reset_retval = scsi_reset(SD_ADDRESS(un), 25664 RESET_LUN); 25665 } 25666 if (reset_retval == 0) { 25667 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25668 } 25669 (void) sd_send_polled_RQS(un); 25670 25671 } else { 25672 SD_INFO(SD_LOG_DUMP, un, 25673 "sddump: write failed with 0x%x, try # %d\n", 25674 SD_GET_PKT_STATUS(wr_pktp), i); 25675 mutex_enter(SD_MUTEX(un)); 25676 sd_reset_target(un, wr_pktp); 25677 mutex_exit(SD_MUTEX(un)); 25678 } 25679 25680 /* 25681 * If we are not getting anywhere with lun/target resets, 25682 * let's reset the bus. 25683 */ 25684 if (i == SD_NDUMP_RETRIES/2) { 25685 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 25686 (void) sd_send_polled_RQS(un); 25687 } 25688 } 25689 } 25690 25691 scsi_destroy_pkt(wr_pktp); 25692 mutex_enter(SD_MUTEX(un)); 25693 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 25694 mutex_exit(SD_MUTEX(un)); 25695 scsi_free_consistent_buf(wr_bp); 25696 } else { 25697 mutex_exit(SD_MUTEX(un)); 25698 } 25699 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 25700 return (err); 25701 } 25702 25703 /* 25704 * Function: sd_scsi_poll() 25705 * 25706 * Description: This is a wrapper for the scsi_poll call. 25707 * 25708 * Arguments: sd_lun - The unit structure 25709 * scsi_pkt - The scsi packet being sent to the device. 25710 * 25711 * Return Code: 0 - Command completed successfully with good status 25712 * -1 - Command failed. This could indicate a check condition 25713 * or other status value requiring recovery action. 25714 * 25715 * NOTE: This code is only called off sddump(). 25716 */ 25717 25718 static int 25719 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 25720 { 25721 int status; 25722 25723 ASSERT(un != NULL); 25724 ASSERT(!mutex_owned(SD_MUTEX(un))); 25725 ASSERT(pktp != NULL); 25726 25727 status = SD_SUCCESS; 25728 25729 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 25730 pktp->pkt_flags |= un->un_tagflags; 25731 pktp->pkt_flags &= ~FLAG_NODISCON; 25732 } 25733 25734 status = sd_ddi_scsi_poll(pktp); 25735 /* 25736 * Scsi_poll returns 0 (success) if the command completes and the 25737 * status block is STATUS_GOOD. We should only check errors if this 25738 * condition is not true. Even then we should send our own request 25739 * sense packet only if we have a check condition and auto 25740 * request sense has not been performed by the hba. 25741 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 25742 */ 25743 if ((status != SD_SUCCESS) && 25744 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 25745 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 25746 (pktp->pkt_reason != CMD_DEV_GONE)) 25747 (void) sd_send_polled_RQS(un); 25748 25749 return (status); 25750 } 25751 25752 /* 25753 * Function: sd_send_polled_RQS() 25754 * 25755 * Description: This sends the request sense command to a device. 
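 *		The pre-allocated un_rqs_pktp packet and un_rqs_bp buffer
 *		are used, serialized by the un_sense_isbusy flag.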
25756 * 25757 * Arguments: sd_lun - The unit structure 25758 * 25759 * Return Code: 0 - Command completed successfully with good status 25760 * -1 - Command failed. 25761 * 25762 */ 25763 25764 static int 25765 sd_send_polled_RQS(struct sd_lun *un) 25766 { 25767 int ret_val; 25768 struct scsi_pkt *rqs_pktp; 25769 struct buf *rqs_bp; 25770 25771 ASSERT(un != NULL); 25772 ASSERT(!mutex_owned(SD_MUTEX(un))); 25773 25774 ret_val = SD_SUCCESS; 25775 25776 rqs_pktp = un->un_rqs_pktp; 25777 rqs_bp = un->un_rqs_bp; 25778 25779 mutex_enter(SD_MUTEX(un)); 25780 25781 if (un->un_sense_isbusy) { 25782 ret_val = SD_FAILURE; 25783 mutex_exit(SD_MUTEX(un)); 25784 return (ret_val); 25785 } 25786 25787 /* 25788 * If the request sense buffer (and packet) is not in use, 25789 * let's set un_sense_isbusy and send our packet 25790 */ 25791 un->un_sense_isbusy = 1; 25792 rqs_pktp->pkt_resid = 0; 25793 rqs_pktp->pkt_reason = 0; 25794 rqs_pktp->pkt_flags |= FLAG_NOINTR; 25795 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 25796 25797 mutex_exit(SD_MUTEX(un)); 25798 25799 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 25800 " 0x%p\n", rqs_bp->b_un.b_addr); 25801 25802 /* 25803 * Can't send this to sd_scsi_poll, we wrap ourselves around the 25804 * axle - it has a call into us! 25805 */ 25806 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 25807 SD_INFO(SD_LOG_COMMON, un, 25808 "sd_send_polled_RQS: RQS failed\n"); 25809 } 25810 25811 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 25812 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 25813 25814 mutex_enter(SD_MUTEX(un)); 25815 un->un_sense_isbusy = 0; 25816 mutex_exit(SD_MUTEX(un)); 25817 25818 return (ret_val); 25819 } 25820 25821 /* 25822 * Defines needed for localized version of the scsi_poll routine. 25823 */ 25824 #define CSEC 10000 /* usecs */ 25825 #define SEC_TO_CSEC (1000000/CSEC) 25826 25827 /* 25828 * Function: sd_ddi_scsi_poll() 25829 * 25830 * Description: Localized version of the scsi_poll routine. The purpose is to 25831 * send a scsi_pkt to a device as a polled command. This version 25832 * ensures more robust handling of transport errors. 25833 * Specifically, this routine handles the not-ready to ready 25834 * transition seen on power-up and reset of Sonoma devices. This can take 25835 * up to 45 seconds for power-on and 20 seconds for reset of a 25836 * Sonoma LUN. 25837 * 25838 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 25839 * 25840 * Return Code: 0 - Command completed successfully with good status 25841 * -1 - Command failed. 25842 * 25843 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can 25844 * be fixed (removing this code), we need to determine how to handle the 25845 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 25846 * 25847 * NOTE: This code is only called off sddump(). 25848 */ 25849 static int 25850 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 25851 { 25852 int rval = -1; 25853 int savef; 25854 long savet; 25855 void (*savec)(); 25856 int timeout; 25857 int busy_count; 25858 int poll_delay; 25859 int rc; 25860 uint8_t *sensep; 25861 struct scsi_arq_status *arqstat; 25862 extern int do_polled_io; 25863 25864 ASSERT(pkt->pkt_scbp); 25865 25866 /* 25867 * Save the old flags, completion callback and timeout;
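 * they are restored before this routine returns.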
25868 */ 25869 savef = pkt->pkt_flags; 25870 savec = pkt->pkt_comp; 25871 savet = pkt->pkt_time; 25872 25873 pkt->pkt_flags |= FLAG_NOINTR; 25874 25875 /* 25876 * XXX there is nothing in the SCSA spec that states that we should not 25877 * do a callback for polled cmds; however, removing this will break sd 25878 * and probably other target drivers 25879 */ 25880 pkt->pkt_comp = NULL; 25881 25882 /* 25883 * we don't like a polled command without timeout. 25884 * 60 seconds seems long enough. 25885 */ 25886 if (pkt->pkt_time == 0) 25887 pkt->pkt_time = SCSI_POLL_TIMEOUT; 25888 25889 /* 25890 * Send polled cmd. 25891 * 25892 * We do some error recovery for various errors. Tran_busy, 25893 * queue full, and non-dispatched commands are retried every 10 msec. 25894 * as they are typically transient failures. Busy status and Not 25895 * Ready are retried every second as this status takes a while to 25896 * change. 25897 */ 25898 timeout = pkt->pkt_time * SEC_TO_CSEC; 25899 25900 for (busy_count = 0; busy_count < timeout; busy_count++) { 25901 /* 25902 * Initialize pkt status variables. 25903 */ 25904 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 25905 25906 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 25907 if (rc != TRAN_BUSY) { 25908 /* Transport failed - give up. */ 25909 break; 25910 } else { 25911 /* Transport busy - try again. */ 25912 poll_delay = 1 * CSEC; /* 10 msec. */ 25913 } 25914 } else { 25915 /* 25916 * Transport accepted - check pkt status. 25917 */ 25918 rc = (*pkt->pkt_scbp) & STATUS_MASK; 25919 if ((pkt->pkt_reason == CMD_CMPLT) && 25920 (rc == STATUS_CHECK) && 25921 (pkt->pkt_state & STATE_ARQ_DONE)) { 25922 arqstat = 25923 (struct scsi_arq_status *)(pkt->pkt_scbp); 25924 sensep = (uint8_t *)&arqstat->sts_sensedata; 25925 } else { 25926 sensep = NULL; 25927 } 25928 25929 if ((pkt->pkt_reason == CMD_CMPLT) && 25930 (rc == STATUS_GOOD)) { 25931 /* No error - we're done */ 25932 rval = 0; 25933 break; 25934 25935 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 25936 /* Lost connection - give up */ 25937 break; 25938 25939 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 25940 (pkt->pkt_state == 0)) { 25941 /* Pkt not dispatched - try again. */ 25942 poll_delay = 1 * CSEC; /* 10 msec. */ 25943 25944 } else if ((pkt->pkt_reason == CMD_CMPLT) && 25945 (rc == STATUS_QFULL)) { 25946 /* Queue full - try again. */ 25947 poll_delay = 1 * CSEC; /* 10 msec. */ 25948 25949 } else if ((pkt->pkt_reason == CMD_CMPLT) && 25950 (rc == STATUS_BUSY)) { 25951 /* Busy - try again. */ 25952 poll_delay = 100 * CSEC; /* 1 sec. */ 25953 busy_count += (SEC_TO_CSEC - 1); 25954 25955 } else if ((sensep != NULL) && 25956 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 25957 /* 25958 * Unit Attention - try again. 25959 * Pretend it took 1 sec. 25960 * NOTE: 'continue' avoids poll_delay 25961 */ 25962 busy_count += (SEC_TO_CSEC - 1); 25963 continue; 25964 25965 } else if ((sensep != NULL) && 25966 (scsi_sense_key(sensep) == KEY_NOT_READY) && 25967 (scsi_sense_asc(sensep) == 0x04) && 25968 (scsi_sense_ascq(sensep) == 0x01)) { 25969 /* 25970 * Not ready -> ready - try again. 25971 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 25972 * ...same as STATUS_BUSY 25973 */ 25974 poll_delay = 100 * CSEC; /* 1 sec. */ 25975 busy_count += (SEC_TO_CSEC - 1); 25976 25977 } else { 25978 /* BAD status - give up. 
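 * (an unexpected pkt_reason/status combination)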
*/ 25979 break; 25980 } 25981 } 25982 25983 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 25984 !do_polled_io) { 25985 delay(drv_usectohz(poll_delay)); 25986 } else { 25987 /* we busy wait during cpr_dump or interrupt threads */ 25988 drv_usecwait(poll_delay); 25989 } 25990 } 25991 25992 pkt->pkt_flags = savef; 25993 pkt->pkt_comp = savec; 25994 pkt->pkt_time = savet; 25995 25996 /* return on error */ 25997 if (rval) 25998 return (rval); 25999 26000 /* 26001 * This is not a performance critical code path. 26002 * 26003 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 26004 * issues associated with looking at DMA memory prior to 26005 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 26006 */ 26007 scsi_sync_pkt(pkt); 26008 return (0); 26009 } 26010 26011 26012 26013 /* 26014 * Function: sd_persistent_reservation_in_read_keys 26015 * 26016 * Description: This routine is the driver entry point for handling CD-ROM 26017 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 26018 * by sending the SCSI-3 PRIN commands to the device. 26019 * Processes the read keys command response by copying the 26020 * reservation key information into the user provided buffer. 26021 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 26022 * 26023 * Arguments: un - Pointer to soft state struct for the target. 26024 * usrp - user provided pointer to multihost Persistent In Read 26025 * Keys structure (mhioc_inkeys_t) 26026 * flag - this argument is a pass through to ddi_copyxxx() 26027 * directly from the mode argument of ioctl(). 26028 * 26029 * Return Code: 0 - Success 26030 * EACCES 26031 * ENOTSUP 26032 * errno return code from sd_send_scsi_cmd() 26033 * 26034 * Context: Can sleep. Does not return until command is completed. 26035 */ 26036 26037 static int 26038 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 26039 mhioc_inkeys_t *usrp, int flag) 26040 { 26041 #ifdef _MULTI_DATAMODEL 26042 struct mhioc_key_list32 li32; 26043 #endif 26044 sd_prin_readkeys_t *in; 26045 mhioc_inkeys_t *ptr; 26046 mhioc_key_list_t li; 26047 uchar_t *data_bufp; 26048 int data_len; 26049 int rval = 0; 26050 size_t copysz; 26051 sd_ssc_t *ssc; 26052 26053 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 26054 return (EINVAL); 26055 } 26056 bzero(&li, sizeof (mhioc_key_list_t)); 26057 26058 ssc = sd_ssc_init(un); 26059 26060 /* 26061 * Get the listsize from user 26062 */ 26063 #ifdef _MULTI_DATAMODEL 26064 26065 switch (ddi_model_convert_from(flag & FMODELS)) { 26066 case DDI_MODEL_ILP32: 26067 copysz = sizeof (struct mhioc_key_list32); 26068 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 26069 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26070 "sd_persistent_reservation_in_read_keys: " 26071 "failed ddi_copyin: mhioc_key_list32_t\n"); 26072 rval = EFAULT; 26073 goto done; 26074 } 26075 li.listsize = li32.listsize; 26076 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 26077 break; 26078 26079 case DDI_MODEL_NONE: 26080 copysz = sizeof (mhioc_key_list_t); 26081 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26082 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26083 "sd_persistent_reservation_in_read_keys: " 26084 "failed ddi_copyin: mhioc_key_list_t\n"); 26085 rval = EFAULT; 26086 goto done; 26087 } 26088 break; 26089 } 26090 26091 #else /* ! 
_MULTI_DATAMODEL */ 26092 copysz = sizeof (mhioc_key_list_t); 26093 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26094 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26095 "sd_persistent_reservation_in_read_keys: " 26096 "failed ddi_copyin: mhioc_key_list_t\n"); 26097 rval = EFAULT; 26098 goto done; 26099 } 26100 #endif 26101 26102 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 26103 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 26104 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26105 26106 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 26107 data_len, data_bufp); 26108 if (rval != 0) { 26109 if (rval == EIO) 26110 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 26111 else 26112 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 26113 goto done; 26114 } 26115 in = (sd_prin_readkeys_t *)data_bufp; 26116 ptr->generation = BE_32(in->generation); 26117 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 26118 26119 /* 26120 * Return the min(listsize, listlen) keys 26121 */ 26122 #ifdef _MULTI_DATAMODEL 26123 26124 switch (ddi_model_convert_from(flag & FMODELS)) { 26125 case DDI_MODEL_ILP32: 26126 li32.listlen = li.listlen; 26127 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 26128 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26129 "sd_persistent_reservation_in_read_keys: " 26130 "failed ddi_copyout: mhioc_key_list32_t\n"); 26131 rval = EFAULT; 26132 goto done; 26133 } 26134 break; 26135 26136 case DDI_MODEL_NONE: 26137 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26138 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26139 "sd_persistent_reservation_in_read_keys: " 26140 "failed ddi_copyout: mhioc_key_list_t\n"); 26141 rval = EFAULT; 26142 goto done; 26143 } 26144 break; 26145 } 26146 26147 #else /* ! _MULTI_DATAMODEL */ 26148 26149 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26150 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26151 "sd_persistent_reservation_in_read_keys: " 26152 "failed ddi_copyout: mhioc_key_list_t\n"); 26153 rval = EFAULT; 26154 goto done; 26155 } 26156 26157 #endif /* _MULTI_DATAMODEL */ 26158 26159 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 26160 li.listsize * MHIOC_RESV_KEY_SIZE); 26161 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 26162 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26163 "sd_persistent_reservation_in_read_keys: " 26164 "failed ddi_copyout: keylist\n"); 26165 rval = EFAULT; 26166 } 26167 done: 26168 sd_ssc_fini(ssc); 26169 kmem_free(data_bufp, data_len); 26170 return (rval); 26171 } 26172 26173 26174 /* 26175 * Function: sd_persistent_reservation_in_read_resv 26176 * 26177 * Description: This routine is the driver entry point for handling CD-ROM 26178 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 26179 * by sending the SCSI-3 PRIN commands to the device. 26180 * Process the read persistent reservations command response by 26181 * copying the reservation information into the user provided 26182 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 26183 * 26184 * Arguments: un - Pointer to soft state struct for the target. 26185 * usrp - user provided pointer to multihost Persistent In Read 26186 * Keys structure (mhioc_inkeys_t) 26187 * flag - this argument is a pass through to ddi_copyxxx() 26188 * directly from the mode argument of ioctl(). 26189 * 26190 * Return Code: 0 - Success 26191 * EACCES 26192 * ENOTSUP 26193 * errno return code from sd_send_scsi_cmd() 26194 * 26195 * Context: Can sleep. Does not return until command is completed. 
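 *
 *		Illustrative userland usage (a sketch, not from this driver;
 *		fd, NRESV, and error handling are hypothetical):
 *
 *		    mhioc_resv_desc_t descs[NRESV];
 *		    mhioc_resv_desc_list_t rl;
 *		    mhioc_inresvs_t arg;
 *
 *		    rl.listsize = NRESV;
 *		    rl.list = descs;
 *		    arg.generation = 0;
 *		    arg.li = &rl;
 *		    if (ioctl(fd, MHIOCGRP_INRESV, &arg) == 0)
 *		        use min(rl.listlen, rl.listsize) entries of descs;
 *		        arg.generation holds the PR generation count.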
26196 */ 26197 26198 static int 26199 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 26200 mhioc_inresvs_t *usrp, int flag) 26201 { 26202 #ifdef _MULTI_DATAMODEL 26203 struct mhioc_resv_desc_list32 resvlist32; 26204 #endif 26205 sd_prin_readresv_t *in; 26206 mhioc_inresvs_t *ptr; 26207 sd_readresv_desc_t *readresv_ptr; 26208 mhioc_resv_desc_list_t resvlist; 26209 mhioc_resv_desc_t resvdesc; 26210 uchar_t *data_bufp = NULL; 26211 int data_len; 26212 int rval = 0; 26213 int i; 26214 size_t copysz; 26215 mhioc_resv_desc_t *bufp; 26216 sd_ssc_t *ssc; 26217 26218 if ((ptr = usrp) == NULL) { 26219 return (EINVAL); 26220 } 26221 26222 ssc = sd_ssc_init(un); 26223 26224 /* 26225 * Get the listsize from user 26226 */ 26227 #ifdef _MULTI_DATAMODEL 26228 switch (ddi_model_convert_from(flag & FMODELS)) { 26229 case DDI_MODEL_ILP32: 26230 copysz = sizeof (struct mhioc_resv_desc_list32); 26231 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 26232 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26233 "sd_persistent_reservation_in_read_resv: " 26234 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26235 rval = EFAULT; 26236 goto done; 26237 } 26238 resvlist.listsize = resvlist32.listsize; 26239 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 26240 break; 26241 26242 case DDI_MODEL_NONE: 26243 copysz = sizeof (mhioc_resv_desc_list_t); 26244 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26245 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26246 "sd_persistent_reservation_in_read_resv: " 26247 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26248 rval = EFAULT; 26249 goto done; 26250 } 26251 break; 26252 } 26253 #else /* ! _MULTI_DATAMODEL */ 26254 copysz = sizeof (mhioc_resv_desc_list_t); 26255 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26256 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26257 "sd_persistent_reservation_in_read_resv: " 26258 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26259 rval = EFAULT; 26260 goto done; 26261 } 26262 #endif /* ! _MULTI_DATAMODEL */ 26263 26264 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 26265 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 26266 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26267 26268 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV, 26269 data_len, data_bufp); 26270 if (rval != 0) { 26271 if (rval == EIO) 26272 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 26273 else 26274 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 26275 goto done; 26276 } 26277 in = (sd_prin_readresv_t *)data_bufp; 26278 ptr->generation = BE_32(in->generation); 26279 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 26280 26281 /* 26282 * Return the min(listsize, listlen) keys 26283 */ 26284 #ifdef _MULTI_DATAMODEL 26285 26286 switch (ddi_model_convert_from(flag & FMODELS)) { 26287 case DDI_MODEL_ILP32: 26288 resvlist32.listlen = resvlist.listlen; 26289 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 26290 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26291 "sd_persistent_reservation_in_read_resv: " 26292 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26293 rval = EFAULT; 26294 goto done; 26295 } 26296 break; 26297 26298 case DDI_MODEL_NONE: 26299 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26300 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26301 "sd_persistent_reservation_in_read_resv: " 26302 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26303 rval = EFAULT; 26304 goto done; 26305 } 26306 break; 26307 } 26308 26309 #else /* !
_MULTI_DATAMODEL */ 26310 26311 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26312 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26313 "sd_persistent_reservation_in_read_resv: " 26314 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26315 rval = EFAULT; 26316 goto done; 26317 } 26318 26319 #endif /* ! _MULTI_DATAMODEL */ 26320 26321 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 26322 bufp = resvlist.list; 26323 copysz = sizeof (mhioc_resv_desc_t); 26324 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 26325 i++, readresv_ptr++, bufp++) { 26326 26327 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 26328 MHIOC_RESV_KEY_SIZE); 26329 resvdesc.type = readresv_ptr->type; 26330 resvdesc.scope = readresv_ptr->scope; 26331 resvdesc.scope_specific_addr = 26332 BE_32(readresv_ptr->scope_specific_addr); 26333 26334 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 26335 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26336 "sd_persistent_reservation_in_read_resv: " 26337 "failed ddi_copyout: resvlist\n"); 26338 rval = EFAULT; 26339 goto done; 26340 } 26341 } 26342 done: 26343 sd_ssc_fini(ssc); 26344 /* free data_bufp only if it was actually allocated */ 26345 if (data_bufp) { 26346 kmem_free(data_bufp, data_len); 26347 } 26348 return (rval); 26349 } 26350 26351 26352 /* 26353 * Function: sr_change_blkmode() 26354 * 26355 * Description: This routine is the driver entry point for handling CD-ROM 26356 * block mode ioctl requests. Support for returning and changing 26357 * the current block size in use by the device is implemented. The 26358 * LBA size is changed via a MODE SELECT Block Descriptor. 26359 * 26360 * This routine issues a mode sense with an allocation length of 26361 * 12 bytes for the mode page header and a single block descriptor. 26362 * 26363 * Arguments: dev - the device 'dev_t' 26364 * cmd - the request type; one of CDROMGBLKMODE (get) or 26365 * CDROMSBLKMODE (set) 26366 * data - current block size or requested block size 26367 * flag - this argument is a pass through to ddi_copyxxx() directly 26368 * from the mode argument of ioctl(). 26369 * 26370 * Return Code: the code returned by sd_send_scsi_cmd() 26371 * EINVAL if invalid arguments are provided 26372 * EFAULT if ddi_copyxxx() fails 26373 * ENXIO if fail ddi_get_soft_state 26374 * EIO if invalid mode sense block descriptor length 26375 * 26376 */ 26377 26378 static int 26379 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 26380 { 26381 struct sd_lun *un = NULL; 26382 struct mode_header *sense_mhp, *select_mhp; 26383 struct block_descriptor *sense_desc, *select_desc; 26384 int current_bsize; 26385 int rval = EINVAL; 26386 uchar_t *sense = NULL; 26387 uchar_t *select = NULL; 26388 sd_ssc_t *ssc; 26389 26390 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 26391 26392 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26393 return (ENXIO); 26394 } 26395 26396 /* 26397 * The block length is changed via the Mode Select block descriptor; the 26398 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 26399 * required as part of this routine. Therefore the mode sense allocation 26400 * length is specified to be the length of a mode page header and a 26401 * block descriptor.
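 * (With a 4-byte mode header and an 8-byte block descriptor this is
 * the 12 bytes noted above, i.e. BUFLEN_CHG_BLK_MODE.)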
26402 */ 26403 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26404 26405 ssc = sd_ssc_init(un); 26406 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 26407 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD); 26408 sd_ssc_fini(ssc); 26409 if (rval != 0) { 26410 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26411 "sr_change_blkmode: Mode Sense Failed\n"); 26412 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26413 return (rval); 26414 } 26415 26416 /* Check the block descriptor len to handle only 1 block descriptor */ 26417 sense_mhp = (struct mode_header *)sense; 26418 if ((sense_mhp->bdesc_length == 0) || 26419 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 26420 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26421 "sr_change_blkmode: Mode Sense returned invalid block" 26422 " descriptor length\n"); 26423 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26424 return (EIO); 26425 } 26426 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 26427 current_bsize = ((sense_desc->blksize_hi << 16) | 26428 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 26429 26430 /* Process command */ 26431 switch (cmd) { 26432 case CDROMGBLKMODE: 26433 /* Return the block size obtained during the mode sense */ 26434 if (ddi_copyout(&current_bsize, (void *)data, 26435 sizeof (int), flag) != 0) 26436 rval = EFAULT; 26437 break; 26438 case CDROMSBLKMODE: 26439 /* Validate the requested block size */ 26440 switch (data) { 26441 case CDROM_BLK_512: 26442 case CDROM_BLK_1024: 26443 case CDROM_BLK_2048: 26444 case CDROM_BLK_2056: 26445 case CDROM_BLK_2336: 26446 case CDROM_BLK_2340: 26447 case CDROM_BLK_2352: 26448 case CDROM_BLK_2368: 26449 case CDROM_BLK_2448: 26450 case CDROM_BLK_2646: 26451 case CDROM_BLK_2647: 26452 break; 26453 default: 26454 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26455 "sr_change_blkmode: " 26456 "Block Size '%ld' Not Supported\n", data); 26457 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26458 return (EINVAL); 26459 } 26460 26461 /* 26462 * The current block size matches the requested block size so 26463 * there is no need to send the mode select to change the size 26464 */ 26465 if (current_bsize == data) { 26466 break; 26467 } 26468 26469 /* Build the select data for the requested block size */ 26470 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26471 select_mhp = (struct mode_header *)select; 26472 select_desc = 26473 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 26474 /* 26475 * The LBA size is changed via the block descriptor, so the 26476 * descriptor is built according to the user data 26477 */ 26478 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 26479 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 26480 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 26481 select_desc->blksize_lo = (char)((data) & 0x000000ff); 26482 26483 /* Send the mode select for the requested block size */ 26484 ssc = sd_ssc_init(un); 26485 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 26486 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 26487 SD_PATH_STANDARD); 26488 sd_ssc_fini(ssc); 26489 if (rval != 0) { 26490 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26491 "sr_change_blkmode: Mode Select Failed\n"); 26492 /* 26493 * The mode select failed for the requested block size, 26494 * so reset the data for the original block size and 26495 * send it to the target. The error is indicated by the 26496 * return value for the failed mode select.
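 * (The revert below is best effort; its return status is
 * intentionally ignored.)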
26497 */ 26498 select_desc->blksize_hi = sense_desc->blksize_hi; 26499 select_desc->blksize_mid = sense_desc->blksize_mid; 26500 select_desc->blksize_lo = sense_desc->blksize_lo; 26501 ssc = sd_ssc_init(un); 26502 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 26503 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 26504 SD_PATH_STANDARD); 26505 sd_ssc_fini(ssc); 26506 } else { 26507 ASSERT(!mutex_owned(SD_MUTEX(un))); 26508 mutex_enter(SD_MUTEX(un)); 26509 sd_update_block_info(un, (uint32_t)data, 0); 26510 mutex_exit(SD_MUTEX(un)); 26511 } 26512 break; 26513 default: 26514 /* should not reach here, but check anyway */ 26515 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26516 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 26517 rval = EINVAL; 26518 break; 26519 } 26520 26521 if (select) { 26522 kmem_free(select, BUFLEN_CHG_BLK_MODE); 26523 } 26524 if (sense) { 26525 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26526 } 26527 return (rval); 26528 } 26529 26530 26531 /* 26532 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 26533 * implement driver support for getting and setting the CD speed. The command 26534 * set used will be based on the device type. If the device has not been 26535 * identified as MMC the Toshiba vendor specific mode page will be used. If 26536 * the device is MMC but does not support the Real Time Streaming feature 26537 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 26538 * be used to read the speed. 26539 */ 26540 26541 /* 26542 * Function: sr_change_speed() 26543 * 26544 * Description: This routine is the driver entry point for handling CD-ROM 26545 * drive speed ioctl requests for devices supporting the Toshiba 26546 * vendor specific drive speed mode page. Support for returning 26547 * and changing the current drive speed in use by the device is 26548 * implemented. 26549 * 26550 * Arguments: dev - the device 'dev_t' 26551 * cmd - the request type; one of CDROMGDRVSPEED (get) or 26552 * CDROMSDRVSPEED (set) 26553 * data - current drive speed or requested drive speed 26554 * flag - this argument is a pass through to ddi_copyxxx() directly 26555 * from the mode argument of ioctl(). 26556 * 26557 * Return Code: the code returned by sd_send_scsi_cmd() 26558 * EINVAL if invalid arguments are provided 26559 * EFAULT if ddi_copyxxx() fails 26560 * ENXIO if fail ddi_get_soft_state 26561 * EIO if invalid mode sense block descriptor length 26562 */ 26563 26564 static int 26565 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26566 { 26567 struct sd_lun *un = NULL; 26568 struct mode_header *sense_mhp, *select_mhp; 26569 struct mode_speed *sense_page, *select_page; 26570 int current_speed; 26571 int rval = EINVAL; 26572 int bd_len; 26573 uchar_t *sense = NULL; 26574 uchar_t *select = NULL; 26575 sd_ssc_t *ssc; 26576 26577 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26578 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26579 return (ENXIO); 26580 } 26581 26582 /* 26583 * Note: The drive speed is being modified here according to a Toshiba 26584 * vendor specific mode page (0x31). 
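 * The speed byte is read from the mode_speed structure that follows
 * the mode header and any block descriptor in the sense data below.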
26585 */ 26586 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26587 26588 ssc = sd_ssc_init(un); 26589 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 26590 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 26591 SD_PATH_STANDARD); 26592 sd_ssc_fini(ssc); 26593 if (rval != 0) { 26594 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26595 "sr_change_speed: Mode Sense Failed\n"); 26596 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26597 return (rval); 26598 } 26599 sense_mhp = (struct mode_header *)sense; 26600 26601 /* Check the block descriptor len to handle only 1 block descriptor */ 26602 bd_len = sense_mhp->bdesc_length; 26603 if (bd_len > MODE_BLK_DESC_LENGTH) { 26604 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26605 "sr_change_speed: Mode Sense returned invalid block " 26606 "descriptor length\n"); 26607 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26608 return (EIO); 26609 } 26610 26611 sense_page = (struct mode_speed *) 26612 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 26613 current_speed = sense_page->speed; 26614 26615 /* Process command */ 26616 switch (cmd) { 26617 case CDROMGDRVSPEED: 26618 /* Return the drive speed obtained during the mode sense */ 26619 if (current_speed == 0x2) { 26620 current_speed = CDROM_TWELVE_SPEED; 26621 } 26622 if (ddi_copyout(&current_speed, (void *)data, 26623 sizeof (int), flag) != 0) { 26624 rval = EFAULT; 26625 } 26626 break; 26627 case CDROMSDRVSPEED: 26628 /* Validate the requested drive speed */ 26629 switch ((uchar_t)data) { 26630 case CDROM_TWELVE_SPEED: 26631 data = 0x2; 26632 /*FALLTHROUGH*/ 26633 case CDROM_NORMAL_SPEED: 26634 case CDROM_DOUBLE_SPEED: 26635 case CDROM_QUAD_SPEED: 26636 case CDROM_MAXIMUM_SPEED: 26637 break; 26638 default: 26639 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26640 "sr_change_speed: " 26641 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 26642 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26643 return (EINVAL); 26644 } 26645 26646 /* 26647 * The current drive speed matches the requested drive speed so 26648 * there is no need to send the mode select to change the speed 26649 */ 26650 if (current_speed == data) { 26651 break; 26652 } 26653 26654 /* Build the select data for the requested drive speed */ 26655 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26656 select_mhp = (struct mode_header *)select; 26657 select_mhp->bdesc_length = 0; 26658 select_page = 26659 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 26662 select_page->mode_page.code = CDROM_MODE_SPEED; 26663 select_page->mode_page.length = 2; 26664 select_page->speed = (uchar_t)data; 26665 26666 /* Send the mode select for the requested drive speed */ 26667 ssc = sd_ssc_init(un); 26668 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 26669 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26670 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26671 sd_ssc_fini(ssc); 26672 if (rval != 0) { 26673 /* 26674 * The mode select failed for the requested drive speed, 26675 * so reset the data for the original drive speed and 26676 * send it to the target. The error is indicated by the 26677 * return value for the failed mode select.
26678 */ 26679 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26680 "sr_change_speed: Mode Select Failed\n"); 26681 select_page->speed = sense_page->speed; 26682 ssc = sd_ssc_init(un); 26683 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 26684 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26685 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26686 sd_ssc_fini(ssc); 26687 } 26688 break; 26689 default: 26690 /* should not reach here, but check anyway */ 26691 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26692 "sr_change_speed: Command '%x' Not Supported\n", cmd); 26693 rval = EINVAL; 26694 break; 26695 } 26696 26697 if (select) { 26698 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 26699 } 26700 if (sense) { 26701 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26702 } 26703 26704 return (rval); 26705 } 26706 26707 26708 /* 26709 * Function: sr_atapi_change_speed() 26710 * 26711 * Description: This routine is the driver entry point for handling CD-ROM 26712 * drive speed ioctl requests for MMC devices that do not support 26713 * the Real Time Streaming feature (0x107). 26714 * 26715 * Note: This routine will use the SET SPEED command which may not 26716 * be supported by all devices. 26717 * 26718 * Arguments: dev - the device 'dev_t' 26719 * cmd - the request type; one of CDROMGDRVSPEED (get) or 26720 * CDROMSDRVSPEED (set) 26721 * data - current drive speed or requested drive speed 26722 * flag - this argument is a pass through to ddi_copyxxx() directly 26723 * from the mode argument of ioctl(). 26724 * 26725 * Return Code: the code returned by sd_send_scsi_cmd() 26726 * EINVAL if invalid arguments are provided 26727 * EFAULT if ddi_copyxxx() fails 26728 * ENXIO if fail ddi_get_soft_state 26729 * EIO if invalid mode sense block descriptor length 26730 */ 26731 26732 static int 26733 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26734 { 26735 struct sd_lun *un; 26736 struct uscsi_cmd *com = NULL; 26737 struct mode_header_grp2 *sense_mhp; 26738 uchar_t *sense_page; 26739 uchar_t *sense = NULL; 26740 char cdb[CDB_GROUP5]; 26741 int bd_len; 26742 int current_speed = 0; 26743 int max_speed = 0; 26744 int rval; 26745 sd_ssc_t *ssc; 26746 26747 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26748 26749 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26750 return (ENXIO); 26751 } 26752 26753 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 26754 26755 ssc = sd_ssc_init(un); 26756 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 26757 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 26758 SD_PATH_STANDARD); 26759 sd_ssc_fini(ssc); 26760 if (rval != 0) { 26761 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26762 "sr_atapi_change_speed: Mode Sense Failed\n"); 26763 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26764 return (rval); 26765 } 26766 26767 /* Check the block descriptor len to handle only 1 block descriptor */ 26768 sense_mhp = (struct mode_header_grp2 *)sense; 26769 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 26770 if (bd_len > MODE_BLK_DESC_LENGTH) { 26771 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26772 "sr_atapi_change_speed: Mode Sense returned invalid " 26773 "block descriptor length\n"); 26774 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26775 return (EIO); 26776 } 26777 26778 /* Calculate the current and maximum drive speeds */ 26779 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26780 current_speed = (sense_page[14] << 8) | sense_page[15]; 26781 max_speed = (sense_page[8] << 8) | sense_page[9]; 26782 26783
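	/*
	 * Worked example (illustrative; assumes SD_SPEED_1X is the 1x CD
	 * data rate in KB/sec, roughly 176): a CAP page reporting
	 * sense_page[14..15] = 0x02C0 yields current_speed = 704 KB/sec,
	 * which CDROMGDRVSPEED below reports as 704 / SD_SPEED_1X = 4 (4x).
	 */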
/* Process the command */ 26784 switch (cmd) { 26785 case CDROMGDRVSPEED: 26786 current_speed /= SD_SPEED_1X; 26787 if (ddi_copyout(&current_speed, (void *)data, 26788 sizeof (int), flag) != 0) 26789 rval = EFAULT; 26790 break; 26791 case CDROMSDRVSPEED: 26792 /* Convert the speed code to KB/sec */ 26793 switch ((uchar_t)data) { 26794 case CDROM_NORMAL_SPEED: 26795 current_speed = SD_SPEED_1X; 26796 break; 26797 case CDROM_DOUBLE_SPEED: 26798 current_speed = 2 * SD_SPEED_1X; 26799 break; 26800 case CDROM_QUAD_SPEED: 26801 current_speed = 4 * SD_SPEED_1X; 26802 break; 26803 case CDROM_TWELVE_SPEED: 26804 current_speed = 12 * SD_SPEED_1X; 26805 break; 26806 case CDROM_MAXIMUM_SPEED: 26807 current_speed = 0xffff; 26808 break; 26809 default: 26810 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26811 "sr_atapi_change_speed: invalid drive speed %d\n", 26812 (uchar_t)data); 26813 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26814 return (EINVAL); 26815 } 26816 26817 /* Check the request against the drive's max speed. */ 26818 if (current_speed != 0xffff) { 26819 if (current_speed > max_speed) { 26820 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26821 return (EINVAL); 26822 } 26823 } 26824 26825 /* 26826 * Build and send the SET SPEED command 26827 * 26828 * Note: The SET SPEED (0xBB) command used in this routine is 26829 * obsolete per the SCSI MMC spec but still supported in the 26830 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI, 26831 * therefore the command is still implemented in this routine. 26832 */ 26833 bzero(cdb, sizeof (cdb)); 26834 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 26835 cdb[2] = (uchar_t)(current_speed >> 8); 26836 cdb[3] = (uchar_t)current_speed; 26837 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26838 com->uscsi_cdb = (caddr_t)cdb; 26839 com->uscsi_cdblen = CDB_GROUP5; 26840 com->uscsi_bufaddr = NULL; 26841 com->uscsi_buflen = 0; 26842 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26843 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 26844 break; 26845 default: 26846 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26847 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 26848 rval = EINVAL; 26849 } 26850 26851 if (sense) { 26852 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26853 } 26854 if (com) { 26855 kmem_free(com, sizeof (*com)); 26856 } 26857 return (rval); 26858 } 26859 26860 26861 /* 26862 * Function: sr_pause_resume() 26863 * 26864 * Description: This routine is the driver entry point for handling CD-ROM 26865 * pause/resume ioctl requests. This only affects the audio play 26866 * operation. 26867 * 26868 * Arguments: dev - the device 'dev_t' 26869 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 26870 * for setting the resume bit of the cdb.
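 *			  (byte 8 of the PAUSE/RESUME cdb: 1 = resume
 *			  audio play, 0 = pause)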
26871 * 26872 * Return Code: the code returned by sd_send_scsi_cmd() 26873 * EINVAL if invalid mode specified 26874 * 26875 */ 26876 26877 static int 26878 sr_pause_resume(dev_t dev, int cmd) 26879 { 26880 struct sd_lun *un; 26881 struct uscsi_cmd *com; 26882 char cdb[CDB_GROUP1]; 26883 int rval; 26884 26885 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26886 return (ENXIO); 26887 } 26888 26889 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26890 bzero(cdb, CDB_GROUP1); 26891 cdb[0] = SCMD_PAUSE_RESUME; 26892 switch (cmd) { 26893 case CDROMRESUME: 26894 cdb[8] = 1; 26895 break; 26896 case CDROMPAUSE: 26897 cdb[8] = 0; 26898 break; 26899 default: 26900 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 26901 " Command '%x' Not Supported\n", cmd); 26902 rval = EINVAL; 26903 goto done; 26904 } 26905 26906 com->uscsi_cdb = cdb; 26907 com->uscsi_cdblen = CDB_GROUP1; 26908 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26909 26910 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26911 SD_PATH_STANDARD); 26912 26913 done: 26914 kmem_free(com, sizeof (*com)); 26915 return (rval); 26916 } 26917 26918 26919 /* 26920 * Function: sr_play_msf() 26921 * 26922 * Description: This routine is the driver entry point for handling CD-ROM 26923 * ioctl requests to output the audio signals at the specified 26924 * starting address and continue the audio play until the specified 26925 * ending address (CDROMPLAYMSF). The address is in Minute Second 26926 * Frame (MSF) format. 26927 * 26928 * Arguments: dev - the device 'dev_t' 26929 * data - pointer to user provided audio msf structure, 26930 * specifying start/end addresses. 26931 * flag - this argument is a pass through to ddi_copyxxx() 26932 * directly from the mode argument of ioctl().
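 *
 *		Illustrative call (a sketch, not from this driver; fd is
 *		hypothetical): to play from 0:02:00 up to 5:00:00,
 *
 *		    struct cdrom_msf m;
 *		    m.cdmsf_min0 = 0;  m.cdmsf_sec0 = 2;  m.cdmsf_frame0 = 0;
 *		    m.cdmsf_min1 = 5;  m.cdmsf_sec1 = 0;  m.cdmsf_frame1 = 0;
 *		    (void) ioctl(fd, CDROMPLAYMSF, &m);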
26933 * 26934 * Return Code: the code returned by sd_send_scsi_cmd() 26935 * EFAULT if ddi_copyxxx() fails 26936 * ENXIO if fail ddi_get_soft_state 26937 * EINVAL if data pointer is NULL 26938 */ 26939 26940 static int 26941 sr_play_msf(dev_t dev, caddr_t data, int flag) 26942 { 26943 struct sd_lun *un; 26944 struct uscsi_cmd *com; 26945 struct cdrom_msf msf_struct; 26946 struct cdrom_msf *msf = &msf_struct; 26947 char cdb[CDB_GROUP1]; 26948 int rval; 26949 26950 if (data == NULL) { 26951 return (EINVAL); 26952 } 26953 26954 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26955 return (ENXIO); 26956 } 26957 26958 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 26959 return (EFAULT); 26960 } 26961 26962 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26963 bzero(cdb, CDB_GROUP1); 26964 cdb[0] = SCMD_PLAYAUDIO_MSF; 26965 if (un->un_f_cfg_playmsf_bcd == TRUE) { 26966 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 26967 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 26968 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 26969 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 26970 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 26971 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 26972 } else { 26973 cdb[3] = msf->cdmsf_min0; 26974 cdb[4] = msf->cdmsf_sec0; 26975 cdb[5] = msf->cdmsf_frame0; 26976 cdb[6] = msf->cdmsf_min1; 26977 cdb[7] = msf->cdmsf_sec1; 26978 cdb[8] = msf->cdmsf_frame1; 26979 } 26980 com->uscsi_cdb = cdb; 26981 com->uscsi_cdblen = CDB_GROUP1; 26982 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26983 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26984 SD_PATH_STANDARD); 26985 kmem_free(com, sizeof (*com)); 26986 return (rval); 26987 } 26988 26989 26990 /* 26991 * Function: sr_play_trkind() 26992 * 26993 * Description: This routine is the driver entry point for handling CD-ROM 26994 * ioctl requests to output the audio signals at the specified 26995 * starting address and continue the audio play until the specified 26996 * ending address (CDROMPLAYTRKIND). The address is in Track Index 26997 * format. 26998 * 26999 * Arguments: dev - the device 'dev_t' 27000 * data - pointer to user provided audio track/index structure, 27001 * specifying start/end addresses. 27002 * flag - this argument is a pass through to ddi_copyxxx() 27003 * directly from the mode argument of ioctl(). 
27004 * 27005 * Return Code: the code returned by sd_send_scsi_cmd() 27006 * EFAULT if ddi_copyxxx() fails 27007 * ENXIO if fail ddi_get_soft_state 27008 * EINVAL if data pointer is NULL 27009 */ 27010 27011 static int 27012 sr_play_trkind(dev_t dev, caddr_t data, int flag) 27013 { 27014 struct cdrom_ti ti_struct; 27015 struct cdrom_ti *ti = &ti_struct; 27016 struct uscsi_cmd *com = NULL; 27017 char cdb[CDB_GROUP1]; 27018 int rval; 27019 27020 if (data == NULL) { 27021 return (EINVAL); 27022 } 27023 27024 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 27025 return (EFAULT); 27026 } 27027 27028 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27029 bzero(cdb, CDB_GROUP1); 27030 cdb[0] = SCMD_PLAYAUDIO_TI; 27031 cdb[4] = ti->cdti_trk0; 27032 cdb[5] = ti->cdti_ind0; 27033 cdb[7] = ti->cdti_trk1; 27034 cdb[8] = ti->cdti_ind1; 27035 com->uscsi_cdb = cdb; 27036 com->uscsi_cdblen = CDB_GROUP1; 27037 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27038 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27039 SD_PATH_STANDARD); 27040 kmem_free(com, sizeof (*com)); 27041 return (rval); 27042 } 27043 27044 27045 /* 27046 * Function: sr_read_all_subcodes() 27047 * 27048 * Description: This routine is the driver entry point for handling CD-ROM 27049 * ioctl requests to return raw subcode data while the target is 27050 * playing audio (CDROMSUBCODE). 27051 * 27052 * Arguments: dev - the device 'dev_t' 27053 * data - pointer to user provided cdrom subcode structure, 27054 * specifying the transfer length and address. 27055 * flag - this argument is a pass through to ddi_copyxxx() 27056 * directly from the mode argument of ioctl(). 27057 * 27058 * Return Code: the code returned by sd_send_scsi_cmd() 27059 * EFAULT if ddi_copyxxx() fails 27060 * ENXIO if fail ddi_get_soft_state 27061 * EINVAL if data pointer is NULL 27062 */ 27063 27064 static int 27065 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 27066 { 27067 struct sd_lun *un = NULL; 27068 struct uscsi_cmd *com = NULL; 27069 struct cdrom_subcode *subcode = NULL; 27070 int rval; 27071 size_t buflen; 27072 char cdb[CDB_GROUP5]; 27073 27074 #ifdef _MULTI_DATAMODEL 27075 /* To support ILP32 applications in an LP64 world */ 27076 struct cdrom_subcode32 cdrom_subcode32; 27077 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 27078 #endif 27079 if (data == NULL) { 27080 return (EINVAL); 27081 } 27082 27083 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27084 return (ENXIO); 27085 } 27086 27087 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 27088 27089 #ifdef _MULTI_DATAMODEL 27090 switch (ddi_model_convert_from(flag & FMODELS)) { 27091 case DDI_MODEL_ILP32: 27092 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 27093 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27094 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27095 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27096 return (EFAULT); 27097 } 27098 /* Convert the ILP32 uscsi data from the application to LP64 */ 27099 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 27100 break; 27101 case DDI_MODEL_NONE: 27102 if (ddi_copyin(data, subcode, 27103 sizeof (struct cdrom_subcode), flag)) { 27104 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27105 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27106 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27107 return (EFAULT); 27108 } 27109 break; 27110 } 27111 #else /* ! 
_MULTI_DATAMODEL */ 27112 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 27113 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27114 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27115 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27116 return (EFAULT); 27117 } 27118 #endif /* _MULTI_DATAMODEL */ 27119 27120 /* 27121 * Since MMC-2 expects max 3 bytes for length, check if the 27122 * length input is greater than 3 bytes 27123 */ 27124 if ((subcode->cdsc_length & 0xFF000000) != 0) { 27125 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27126 "sr_read_all_subcodes: " 27127 "cdrom transfer length too large: %d (limit %d)\n", 27128 subcode->cdsc_length, 0xFFFFFF); 27129 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27130 return (EINVAL); 27131 } 27132 27133 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 27134 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27135 bzero(cdb, CDB_GROUP5); 27136 27137 if (un->un_f_mmc_cap == TRUE) { 27138 cdb[0] = (char)SCMD_READ_CD; 27139 cdb[2] = (char)0xff; 27140 cdb[3] = (char)0xff; 27141 cdb[4] = (char)0xff; 27142 cdb[5] = (char)0xff; 27143 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27144 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27145 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 27146 cdb[10] = 1; 27147 } else { 27148 /* 27149 * Note: A vendor specific command (0xDF) is being used here to 27150 * request a read of all subcodes. 27151 */ 27152 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 27153 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 27154 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27155 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27156 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 27157 } 27158 com->uscsi_cdb = cdb; 27159 com->uscsi_cdblen = CDB_GROUP5; 27160 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 27161 com->uscsi_buflen = buflen; 27162 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27163 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27164 SD_PATH_STANDARD); 27165 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27166 kmem_free(com, sizeof (*com)); 27167 return (rval); 27168 } 27169 27170 27171 /* 27172 * Function: sr_read_subchannel() 27173 * 27174 * Description: This routine is the driver entry point for handling CD-ROM 27175 * ioctl requests to return the Q sub-channel data of the CD 27176 * current position block (CDROMSUBCHNL). The data includes the 27177 * track number, index number, absolute CD-ROM address (LBA or MSF 27178 * format per the user), track relative CD-ROM address (LBA or MSF 27179 * format per the user), control data and audio status. 27180 * 27181 * Arguments: dev - the device 'dev_t' 27182 * data - pointer to user provided cdrom sub-channel structure 27183 * flag - this argument is a pass through to ddi_copyxxx() 27184 * directly from the mode argument of ioctl().
27185 * 27186 * Return Code: the code returned by sd_send_scsi_cmd() 27187 * EFAULT if ddi_copyxxx() fails 27188 * ENXIO if fail ddi_get_soft_state 27189 * EINVAL if data pointer is NULL 27190 */ 27191 27192 static int 27193 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 27194 { 27195 struct sd_lun *un; 27196 struct uscsi_cmd *com; 27197 struct cdrom_subchnl subchanel; 27198 struct cdrom_subchnl *subchnl = &subchanel; 27199 char cdb[CDB_GROUP1]; 27200 caddr_t buffer; 27201 int rval; 27202 27203 if (data == NULL) { 27204 return (EINVAL); 27205 } 27206 27207 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27208 (un->un_state == SD_STATE_OFFLINE)) { 27209 return (ENXIO); 27210 } 27211 27212 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 27213 return (EFAULT); 27214 } 27215 27216 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 27217 bzero(cdb, CDB_GROUP1); 27218 cdb[0] = SCMD_READ_SUBCHANNEL; 27219 /* Set the MSF bit based on the user requested address format */ 27220 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 27221 /* 27222 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 27223 * returned 27224 */ 27225 cdb[2] = 0x40; 27226 /* 27227 * Set byte 3 to specify the return data format. A value of 0x01 27228 * indicates that the CD-ROM current position should be returned. 27229 */ 27230 cdb[3] = 0x01; 27231 cdb[8] = 0x10; 27232 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27233 com->uscsi_cdb = cdb; 27234 com->uscsi_cdblen = CDB_GROUP1; 27235 com->uscsi_bufaddr = buffer; 27236 com->uscsi_buflen = 16; 27237 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27238 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27239 SD_PATH_STANDARD); 27240 if (rval != 0) { 27241 kmem_free(buffer, 16); 27242 kmem_free(com, sizeof (*com)); 27243 return (rval); 27244 } 27245 27246 /* Process the returned Q sub-channel data */ 27247 subchnl->cdsc_audiostatus = buffer[1]; 27248 subchnl->cdsc_adr = (buffer[5] & 0xF0); 27249 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 27250 subchnl->cdsc_trk = buffer[6]; 27251 subchnl->cdsc_ind = buffer[7]; 27252 if (subchnl->cdsc_format & CDROM_LBA) { 27253 subchnl->cdsc_absaddr.lba = 27254 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27255 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27256 subchnl->cdsc_reladdr.lba = 27257 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 27258 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 27259 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 27260 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 27261 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 27262 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 27263 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 27264 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 27265 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 27266 } else { 27267 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 27268 subchnl->cdsc_absaddr.msf.second = buffer[10]; 27269 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 27270 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 27271 subchnl->cdsc_reladdr.msf.second = buffer[14]; 27272 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 27273 } 27274 kmem_free(buffer, 16); 27275 kmem_free(com, sizeof (*com)); 27276 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 27277 != 0) { 27278 return (EFAULT); 27279 } 27280 return (rval); 27281 } 27282 27283 27284 /* 27285 * Function: sr_read_tocentry() 27286 * 
27287 * Description: This routine is the driver entry point for handling CD-ROM 27288 * ioctl requests to read from the Table of Contents (TOC) 27289 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 27290 * fields, the starting address (LBA or MSF format per the user) 27291 * and the data mode if the user specified track is a data track. 27292 * 27293 * Note: The READ HEADER (0x44) command used in this routine is 27294 * obsolete per the SCSI MMC spec but still supported in the 27295 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI, 27296 * therefore the command is still implemented in this routine. 27297 * 27298 * Arguments: dev - the device 'dev_t' 27299 * data - pointer to user provided toc entry structure, 27300 * specifying the track # and the address format 27301 * (LBA or MSF). 27302 * flag - this argument is a pass through to ddi_copyxxx() 27303 * directly from the mode argument of ioctl(). 27304 * 27305 * Return Code: the code returned by sd_send_scsi_cmd() 27306 * EFAULT if ddi_copyxxx() fails 27307 * ENXIO if fail ddi_get_soft_state 27308 * EINVAL if data pointer is NULL 27309 */ 27310 27311 static int 27312 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 27313 { 27314 struct sd_lun *un = NULL; 27315 struct uscsi_cmd *com; 27316 struct cdrom_tocentry toc_entry; 27317 struct cdrom_tocentry *entry = &toc_entry; 27318 caddr_t buffer; 27319 int rval; 27320 char cdb[CDB_GROUP1]; 27321 27322 if (data == NULL) { 27323 return (EINVAL); 27324 } 27325 27326 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27327 (un->un_state == SD_STATE_OFFLINE)) { 27328 return (ENXIO); 27329 } 27330 27331 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 27332 return (EFAULT); 27333 } 27334 27335 /* Validate the requested track and address format */ 27336 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 27337 return (EINVAL); 27338 } 27339 27340 if (entry->cdte_track == 0) { 27341 return (EINVAL); 27342 } 27343 27344 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 27345 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27346 bzero(cdb, CDB_GROUP1); 27347 27348 cdb[0] = SCMD_READ_TOC; 27349 /* Set the MSF bit based on the user requested address format */ 27350 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 27351 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 27352 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 27353 } else { 27354 cdb[6] = entry->cdte_track; 27355 } 27356 27357 /* 27358 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
27359 * (4 byte TOC response header + 8 byte track descriptor) 27360 */ 27361 cdb[8] = 12; 27362 com->uscsi_cdb = cdb; 27363 com->uscsi_cdblen = CDB_GROUP1; 27364 com->uscsi_bufaddr = buffer; 27365 com->uscsi_buflen = 0x0C; 27366 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 27367 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27368 SD_PATH_STANDARD); 27369 if (rval != 0) { 27370 kmem_free(buffer, 12); 27371 kmem_free(com, sizeof (*com)); 27372 return (rval); 27373 } 27374 27375 /* Process the toc entry */ 27376 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 27377 entry->cdte_ctrl = (buffer[5] & 0x0F); 27378 if (entry->cdte_format & CDROM_LBA) { 27379 entry->cdte_addr.lba = 27380 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27381 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27382 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 27383 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 27384 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 27385 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 27386 /* 27387 * Send a READ TOC command using the LBA address format to get 27388 * the LBA for the track requested so it can be used in the 27389 * READ HEADER request 27390 * 27391 * Note: The MSF bit of the READ HEADER command specifies the 27392 * output format. The block address specified in that command 27393 * must be in LBA format. 27394 */ 27395 cdb[1] = 0; 27396 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27397 SD_PATH_STANDARD); 27398 if (rval != 0) { 27399 kmem_free(buffer, 12); 27400 kmem_free(com, sizeof (*com)); 27401 return (rval); 27402 } 27403 } else { 27404 entry->cdte_addr.msf.minute = buffer[9]; 27405 entry->cdte_addr.msf.second = buffer[10]; 27406 entry->cdte_addr.msf.frame = buffer[11]; 27407 /* 27408 * Send a READ TOC command using the LBA address format to get 27409 * the LBA for the track requested so it can be used in the 27410 * READ HEADER request 27411 * 27412 * Note: The MSF bit of the READ HEADER command specifies the 27413 * output format. The block address specified in that command 27414 * must be in LBA format. 27415 */ 27416 cdb[1] = 0; 27417 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27418 SD_PATH_STANDARD); 27419 if (rval != 0) { 27420 kmem_free(buffer, 12); 27421 kmem_free(com, sizeof (*com)); 27422 return (rval); 27423 } 27424 } 27425 27426 /* 27427 * Build and send the READ HEADER command to determine the data mode of 27428 * the user specified track. 27429 */ 27430 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 27431 (entry->cdte_track != CDROM_LEADOUT)) { 27432 bzero(cdb, CDB_GROUP1); 27433 cdb[0] = SCMD_READ_HEADER; 27434 cdb[2] = buffer[8]; 27435 cdb[3] = buffer[9]; 27436 cdb[4] = buffer[10]; 27437 cdb[5] = buffer[11]; 27438 cdb[8] = 0x08; 27439 com->uscsi_buflen = 0x08; 27440 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27441 SD_PATH_STANDARD); 27442 if (rval == 0) { 27443 entry->cdte_datamode = buffer[0]; 27444 } else { 27445 /* 27446 * The READ HEADER command failed; since it is 27447 * obsolete in one spec, it's better to return 27448 * -1 for an invalid track so that we can still 27449 * receive the rest of the TOC data.
27464
27465
27466 /*
27467 * Function: sr_read_tochdr()
27468 *
27469 * Description: This routine is the driver entry point for handling CD-ROM
27470 * ioctl requests to read the Table of Contents (TOC) header
27471 * (CDROMREADTOCHDR). The TOC header consists of the disk starting
27472 * and ending track numbers.
27473 *
27474 * Arguments: dev - the device 'dev_t'
27475 * data - pointer to user provided toc header structure,
27476 * specifying the starting and ending track numbers.
27477 * flag - this argument is a pass through to ddi_copyxxx()
27478 * directly from the mode argument of ioctl().
27479 *
27480 * Return Code: the code returned by sd_send_scsi_cmd()
27481 * EFAULT if ddi_copyxxx() fails
27482 * ENXIO if fail ddi_get_soft_state
27483 * EINVAL if data pointer is NULL
27484 */
27485
27486 static int
27487 sr_read_tochdr(dev_t dev, caddr_t data, int flag)
27488 {
27489 struct sd_lun *un;
27490 struct uscsi_cmd *com;
27491 struct cdrom_tochdr toc_header;
27492 struct cdrom_tochdr *hdr = &toc_header;
27493 char cdb[CDB_GROUP1];
27494 int rval;
27495 caddr_t buffer;
27496
27497 if (data == NULL) {
27498 return (EINVAL);
27499 }
27500
27501 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27502 (un->un_state == SD_STATE_OFFLINE)) {
27503 return (ENXIO);
27504 }
27505
27506 buffer = kmem_zalloc(4, KM_SLEEP);
27507 bzero(cdb, CDB_GROUP1);
27508 cdb[0] = SCMD_READ_TOC;
27509 /*
27510 * Specifying a track number of 0x00 in the READ TOC command indicates
27511 * that the TOC header should be returned.
27512 */
27513 cdb[6] = 0x00;
27514 /*
27515 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
27516 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
27517 */
27518 cdb[8] = 0x04;
27519 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27520 com->uscsi_cdb = cdb;
27521 com->uscsi_cdblen = CDB_GROUP1;
27522 com->uscsi_bufaddr = buffer;
27523 com->uscsi_buflen = 0x04;
27524 com->uscsi_timeout = 300;
27525 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27526
27527 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27528 SD_PATH_STANDARD);
27529 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
27530 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
27531 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
27532 } else {
27533 hdr->cdth_trk0 = buffer[2];
27534 hdr->cdth_trk1 = buffer[3];
27535 }
27536 kmem_free(buffer, 4);
27537 kmem_free(com, sizeof (*com));
27538 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
27539 return (EFAULT);
27540 }
27541 return (rval);
27542 }
27543
27544
27545 /*
27546 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
27547 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
27548 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
27549 * digital audio and extended architecture digital audio. These modes are
27550 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
27551 * MMC specs.
27552 * 27553 * In addition to support for the various data formats these routines also 27554 * include support for devices that implement only the direct access READ 27555 * commands (0x08, 0x28), devices that implement the READ_CD commands 27556 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 27557 * READ CDXA commands (0xD8, 0xDB) 27558 */ 27559 27560 /* 27561 * Function: sr_read_mode1() 27562 * 27563 * Description: This routine is the driver entry point for handling CD-ROM 27564 * ioctl read mode1 requests (CDROMREADMODE1). 27565 * 27566 * Arguments: dev - the device 'dev_t' 27567 * data - pointer to user provided cd read structure specifying 27568 * the lba buffer address and length. 27569 * flag - this argument is a pass through to ddi_copyxxx() 27570 * directly from the mode argument of ioctl(). 27571 * 27572 * Return Code: the code returned by sd_send_scsi_cmd() 27573 * EFAULT if ddi_copyxxx() fails 27574 * ENXIO if fail ddi_get_soft_state 27575 * EINVAL if data pointer is NULL 27576 */ 27577 27578 static int 27579 sr_read_mode1(dev_t dev, caddr_t data, int flag) 27580 { 27581 struct sd_lun *un; 27582 struct cdrom_read mode1_struct; 27583 struct cdrom_read *mode1 = &mode1_struct; 27584 int rval; 27585 sd_ssc_t *ssc; 27586 27587 #ifdef _MULTI_DATAMODEL 27588 /* To support ILP32 applications in an LP64 world */ 27589 struct cdrom_read32 cdrom_read32; 27590 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27591 #endif /* _MULTI_DATAMODEL */ 27592 27593 if (data == NULL) { 27594 return (EINVAL); 27595 } 27596 27597 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27598 (un->un_state == SD_STATE_OFFLINE)) { 27599 return (ENXIO); 27600 } 27601 27602 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27603 "sd_read_mode1: entry: un:0x%p\n", un); 27604 27605 #ifdef _MULTI_DATAMODEL 27606 switch (ddi_model_convert_from(flag & FMODELS)) { 27607 case DDI_MODEL_ILP32: 27608 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27609 return (EFAULT); 27610 } 27611 /* Convert the ILP32 uscsi data from the application to LP64 */ 27612 cdrom_read32tocdrom_read(cdrd32, mode1); 27613 break; 27614 case DDI_MODEL_NONE: 27615 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27616 return (EFAULT); 27617 } 27618 } 27619 #else /* ! _MULTI_DATAMODEL */ 27620 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27621 return (EFAULT); 27622 } 27623 #endif /* _MULTI_DATAMODEL */ 27624 27625 ssc = sd_ssc_init(un); 27626 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr, 27627 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 27628 sd_ssc_fini(ssc); 27629 27630 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27631 "sd_read_mode1: exit: un:0x%p\n", un); 27632 27633 return (rval); 27634 } 27635 27636 27637 /* 27638 * Function: sr_read_cd_mode2() 27639 * 27640 * Description: This routine is the driver entry point for handling CD-ROM 27641 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 27642 * support the READ CD (0xBE) command or the 1st generation 27643 * READ CD (0xD4) command. 27644 * 27645 * Arguments: dev - the device 'dev_t' 27646 * data - pointer to user provided cd read structure specifying 27647 * the lba buffer address and length. 27648 * flag - this argument is a pass through to ddi_copyxxx() 27649 * directly from the mode argument of ioctl(). 
27650 * 27651 * Return Code: the code returned by sd_send_scsi_cmd() 27652 * EFAULT if ddi_copyxxx() fails 27653 * ENXIO if fail ddi_get_soft_state 27654 * EINVAL if data pointer is NULL 27655 */ 27656 27657 static int 27658 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 27659 { 27660 struct sd_lun *un; 27661 struct uscsi_cmd *com; 27662 struct cdrom_read mode2_struct; 27663 struct cdrom_read *mode2 = &mode2_struct; 27664 uchar_t cdb[CDB_GROUP5]; 27665 int nblocks; 27666 int rval; 27667 #ifdef _MULTI_DATAMODEL 27668 /* To support ILP32 applications in an LP64 world */ 27669 struct cdrom_read32 cdrom_read32; 27670 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27671 #endif /* _MULTI_DATAMODEL */ 27672 27673 if (data == NULL) { 27674 return (EINVAL); 27675 } 27676 27677 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27678 (un->un_state == SD_STATE_OFFLINE)) { 27679 return (ENXIO); 27680 } 27681 27682 #ifdef _MULTI_DATAMODEL 27683 switch (ddi_model_convert_from(flag & FMODELS)) { 27684 case DDI_MODEL_ILP32: 27685 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27686 return (EFAULT); 27687 } 27688 /* Convert the ILP32 uscsi data from the application to LP64 */ 27689 cdrom_read32tocdrom_read(cdrd32, mode2); 27690 break; 27691 case DDI_MODEL_NONE: 27692 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27693 return (EFAULT); 27694 } 27695 break; 27696 } 27697 27698 #else /* ! _MULTI_DATAMODEL */ 27699 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27700 return (EFAULT); 27701 } 27702 #endif /* _MULTI_DATAMODEL */ 27703 27704 bzero(cdb, sizeof (cdb)); 27705 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 27706 /* Read command supported by 1st generation atapi drives */ 27707 cdb[0] = SCMD_READ_CDD4; 27708 } else { 27709 /* Universal CD Access Command */ 27710 cdb[0] = SCMD_READ_CD; 27711 } 27712 27713 /* 27714 * Set expected sector type to: 2336s byte, Mode 2 Yellow Book 27715 */ 27716 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 27717 27718 /* set the start address */ 27719 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 27720 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 27721 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 27722 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 27723 27724 /* set the transfer length */ 27725 nblocks = mode2->cdread_buflen / 2336; 27726 cdb[6] = (uchar_t)(nblocks >> 16); 27727 cdb[7] = (uchar_t)(nblocks >> 8); 27728 cdb[8] = (uchar_t)nblocks; 27729 27730 /* set the filter bits */ 27731 cdb[9] = CDROM_READ_CD_USERDATA; 27732 27733 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27734 com->uscsi_cdb = (caddr_t)cdb; 27735 com->uscsi_cdblen = sizeof (cdb); 27736 com->uscsi_bufaddr = mode2->cdread_bufaddr; 27737 com->uscsi_buflen = mode2->cdread_buflen; 27738 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27739 27740 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27741 SD_PATH_STANDARD); 27742 kmem_free(com, sizeof (*com)); 27743 return (rval); 27744 } 27745 27746 27747 /* 27748 * Function: sr_read_mode2() 27749 * 27750 * Description: This routine is the driver entry point for handling CD-ROM 27751 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 27752 * do not support the READ CD (0xBE) command. 27753 * 27754 * Arguments: dev - the device 'dev_t' 27755 * data - pointer to user provided cd read structure specifying 27756 * the lba buffer address and length. 27757 * flag - this argument is a pass through to ddi_copyxxx() 27758 * directly from the mode argument of ioctl(). 
27759 * 27760 * Return Code: the code returned by sd_send_scsi_cmd() 27761 * EFAULT if ddi_copyxxx() fails 27762 * ENXIO if fail ddi_get_soft_state 27763 * EINVAL if data pointer is NULL 27764 * EIO if fail to reset block size 27765 * EAGAIN if commands are in progress in the driver 27766 */ 27767 27768 static int 27769 sr_read_mode2(dev_t dev, caddr_t data, int flag) 27770 { 27771 struct sd_lun *un; 27772 struct cdrom_read mode2_struct; 27773 struct cdrom_read *mode2 = &mode2_struct; 27774 int rval; 27775 uint32_t restore_blksize; 27776 struct uscsi_cmd *com; 27777 uchar_t cdb[CDB_GROUP0]; 27778 int nblocks; 27779 27780 #ifdef _MULTI_DATAMODEL 27781 /* To support ILP32 applications in an LP64 world */ 27782 struct cdrom_read32 cdrom_read32; 27783 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27784 #endif /* _MULTI_DATAMODEL */ 27785 27786 if (data == NULL) { 27787 return (EINVAL); 27788 } 27789 27790 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27791 (un->un_state == SD_STATE_OFFLINE)) { 27792 return (ENXIO); 27793 } 27794 27795 /* 27796 * Because this routine will update the device and driver block size 27797 * being used we want to make sure there are no commands in progress. 27798 * If commands are in progress the user will have to try again. 27799 * 27800 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 27801 * in sdioctl to protect commands from sdioctl through to the top of 27802 * sd_uscsi_strategy. See sdioctl for details. 27803 */ 27804 mutex_enter(SD_MUTEX(un)); 27805 if (un->un_ncmds_in_driver != 1) { 27806 mutex_exit(SD_MUTEX(un)); 27807 return (EAGAIN); 27808 } 27809 mutex_exit(SD_MUTEX(un)); 27810 27811 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27812 "sd_read_mode2: entry: un:0x%p\n", un); 27813 27814 #ifdef _MULTI_DATAMODEL 27815 switch (ddi_model_convert_from(flag & FMODELS)) { 27816 case DDI_MODEL_ILP32: 27817 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27818 return (EFAULT); 27819 } 27820 /* Convert the ILP32 uscsi data from the application to LP64 */ 27821 cdrom_read32tocdrom_read(cdrd32, mode2); 27822 break; 27823 case DDI_MODEL_NONE: 27824 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27825 return (EFAULT); 27826 } 27827 break; 27828 } 27829 #else /* ! 
_MULTI_DATAMODEL */
27830 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
27831 return (EFAULT);
27832 }
27833 #endif /* _MULTI_DATAMODEL */
27834
27835 /* Store the current target block size for restoration later */
27836 restore_blksize = un->un_tgt_blocksize;
27837
27838 /* Change the device and soft state target block size to 2336 */
27839 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
27840 rval = EIO;
27841 goto done;
27842 }
27843
27844
27845 bzero(cdb, sizeof (cdb));
27846
27847 /* set READ operation */
27848 cdb[0] = SCMD_READ;
27849
27850 /* convert the lba from 512 byte block units to 2 kbyte block units */
27851 mode2->cdread_lba >>= 2;
27852
27853 /* set the start address */
27854 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0x1F);
27855 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
27856 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);
27857
27858 /* set the transfer length */
27859 nblocks = mode2->cdread_buflen / 2336;
27860 cdb[4] = (uchar_t)nblocks & 0xFF;
27861
27862 /* build command */
27863 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27864 com->uscsi_cdb = (caddr_t)cdb;
27865 com->uscsi_cdblen = sizeof (cdb);
27866 com->uscsi_bufaddr = mode2->cdread_bufaddr;
27867 com->uscsi_buflen = mode2->cdread_buflen;
27868 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27869
27870 /*
27871 * Issue SCSI command with user space address for read buffer.
27872 *
27873 * This sends the command through the main channel in the driver.
27874 *
27875 * Since this is accessed via an IOCTL call, we go through the
27876 * standard path, so that if the device was powered down, then
27877 * it would be 'awakened' to handle the command.
27878 */
27879 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
27880 SD_PATH_STANDARD);
27881
27882 kmem_free(com, sizeof (*com));
27883
27884 /* Restore the device and soft state target block size */
27885 if (sr_sector_mode(dev, restore_blksize) != 0) {
27886 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27887 "can't switch back to mode 1\n");
27888 /*
27889 * If the READ command succeeded we still need to report
27890 * an error because we failed to reset the block size.
27891 */
27892 if (rval == 0) {
27893 rval = EIO;
27894 }
27895 }
27896
27897 done:
27898 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
27899 "sd_read_mode2: exit: un:0x%p\n", un);
27900
27901 return (rval);
27902 }
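/*
 * Aside: a hedged sketch of the Group 0 (6 byte) READ CDB packing used
 * by sr_read_mode2() above. The LBA field is only 21 bits wide (the low
 * 5 bits of byte 1 plus bytes 2 and 3), and byte 4 carries the block
 * count. The helper below is illustrative only and not part of the
 * driver, which builds the CDB inline:
 */
#if 0
static void
pack_group0_read_cdb(uchar_t cdb[CDB_GROUP0], uint32_t lba, uchar_t nblocks)
{
	cdb[0] = SCMD_READ;			/* READ(6), opcode 0x08 */
	cdb[1] = (uchar_t)((lba >> 16) & 0x1F);	/* LBA bits 20..16 */
	cdb[2] = (uchar_t)((lba >> 8) & 0xFF);	/* LBA bits 15..8 */
	cdb[3] = (uchar_t)(lba & 0xFF);		/* LBA bits 7..0 */
	cdb[4] = nblocks;			/* transfer length */
	cdb[5] = 0;				/* control byte */
}
#endif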
27903
27904
27905 /*
27906 * Function: sr_sector_mode()
27907 *
27908 * Description: This utility function is used by sr_read_mode2 to set the target
27909 * block size based on the user specified size. This is a legacy
27910 * implementation based upon a vendor specific mode page.
27911 *
27912 * Arguments: dev - the device 'dev_t'
27913 * blksize - flag indicating if the block size is being set to
27914 * 2336 or 512.
27915 *
27916 * Return Code: the code returned by sd_send_scsi_MODE_SENSE() or
27917 * sd_send_scsi_MODE_SELECT()
27918 * ENXIO if fail ddi_get_soft_state
27919 *
27920 */
27921
27922 static int
27923 sr_sector_mode(dev_t dev, uint32_t blksize)
27924 {
27925 struct sd_lun *un;
27926 uchar_t *sense;
27927 uchar_t *select;
27928 int rval;
27929 sd_ssc_t *ssc;
27930
27931 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27932 (un->un_state == SD_STATE_OFFLINE)) {
27933 return (ENXIO);
27934 }
27935
27936 sense = kmem_zalloc(20, KM_SLEEP);
27937
27938 /* Note: This is a vendor specific mode page (0x81) */
27939 ssc = sd_ssc_init(un);
27940 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81,
27941 SD_PATH_STANDARD);
27942 sd_ssc_fini(ssc);
27943 if (rval != 0) {
27944 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
27945 "sr_sector_mode: Mode Sense failed\n");
27946 kmem_free(sense, 20);
27947 return (rval);
27948 }
27949 select = kmem_zalloc(20, KM_SLEEP);
27950 select[3] = 0x08;
27951 select[10] = ((blksize >> 8) & 0xff);
27952 select[11] = (blksize & 0xff);
27953 select[12] = 0x01;
27954 select[13] = 0x06;
27955 select[14] = sense[14];
27956 select[15] = sense[15];
27957 if (blksize == SD_MODE2_BLKSIZE) {
27958 select[14] |= 0x01;
27959 }
27960
27961 ssc = sd_ssc_init(un);
27962 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20,
27963 SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
27964 sd_ssc_fini(ssc);
27965 if (rval != 0) {
27966 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
27967 "sr_sector_mode: Mode Select failed\n");
27968 } else {
27969 /*
27970 * Only update the softstate block size if we successfully
27971 * changed the device block mode.
27972 */
27973 mutex_enter(SD_MUTEX(un));
27974 sd_update_block_info(un, blksize, 0);
27975 mutex_exit(SD_MUTEX(un));
27976 }
27977 kmem_free(sense, 20);
27978 kmem_free(select, 20);
27979 return (rval);
27980 }
27981
27982
27983 /*
27984 * Function: sr_read_cdda()
27985 *
27986 * Description: This routine is the driver entry point for handling CD-ROM
27987 * ioctl requests to return CD-DA or subcode data (CDROMCDDA). If
27988 * the target supports CDDA these requests are handled via a vendor
27989 * specific command (0xD8). If the target does not support CDDA
27990 * these requests are handled via the READ CD command (0xBE).
27991 *
27992 * Arguments: dev - the device 'dev_t'
27993 * data - pointer to user provided CD-DA structure specifying
27994 * the track starting address, transfer length, and
27995 * subcode options.
27996 * flag - this argument is a pass through to ddi_copyxxx()
27997 * directly from the mode argument of ioctl().
27998 * 27999 * Return Code: the code returned by sd_send_scsi_cmd() 28000 * EFAULT if ddi_copyxxx() fails 28001 * ENXIO if fail ddi_get_soft_state 28002 * EINVAL if invalid arguments are provided 28003 * ENOTTY 28004 */ 28005 28006 static int 28007 sr_read_cdda(dev_t dev, caddr_t data, int flag) 28008 { 28009 struct sd_lun *un; 28010 struct uscsi_cmd *com; 28011 struct cdrom_cdda *cdda; 28012 int rval; 28013 size_t buflen; 28014 char cdb[CDB_GROUP5]; 28015 28016 #ifdef _MULTI_DATAMODEL 28017 /* To support ILP32 applications in an LP64 world */ 28018 struct cdrom_cdda32 cdrom_cdda32; 28019 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 28020 #endif /* _MULTI_DATAMODEL */ 28021 28022 if (data == NULL) { 28023 return (EINVAL); 28024 } 28025 28026 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28027 return (ENXIO); 28028 } 28029 28030 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 28031 28032 #ifdef _MULTI_DATAMODEL 28033 switch (ddi_model_convert_from(flag & FMODELS)) { 28034 case DDI_MODEL_ILP32: 28035 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 28036 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28037 "sr_read_cdda: ddi_copyin Failed\n"); 28038 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28039 return (EFAULT); 28040 } 28041 /* Convert the ILP32 uscsi data from the application to LP64 */ 28042 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 28043 break; 28044 case DDI_MODEL_NONE: 28045 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28046 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28047 "sr_read_cdda: ddi_copyin Failed\n"); 28048 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28049 return (EFAULT); 28050 } 28051 break; 28052 } 28053 #else /* ! _MULTI_DATAMODEL */ 28054 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28055 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28056 "sr_read_cdda: ddi_copyin Failed\n"); 28057 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28058 return (EFAULT); 28059 } 28060 #endif /* _MULTI_DATAMODEL */ 28061 28062 /* 28063 * Since MMC-2 expects max 3 bytes for length, check if the 28064 * length input is greater than 3 bytes 28065 */ 28066 if ((cdda->cdda_length & 0xFF000000) != 0) { 28067 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 28068 "cdrom transfer length too large: %d (limit %d)\n", 28069 cdda->cdda_length, 0xFFFFFF); 28070 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28071 return (EINVAL); 28072 } 28073 28074 switch (cdda->cdda_subcode) { 28075 case CDROM_DA_NO_SUBCODE: 28076 buflen = CDROM_BLK_2352 * cdda->cdda_length; 28077 break; 28078 case CDROM_DA_SUBQ: 28079 buflen = CDROM_BLK_2368 * cdda->cdda_length; 28080 break; 28081 case CDROM_DA_ALL_SUBCODE: 28082 buflen = CDROM_BLK_2448 * cdda->cdda_length; 28083 break; 28084 case CDROM_DA_SUBCODE_ONLY: 28085 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 28086 break; 28087 default: 28088 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28089 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 28090 cdda->cdda_subcode); 28091 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28092 return (EINVAL); 28093 } 28094 28095 /* Build and send the command */ 28096 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28097 bzero(cdb, CDB_GROUP5); 28098 28099 if (un->un_f_cfg_cdda == TRUE) { 28100 cdb[0] = (char)SCMD_READ_CD; 28101 cdb[1] = 0x04; 28102 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28103 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28104 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28105 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28106 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28107 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28108 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 28109 cdb[9] = 0x10; 28110 switch (cdda->cdda_subcode) { 28111 case CDROM_DA_NO_SUBCODE : 28112 cdb[10] = 0x0; 28113 break; 28114 case CDROM_DA_SUBQ : 28115 cdb[10] = 0x2; 28116 break; 28117 case CDROM_DA_ALL_SUBCODE : 28118 cdb[10] = 0x1; 28119 break; 28120 case CDROM_DA_SUBCODE_ONLY : 28121 /* FALLTHROUGH */ 28122 default : 28123 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28124 kmem_free(com, sizeof (*com)); 28125 return (ENOTTY); 28126 } 28127 } else { 28128 cdb[0] = (char)SCMD_READ_CDDA; 28129 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28130 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28131 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28132 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28133 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 28134 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28135 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28136 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 28137 cdb[10] = cdda->cdda_subcode; 28138 } 28139 28140 com->uscsi_cdb = cdb; 28141 com->uscsi_cdblen = CDB_GROUP5; 28142 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 28143 com->uscsi_buflen = buflen; 28144 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28145 28146 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28147 SD_PATH_STANDARD); 28148 28149 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28150 kmem_free(com, sizeof (*com)); 28151 return (rval); 28152 } 28153 28154 28155 /* 28156 * Function: sr_read_cdxa() 28157 * 28158 * Description: This routine is the driver entry point for handling CD-ROM 28159 * ioctl requests to return CD-XA (Extended Architecture) data. 28160 * (CDROMCDXA). 28161 * 28162 * Arguments: dev - the device 'dev_t' 28163 * data - pointer to user provided CD-XA structure specifying 28164 * the data starting address, transfer length, and format 28165 * flag - this argument is a pass through to ddi_copyxxx() 28166 * directly from the mode argument of ioctl(). 28167 * 28168 * Return Code: the code returned by sd_send_scsi_cmd() 28169 * EFAULT if ddi_copyxxx() fails 28170 * ENXIO if fail ddi_get_soft_state 28171 * EINVAL if data pointer is NULL 28172 */ 28173 28174 static int 28175 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 28176 { 28177 struct sd_lun *un; 28178 struct uscsi_cmd *com; 28179 struct cdrom_cdxa *cdxa; 28180 int rval; 28181 size_t buflen; 28182 char cdb[CDB_GROUP5]; 28183 uchar_t read_flags; 28184 28185 #ifdef _MULTI_DATAMODEL 28186 /* To support ILP32 applications in an LP64 world */ 28187 struct cdrom_cdxa32 cdrom_cdxa32; 28188 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 28189 #endif /* _MULTI_DATAMODEL */ 28190 28191 if (data == NULL) { 28192 return (EINVAL); 28193 } 28194 28195 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28196 return (ENXIO); 28197 } 28198 28199 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 28200 28201 #ifdef _MULTI_DATAMODEL 28202 switch (ddi_model_convert_from(flag & FMODELS)) { 28203 case DDI_MODEL_ILP32: 28204 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 28205 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28206 return (EFAULT); 28207 } 28208 /* 28209 * Convert the ILP32 uscsi data from the 28210 * application to LP64 for internal use. 
28211 */
28212 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
28213 break;
28214 case DDI_MODEL_NONE:
28215 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
28216 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28217 return (EFAULT);
28218 }
28219 break;
28220 }
28221 #else /* ! _MULTI_DATAMODEL */
28222 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
28223 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28224 return (EFAULT);
28225 }
28226 #endif /* _MULTI_DATAMODEL */
28227
28228 /*
28229 * Since MMC-2 expects at most 3 bytes for the length, check if the
28230 * length input is greater than 3 bytes.
28231 */
28232 if ((cdxa->cdxa_length & 0xFF000000) != 0) {
28233 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
28234 "cdrom transfer length too large: %d (limit %d)\n",
28235 cdxa->cdxa_length, 0xFFFFFF);
28236 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28237 return (EINVAL);
28238 }
28239
28240 switch (cdxa->cdxa_format) {
28241 case CDROM_XA_DATA:
28242 buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
28243 read_flags = 0x10;
28244 break;
28245 case CDROM_XA_SECTOR_DATA:
28246 buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
28247 read_flags = 0xf8;
28248 break;
28249 case CDROM_XA_DATA_W_ERROR:
28250 buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
28251 read_flags = 0xfc;
28252 break;
28253 default:
28254 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28255 "sr_read_cdxa: Format '0x%x' Not Supported\n",
28256 cdxa->cdxa_format);
28257 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28258 return (EINVAL);
28259 }
28260
28261 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28262 bzero(cdb, CDB_GROUP5);
28263 if (un->un_f_mmc_cap == TRUE) {
28264 cdb[0] = (char)SCMD_READ_CD;
28265 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
28266 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
28267 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
28268 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
28269 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
28270 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
28271 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
28272 cdb[9] = (char)read_flags;
28273 } else {
28274 /*
28275 * Note: A vendor specific command (0xDB) is being used here to
28276 * request a read of all subcodes.
28277 */ 28278 cdb[0] = (char)SCMD_READ_CDXA; 28279 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 28280 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 28281 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 28282 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 28283 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 28284 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 28285 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 28286 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 28287 cdb[10] = cdxa->cdxa_format; 28288 } 28289 com->uscsi_cdb = cdb; 28290 com->uscsi_cdblen = CDB_GROUP5; 28291 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 28292 com->uscsi_buflen = buflen; 28293 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28294 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28295 SD_PATH_STANDARD); 28296 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28297 kmem_free(com, sizeof (*com)); 28298 return (rval); 28299 } 28300 28301 28302 /* 28303 * Function: sr_eject() 28304 * 28305 * Description: This routine is the driver entry point for handling CD-ROM 28306 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 28307 * 28308 * Arguments: dev - the device 'dev_t' 28309 * 28310 * Return Code: the code returned by sd_send_scsi_cmd() 28311 */ 28312 28313 static int 28314 sr_eject(dev_t dev) 28315 { 28316 struct sd_lun *un; 28317 int rval; 28318 sd_ssc_t *ssc; 28319 28320 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28321 (un->un_state == SD_STATE_OFFLINE)) { 28322 return (ENXIO); 28323 } 28324 28325 /* 28326 * To prevent race conditions with the eject 28327 * command, keep track of an eject command as 28328 * it progresses. If we are already handling 28329 * an eject command in the driver for the given 28330 * unit and another request to eject is received 28331 * immediately return EAGAIN so we don't lose 28332 * the command if the current eject command fails. 28333 */ 28334 mutex_enter(SD_MUTEX(un)); 28335 if (un->un_f_ejecting == TRUE) { 28336 mutex_exit(SD_MUTEX(un)); 28337 return (EAGAIN); 28338 } 28339 un->un_f_ejecting = TRUE; 28340 mutex_exit(SD_MUTEX(un)); 28341 28342 ssc = sd_ssc_init(un); 28343 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 28344 SD_PATH_STANDARD); 28345 sd_ssc_fini(ssc); 28346 28347 if (rval != 0) { 28348 mutex_enter(SD_MUTEX(un)); 28349 un->un_f_ejecting = FALSE; 28350 mutex_exit(SD_MUTEX(un)); 28351 return (rval); 28352 } 28353 28354 ssc = sd_ssc_init(un); 28355 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_EJECT, 28356 SD_PATH_STANDARD); 28357 sd_ssc_fini(ssc); 28358 28359 if (rval == 0) { 28360 mutex_enter(SD_MUTEX(un)); 28361 sr_ejected(un); 28362 un->un_mediastate = DKIO_EJECTED; 28363 un->un_f_ejecting = FALSE; 28364 cv_broadcast(&un->un_state_cv); 28365 mutex_exit(SD_MUTEX(un)); 28366 } else { 28367 mutex_enter(SD_MUTEX(un)); 28368 un->un_f_ejecting = FALSE; 28369 mutex_exit(SD_MUTEX(un)); 28370 } 28371 return (rval); 28372 } 28373 28374 28375 /* 28376 * Function: sr_ejected() 28377 * 28378 * Description: This routine updates the soft state structure to invalidate the 28379 * geometry information after the media has been ejected or a 28380 * media eject has been detected. 
28381 *
28382 * Arguments: un - driver soft state (unit) structure
28383 */
28384
28385 static void
28386 sr_ejected(struct sd_lun *un)
28387 {
28388 struct sd_errstats *stp;
28389
28390 ASSERT(un != NULL);
28391 ASSERT(mutex_owned(SD_MUTEX(un)));
28392
28393 un->un_f_blockcount_is_valid = FALSE;
28394 un->un_f_tgt_blocksize_is_valid = FALSE;
28395 mutex_exit(SD_MUTEX(un));
28396 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
28397 mutex_enter(SD_MUTEX(un));
28398
28399 if (un->un_errstats != NULL) {
28400 stp = (struct sd_errstats *)un->un_errstats->ks_data;
28401 stp->sd_capacity.value.ui64 = 0;
28402 }
28403 }
28404
28405
28406 /*
28407 * Function: sr_check_wp()
28408 *
28409 * Description: This routine checks the write protection of a removable
28410 * media disk or hotpluggable device via the write protect bit of
28411 * the Mode Page Header device specific field. Some devices choke
28412 * on an unsupported mode page. To work around this issue, this
28413 * routine uses the 0x3f mode page (request for all pages) for
28414 * all device types.
28415 *
28416 * Arguments: dev - the device 'dev_t'
28417 *
28418 * Return Code: int indicating if the device is write protected (1) or not (0)
28419 *
28420 * Context: Kernel thread.
28421 *
28422 */
28423
28424 static int
28425 sr_check_wp(dev_t dev)
28426 {
28427 struct sd_lun *un;
28428 uchar_t device_specific;
28429 uchar_t *sense;
28430 int hdrlen;
28431 int rval = FALSE;
28432 int status;
28433 sd_ssc_t *ssc;
28434
28435 /*
28436 * Note: The return codes for this routine should be reworked to
28437 * properly handle the case of a NULL softstate.
28438 */
28439 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
28440 return (FALSE);
28441 }
28442
28443 if (un->un_f_cfg_is_atapi == TRUE) {
28444 /*
28445 * The mode page contents are not required; set the allocation
28446 * length for the mode page header only
28447 */
28448 hdrlen = MODE_HEADER_LENGTH_GRP2;
28449 sense = kmem_zalloc(hdrlen, KM_SLEEP);
28450 ssc = sd_ssc_init(un);
28451 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen,
28452 MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
28453 sd_ssc_fini(ssc);
28454 if (status != 0)
28455 goto err_exit;
28456 device_specific =
28457 ((struct mode_header_grp2 *)sense)->device_specific;
28458 } else {
28459 hdrlen = MODE_HEADER_LENGTH;
28460 sense = kmem_zalloc(hdrlen, KM_SLEEP);
28461 ssc = sd_ssc_init(un);
28462 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen,
28463 MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
28464 sd_ssc_fini(ssc);
28465 if (status != 0)
28466 goto err_exit;
28467 device_specific =
28468 ((struct mode_header *)sense)->device_specific;
28469 }
28470
28471
28472 /*
28473 * Not all devices understand this query; if the mode sense above
28474 * failed, rval remains FALSE and such devices are assumed to be
28475 * writable. Otherwise report the device specific WRITE_PROTECT bit.
28476 */
28477 if (device_specific & WRITE_PROTECT) {
28478 rval = TRUE;
28479 }
28480
28481 err_exit:
28482 kmem_free(sense, hdrlen);
28483 return (rval);
28484 }
28485
28486 /*
28487 * Function: sr_volume_ctrl()
28488 *
28489 * Description: This routine is the driver entry point for handling CD-ROM
28490 * audio output volume ioctl requests (CDROMVOLCTRL).
28491 *
28492 * Arguments: dev - the device 'dev_t'
28493 * data - pointer to user audio volume control structure
28494 * flag - this argument is a pass through to ddi_copyxxx()
28495 * directly from the mode argument of ioctl().
28496 * 28497 * Return Code: the code returned by sd_send_scsi_cmd() 28498 * EFAULT if ddi_copyxxx() fails 28499 * ENXIO if fail ddi_get_soft_state 28500 * EINVAL if data pointer is NULL 28501 * 28502 */ 28503 28504 static int 28505 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 28506 { 28507 struct sd_lun *un; 28508 struct cdrom_volctrl volume; 28509 struct cdrom_volctrl *vol = &volume; 28510 uchar_t *sense_page; 28511 uchar_t *select_page; 28512 uchar_t *sense; 28513 uchar_t *select; 28514 int sense_buflen; 28515 int select_buflen; 28516 int rval; 28517 sd_ssc_t *ssc; 28518 28519 if (data == NULL) { 28520 return (EINVAL); 28521 } 28522 28523 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28524 (un->un_state == SD_STATE_OFFLINE)) { 28525 return (ENXIO); 28526 } 28527 28528 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 28529 return (EFAULT); 28530 } 28531 28532 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 28533 struct mode_header_grp2 *sense_mhp; 28534 struct mode_header_grp2 *select_mhp; 28535 int bd_len; 28536 28537 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 28538 select_buflen = MODE_HEADER_LENGTH_GRP2 + 28539 MODEPAGE_AUDIO_CTRL_LEN; 28540 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 28541 select = kmem_zalloc(select_buflen, KM_SLEEP); 28542 ssc = sd_ssc_init(un); 28543 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 28544 sense_buflen, MODEPAGE_AUDIO_CTRL, 28545 SD_PATH_STANDARD); 28546 sd_ssc_fini(ssc); 28547 28548 if (rval != 0) { 28549 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28550 "sr_volume_ctrl: Mode Sense Failed\n"); 28551 kmem_free(sense, sense_buflen); 28552 kmem_free(select, select_buflen); 28553 return (rval); 28554 } 28555 sense_mhp = (struct mode_header_grp2 *)sense; 28556 select_mhp = (struct mode_header_grp2 *)select; 28557 bd_len = (sense_mhp->bdesc_length_hi << 8) | 28558 sense_mhp->bdesc_length_lo; 28559 if (bd_len > MODE_BLK_DESC_LENGTH) { 28560 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28561 "sr_volume_ctrl: Mode Sense returned invalid " 28562 "block descriptor length\n"); 28563 kmem_free(sense, sense_buflen); 28564 kmem_free(select, select_buflen); 28565 return (EIO); 28566 } 28567 sense_page = (uchar_t *) 28568 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 28569 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 28570 select_mhp->length_msb = 0; 28571 select_mhp->length_lsb = 0; 28572 select_mhp->bdesc_length_hi = 0; 28573 select_mhp->bdesc_length_lo = 0; 28574 } else { 28575 struct mode_header *sense_mhp, *select_mhp; 28576 28577 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28578 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28579 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 28580 select = kmem_zalloc(select_buflen, KM_SLEEP); 28581 ssc = sd_ssc_init(un); 28582 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 28583 sense_buflen, MODEPAGE_AUDIO_CTRL, 28584 SD_PATH_STANDARD); 28585 sd_ssc_fini(ssc); 28586 28587 if (rval != 0) { 28588 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28589 "sr_volume_ctrl: Mode Sense Failed\n"); 28590 kmem_free(sense, sense_buflen); 28591 kmem_free(select, select_buflen); 28592 return (rval); 28593 } 28594 sense_mhp = (struct mode_header *)sense; 28595 select_mhp = (struct mode_header *)select; 28596 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 28597 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28598 "sr_volume_ctrl: Mode Sense returned invalid " 28599 "block descriptor length\n"); 28600 
kmem_free(sense, sense_buflen); 28601 kmem_free(select, select_buflen); 28602 return (EIO); 28603 } 28604 sense_page = (uchar_t *) 28605 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 28606 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 28607 select_mhp->length = 0; 28608 select_mhp->bdesc_length = 0; 28609 } 28610 /* 28611 * Note: An audio control data structure could be created and overlayed 28612 * on the following in place of the array indexing method implemented. 28613 */ 28614 28615 /* Build the select data for the user volume data */ 28616 select_page[0] = MODEPAGE_AUDIO_CTRL; 28617 select_page[1] = 0xE; 28618 /* Set the immediate bit */ 28619 select_page[2] = 0x04; 28620 /* Zero out reserved fields */ 28621 select_page[3] = 0x00; 28622 select_page[4] = 0x00; 28623 /* Return sense data for fields not to be modified */ 28624 select_page[5] = sense_page[5]; 28625 select_page[6] = sense_page[6]; 28626 select_page[7] = sense_page[7]; 28627 /* Set the user specified volume levels for channel 0 and 1 */ 28628 select_page[8] = 0x01; 28629 select_page[9] = vol->channel0; 28630 select_page[10] = 0x02; 28631 select_page[11] = vol->channel1; 28632 /* Channel 2 and 3 are currently unsupported so return the sense data */ 28633 select_page[12] = sense_page[12]; 28634 select_page[13] = sense_page[13]; 28635 select_page[14] = sense_page[14]; 28636 select_page[15] = sense_page[15]; 28637 28638 ssc = sd_ssc_init(un); 28639 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 28640 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select, 28641 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28642 } else { 28643 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 28644 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28645 } 28646 sd_ssc_fini(ssc); 28647 28648 kmem_free(sense, sense_buflen); 28649 kmem_free(select, select_buflen); 28650 return (rval); 28651 } 28652 28653 28654 /* 28655 * Function: sr_read_sony_session_offset() 28656 * 28657 * Description: This routine is the driver entry point for handling CD-ROM 28658 * ioctl requests for session offset information. (CDROMREADOFFSET) 28659 * The address of the first track in the last session of a 28660 * multi-session CD-ROM is returned 28661 * 28662 * Note: This routine uses a vendor specific key value in the 28663 * command control field without implementing any vendor check here 28664 * or in the ioctl routine. 28665 * 28666 * Arguments: dev - the device 'dev_t' 28667 * data - pointer to an int to hold the requested address 28668 * flag - this argument is a pass through to ddi_copyxxx() 28669 * directly from the mode argument of ioctl(). 28670 * 28671 * Return Code: the code returned by sd_send_scsi_cmd() 28672 * EFAULT if ddi_copyxxx() fails 28673 * ENXIO if fail ddi_get_soft_state 28674 * EINVAL if data pointer is NULL 28675 */ 28676 28677 static int 28678 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 28679 { 28680 struct sd_lun *un; 28681 struct uscsi_cmd *com; 28682 caddr_t buffer; 28683 char cdb[CDB_GROUP1]; 28684 int session_offset = 0; 28685 int rval; 28686 28687 if (data == NULL) { 28688 return (EINVAL); 28689 } 28690 28691 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28692 (un->un_state == SD_STATE_OFFLINE)) { 28693 return (ENXIO); 28694 } 28695 28696 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 28697 bzero(cdb, CDB_GROUP1); 28698 cdb[0] = SCMD_READ_TOC; 28699 /* 28700 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 
28701 * (4 byte TOC response header + 8 byte response data)
28702 */
28703 cdb[8] = SONY_SESSION_OFFSET_LEN;
28704 /* Byte 9 is the control byte. A vendor specific value is used */
28705 cdb[9] = SONY_SESSION_OFFSET_KEY;
28706 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28707 com->uscsi_cdb = cdb;
28708 com->uscsi_cdblen = CDB_GROUP1;
28709 com->uscsi_bufaddr = buffer;
28710 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
28711 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28712
28713 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
28714 SD_PATH_STANDARD);
28715 if (rval != 0) {
28716 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
28717 kmem_free(com, sizeof (*com));
28718 return (rval);
28719 }
28720 if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
28721 session_offset =
28722 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
28723 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
28724 /*
28725 * The offset is returned in units of the current lbasize
28726 * blocks. Convert to 2k blocks before returning to the user.
28727 */
28728 if (un->un_tgt_blocksize == CDROM_BLK_512) {
28729 session_offset >>= 2;
28730 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
28731 session_offset >>= 1;
28732 }
28733 }
28734
28735 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
28736 rval = EFAULT;
28737 }
28738
28739 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
28740 kmem_free(com, sizeof (*com));
28741 return (rval);
28742 }
28743
28744
28745 /*
28746 * Function: sd_wm_cache_constructor()
28747 *
28748 * Description: Cache Constructor for the wmap cache for the read/modify/write
28749 * devices.
28750 *
28751 * Arguments: wm - A pointer to the sd_w_map to be initialized.
28752 * un - sd_lun structure for the device.
28753 * flag - the km flags passed to constructor
28754 *
28755 * Return Code: 0 on success.
28756 * -1 on failure.
28757 */
28758
28759 /*ARGSUSED*/
28760 static int
28761 sd_wm_cache_constructor(void *wm, void *un, int flags)
28762 {
28763 bzero(wm, sizeof (struct sd_w_map));
28764 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
28765 return (0);
28766 }
28767
28768
28769 /*
28770 * Function: sd_wm_cache_destructor()
28771 *
28772 * Description: Cache destructor for the wmap cache for the read/modify/write
28773 * devices.
28774 *
28775 * Arguments: wm - A pointer to the sd_w_map to be destroyed.
28776 * un - sd_lun structure for the device.
28777 */
28778 /*ARGSUSED*/
28779 static void
28780 sd_wm_cache_destructor(void *wm, void *un)
28781 {
28782 cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
28783 }
28784
28785
28786 /*
28787 * Function: sd_range_lock()
28788 *
28789 * Description: Lock the specified range of blocks to ensure that a
28790 * read-modify-write is atomic and that no other i/o writes
28791 * to the same location. The range is specified in terms
28792 * of start and end blocks. Block numbers are the actual
28793 * media block numbers and not system block numbers.
28794 *
28795 * Arguments: un - sd_lun structure for the device.
28796 * startb - The starting block number
28797 * endb - The end block number
28798 * typ - type of i/o - simple/read_modify_write
28799 *
28800 * Return Code: wm - pointer to the wmap structure.
28801 *
28802 * Context: This routine can sleep.
28803 */
28804
28805 static struct sd_w_map *
28806 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
28807 {
28808 struct sd_w_map *wmp = NULL;
28809 struct sd_w_map *sl_wmp = NULL;
28810 struct sd_w_map *tmp_wmp;
28811 wm_state state = SD_WM_CHK_LIST;
28812
28813
28814 ASSERT(un != NULL);
28815 ASSERT(!mutex_owned(SD_MUTEX(un)));
28816
28817 mutex_enter(SD_MUTEX(un));
28818
28819 while (state != SD_WM_DONE) {
28820
28821 switch (state) {
28822 case SD_WM_CHK_LIST:
28823 /*
28824 * This is the starting state. Check the wmap list
28825 * to see if the range is currently available.
28826 */
28827 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
28828 /*
28829 * If this is a simple write and no rmw
28830 * i/o is pending then try to lock the
28831 * range as the range should be available.
28832 */
28833 state = SD_WM_LOCK_RANGE;
28834 } else {
28835 tmp_wmp = sd_get_range(un, startb, endb);
28836 if (tmp_wmp != NULL) {
28837 if ((wmp != NULL) && ONLIST(un, wmp)) {
28838 /*
28839 * Should not keep onlist wmps
28840 * while waiting; this macro
28841 * will also set wmp = NULL.
28842 */
28843 FREE_ONLIST_WMAP(un, wmp);
28844 }
28845 /*
28846 * sl_wmp is the wmap on which the wait
28847 * is done; since tmp_wmp points
28848 * to the inuse wmap, set sl_wmp to
28849 * tmp_wmp and change the state to sleep.
28850 */
28851 sl_wmp = tmp_wmp;
28852 state = SD_WM_WAIT_MAP;
28853 } else {
28854 state = SD_WM_LOCK_RANGE;
28855 }
28856
28857 }
28858 break;
28859
28860 case SD_WM_LOCK_RANGE:
28861 ASSERT(un->un_wm_cache);
28862 /*
28863 * The range needs to be locked; try to get a wmap.
28864 * First attempt it with KM_NOSLEEP: we want to avoid
28865 * a sleep if possible, as we will have to release the
28866 * sd mutex if we have to sleep.
28867 */
28868 if (wmp == NULL)
28869 wmp = kmem_cache_alloc(un->un_wm_cache,
28870 KM_NOSLEEP);
28871 if (wmp == NULL) {
28872 mutex_exit(SD_MUTEX(un));
28873 _NOTE(DATA_READABLE_WITHOUT_LOCK
28874 (sd_lun::un_wm_cache))
28875 wmp = kmem_cache_alloc(un->un_wm_cache,
28876 KM_SLEEP);
28877 mutex_enter(SD_MUTEX(un));
28878 /*
28879 * We released the mutex, so recheck and go
28880 * back to the check list state.
28881 */
28882 state = SD_WM_CHK_LIST;
28883 } else {
28884 /*
28885 * We exit the state machine since we
28886 * have the wmap. Do the housekeeping first:
28887 * place the wmap on the wmap list if it is not
28888 * on it already, and then set the state to done.
28889 */
28890 wmp->wm_start = startb;
28891 wmp->wm_end = endb;
28892 wmp->wm_flags = typ | SD_WM_BUSY;
28893 if (typ & SD_WTYPE_RMW) {
28894 un->un_rmw_count++;
28895 }
28896 /*
28897 * If not already on the list then link it in.
28898 */
28899 if (!ONLIST(un, wmp)) {
28900 wmp->wm_next = un->un_wm;
28901 wmp->wm_prev = NULL;
28902 if (wmp->wm_next)
28903 wmp->wm_next->wm_prev = wmp;
28904 un->un_wm = wmp;
28905 }
28906 state = SD_WM_DONE;
28907 }
28908 break;
28909
28910 case SD_WM_WAIT_MAP:
28911 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
28912 /*
28913 * Wait is done on sl_wmp, which is set in the
28914 * check_list state.
28915 */
28916 sl_wmp->wm_wanted_count++;
28917 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
28918 sl_wmp->wm_wanted_count--;
28919 /*
28920 * We can reuse the memory from the completed sl_wmp
28921 * lock range for our new lock, but only if no one is
28922 * waiting for it.
28923 */
28924 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
28925 if (sl_wmp->wm_wanted_count == 0) {
28926 if (wmp != NULL)
28927 CHK_N_FREEWMP(un, wmp);
28928 wmp = sl_wmp;
28929 }
28930 sl_wmp = NULL;
28931 /*
28932 * After waking up, need to recheck for availability of
28933 * range.
28934 */
28935 state = SD_WM_CHK_LIST;
28936 break;
28937
28938 default:
28939 panic("sd_range_lock: "
28940 "Unknown state %d in sd_range_lock", state);
28941 /*NOTREACHED*/
28942 } /* switch(state) */
28943
28944 } /* while(state != SD_WM_DONE) */
28945
28946 mutex_exit(SD_MUTEX(un));
28947
28948 ASSERT(wmp != NULL);
28949
28950 return (wmp);
28951 }
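/*
 * Aside: a minimal usage sketch of the range lock pair defined here and
 * in sd_range_unlock() below. This is illustrative only; the real
 * callers live in the read-modify-write path of this driver.
 */
#if 0
static void
rmw_range_example(struct sd_lun *un, daddr_t start, daddr_t end)
{
	struct sd_w_map *wm;

	/* Blocks until [start, end] is not held by any other writer. */
	wm = sd_range_lock(un, start, end, SD_WTYPE_RMW);

	/* ... read the target blocks, merge in the new data, write back ... */

	/* Wakes any waiters, or frees the map if none are waiting. */
	sd_range_unlock(un, wm);
}
#endif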
28952
28953
28954 /*
28955 * Function: sd_get_range()
28956 *
28957 * Description: Find whether there is any I/O overlapping this one.
28958 * Returns the write-map of the 1st such I/O, NULL otherwise.
28959 *
28960 * Arguments: un - sd_lun structure for the device.
28961 * startb - The starting block number
28962 * endb - The end block number
28963 *
28964 * Return Code: wm - pointer to the wmap structure.
28965 */
28966
28967 static struct sd_w_map *
28968 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
28969 {
28970 struct sd_w_map *wmp;
28971
28972 ASSERT(un != NULL);
28973
28974 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
28975 if (!(wmp->wm_flags & SD_WM_BUSY)) {
28976 continue;
28977 }
28978 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
28979 break;
28980 }
28981 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
28982 break;
28983 }
28984 }
28985
28986 return (wmp);
28987 }
28988
28989
28990 /*
28991 * Function: sd_free_inlist_wmap()
28992 *
28993 * Description: Unlink and free a write map struct.
28994 *
28995 * Arguments: un - sd_lun structure for the device.
28996 * wmp - sd_w_map which needs to be unlinked.
28997 */
28998
28999 static void
29000 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
29001 {
29002 ASSERT(un != NULL);
29003
29004 if (un->un_wm == wmp) {
29005 un->un_wm = wmp->wm_next;
29006 } else {
29007 wmp->wm_prev->wm_next = wmp->wm_next;
29008 }
29009
29010 if (wmp->wm_next) {
29011 wmp->wm_next->wm_prev = wmp->wm_prev;
29012 }
29013
29014 wmp->wm_next = wmp->wm_prev = NULL;
29015
29016 kmem_cache_free(un->un_wm_cache, wmp);
29017 }
29018
29019
29020 /*
29021 * Function: sd_range_unlock()
29022 *
29023 * Description: Unlock the range locked by wm.
29024 * Free the write map if nobody else is waiting on it.
29025 *
29026 * Arguments: un - sd_lun structure for the device.
29027 * wmp - sd_w_map which needs to be unlinked.
29028 */
29029
29030 static void
29031 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
29032 {
29033 ASSERT(un != NULL);
29034 ASSERT(wm != NULL);
29035 ASSERT(!mutex_owned(SD_MUTEX(un)));
29036
29037 mutex_enter(SD_MUTEX(un));
29038
29039 if (wm->wm_flags & SD_WTYPE_RMW) {
29040 un->un_rmw_count--;
29041 }
29042
29043 if (wm->wm_wanted_count) {
29044 wm->wm_flags = 0;
29045 /*
29046 * Broadcast that the wmap is available now.
29047 */
29048 cv_broadcast(&wm->wm_avail);
29049 } else {
29050 /*
29051 * If no one is waiting on the map, it should be freed.
29052 */
29053 sd_free_inlist_wmap(un, wm);
29054 }
29055
29056 mutex_exit(SD_MUTEX(un));
29057 }
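/*
 * Aside: when un_sys_blocksize differs from un_tgt_blocksize, a request
 * expressed in system blocks must be expanded to whole target blocks,
 * which is what forces the read-modify-write handled by the task below.
 * A hedged sketch of the alignment arithmetic, with illustrative names;
 * the driver's real mapping is done in its blocksize-mapping iostart
 * layer:
 */
#if 0
static void
align_to_target_blocks(uint64_t sys_start, uint64_t sys_count,
    uint32_t sys_bs, uint32_t tgt_bs,
    uint64_t *tgt_start, uint64_t *tgt_count)
{
	uint64_t first_byte = sys_start * sys_bs;
	uint64_t end_byte = (sys_start + sys_count) * sys_bs;

	/* Round the start down and the end up to target block boundaries. */
	*tgt_start = first_byte / tgt_bs;
	*tgt_count = ((end_byte + tgt_bs - 1) / tgt_bs) - *tgt_start;
}
#endif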
29058
29059
29060 /*
29061 * Function: sd_read_modify_write_task
29062 *
29063 * Description: Called from a taskq thread to initiate the write phase of
29064 * a read-modify-write request. This is used for targets where
29065 * un->un_sys_blocksize != un->un_tgt_blocksize.
29066 *
29067 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
29068 *
29069 * Context: Called under taskq thread context.
29070 */
29071
29072 static void
29073 sd_read_modify_write_task(void *arg)
29074 {
29075 struct sd_mapblocksize_info *bsp;
29076 struct buf *bp;
29077 struct sd_xbuf *xp;
29078 struct sd_lun *un;
29079
29080 bp = arg; /* The bp is given in arg */
29081 ASSERT(bp != NULL);
29082
29083 /* Get the pointer to the layer-private data struct */
29084 xp = SD_GET_XBUF(bp);
29085 ASSERT(xp != NULL);
29086 bsp = xp->xb_private;
29087 ASSERT(bsp != NULL);
29088
29089 un = SD_GET_UN(bp);
29090 ASSERT(un != NULL);
29091 ASSERT(!mutex_owned(SD_MUTEX(un)));
29092
29093 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29094 "sd_read_modify_write_task: entry: buf:0x%p\n", bp);
29095
29096 /*
29097 * This is the write phase of a read-modify-write request, called
29098 * under the context of a taskq thread in response to the completion
29099 * of the read portion of the rmw request under interrupt
29100 * context. The write request must be sent from here down the iostart
29101 * chain as if it were being sent from sd_mapblocksize_iostart(), so
29102 * we use the layer index saved in the layer-private data area.
29103 */
29104 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);
29105
29106 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29107 "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
29108 }
29109
29110
29111 /*
29112 * Function: sddump_do_read_of_rmw()
29113 *
29114 * Description: This routine will be called from sddump(). If sddump() is
29115 * called with an I/O which is not aligned on a device
29116 * blocksize boundary, then the write has to be converted to
29117 * a read-modify-write. Do the read part here in order to
29118 * keep sddump simple. Note that the sd_mutex is held across
29119 * the call to this routine.
29120 *
29121 * Arguments: un - sd_lun
29122 * blkno - block number in terms of media block size.
29123 * nblk - number of blocks.
29124 * bpp - pointer to pointer to the buf structure. On return
29125 * from this function, *bpp points to the valid buffer
29126 * to which the write has to be done.
29127 *
29128 * Return Code: 0 for success or errno-type return code
29129 */
29130
29131 static int
29132 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
29133 struct buf **bpp)
29134 {
29135 int err;
29136 int i;
29137 int rval;
29138 struct buf *bp;
29139 struct scsi_pkt *pkt = NULL;
29140 uint32_t target_blocksize;
29141
29142 ASSERT(un != NULL);
29143 ASSERT(mutex_owned(SD_MUTEX(un)));
29144
29145 target_blocksize = un->un_tgt_blocksize;
29146
29147 mutex_exit(SD_MUTEX(un));
29148
29149 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
29150 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
29151 if (bp == NULL) {
29152 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29153 "no resources for dumping; giving up");
29154 err = ENOMEM;
29155 goto done;
29156 }
29157
29158 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
29159 blkno, nblk);
29160 if (rval != 0) {
29161 scsi_free_consistent_buf(bp);
29162 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29163 "no resources for dumping; giving up");
29164 err = ENOMEM;
29165 goto done;
29166 }
29167
29168 pkt->pkt_flags |= FLAG_NOINTR;
29169
29170 err = EIO;
29171 for (i = 0; i < SD_NDUMP_RETRIES; i++) {
29172
29173 /*
29174 * Scsi_poll returns 0 (success) if the command completes and
29175 * the status block is STATUS_GOOD.
We should only check 29176 * errors if this condition is not true. Even then we should 29177 * send our own request sense packet only if we have a check 29178 * condition and auto request sense has not been performed by 29179 * the hba. 29180 */ 29181 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 29182 29183 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 29184 err = 0; 29185 break; 29186 } 29187 29188 /* 29189 * Check CMD_DEV_GONE 1st, give up if device is gone, 29190 * no need to read RQS data. 29191 */ 29192 if (pkt->pkt_reason == CMD_DEV_GONE) { 29193 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29194 "Error while dumping state with rmw..." 29195 "Device is gone\n"); 29196 break; 29197 } 29198 29199 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 29200 SD_INFO(SD_LOG_DUMP, un, 29201 "sddump: read failed with CHECK, try # %d\n", i); 29202 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 29203 (void) sd_send_polled_RQS(un); 29204 } 29205 29206 continue; 29207 } 29208 29209 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 29210 int reset_retval = 0; 29211 29212 SD_INFO(SD_LOG_DUMP, un, 29213 "sddump: read failed with BUSY, try # %d\n", i); 29214 29215 if (un->un_f_lun_reset_enabled == TRUE) { 29216 reset_retval = scsi_reset(SD_ADDRESS(un), 29217 RESET_LUN); 29218 } 29219 if (reset_retval == 0) { 29220 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 29221 } 29222 (void) sd_send_polled_RQS(un); 29223 29224 } else { 29225 SD_INFO(SD_LOG_DUMP, un, 29226 "sddump: read failed with 0x%x, try # %d\n", 29227 SD_GET_PKT_STATUS(pkt), i); 29228 mutex_enter(SD_MUTEX(un)); 29229 sd_reset_target(un, pkt); 29230 mutex_exit(SD_MUTEX(un)); 29231 } 29232 29233 /* 29234 * If we are not getting anywhere with lun/target resets, 29235 * let's reset the bus. 29236 */ 29237 if (i > SD_NDUMP_RETRIES/2) { 29238 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 29239 (void) sd_send_polled_RQS(un); 29240 } 29241 29242 } 29243 scsi_destroy_pkt(pkt); 29244 29245 if (err != 0) { 29246 scsi_free_consistent_buf(bp); 29247 *bpp = NULL; 29248 } else { 29249 *bpp = bp; 29250 } 29251 29252 done: 29253 mutex_enter(SD_MUTEX(un)); 29254 return (err); 29255 } 29256 29257 29258 /* 29259 * Function: sd_failfast_flushq 29260 * 29261 * Description: Take all bp's on the wait queue that have B_FAILFAST set 29262 * in b_flags and move them onto the failfast queue, then kick 29263 * off a thread to return all bp's on the failfast queue to 29264 * their owners with an error set. 29265 * 29266 * Arguments: un - pointer to the soft state struct for the instance. 29267 * 29268 * Context: may execute in interrupt context. 29269 */ 29270 29271 static void 29272 sd_failfast_flushq(struct sd_lun *un) 29273 { 29274 struct buf *bp; 29275 struct buf *next_waitq_bp; 29276 struct buf *prev_waitq_bp = NULL; 29277 29278 ASSERT(un != NULL); 29279 ASSERT(mutex_owned(SD_MUTEX(un))); 29280 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 29281 ASSERT(un->un_failfast_bp == NULL); 29282 29283 SD_TRACE(SD_LOG_IO_FAILFAST, un, 29284 "sd_failfast_flushq: entry: un:0x%p\n", un); 29285 29286 /* 29287 * Check if we should flush all bufs when entering failfast state, or 29288 * just those with B_FAILFAST set. 29289 */ 29290 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 29291 /* 29292 * Move *all* bp's on the wait queue to the failfast flush 29293 * queue, including those that do NOT have B_FAILFAST set. 
29258 /* 29259 * Function: sd_failfast_flushq 29260 * 29261 * Description: Take all bp's on the wait queue that have B_FAILFAST set 29262 * in b_flags and move them onto the failfast queue, then 29263 * return all bp's on the failfast queue to 29264 * their owners with an error set. 29265 * 29266 * Arguments: un - pointer to the soft state struct for the instance. 29267 * 29268 * Context: may execute in interrupt context. 29269 */ 29270 29271 static void 29272 sd_failfast_flushq(struct sd_lun *un) 29273 { 29274 struct buf *bp; 29275 struct buf *next_waitq_bp; 29276 struct buf *prev_waitq_bp = NULL; 29277 29278 ASSERT(un != NULL); 29279 ASSERT(mutex_owned(SD_MUTEX(un))); 29280 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 29281 ASSERT(un->un_failfast_bp == NULL); 29282 29283 SD_TRACE(SD_LOG_IO_FAILFAST, un, 29284 "sd_failfast_flushq: entry: un:0x%p\n", un); 29285 29286 /* 29287 * Check if we should flush all bufs when entering failfast state, or 29288 * just those with B_FAILFAST set. 29289 */ 29290 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 29291 /* 29292 * Move *all* bp's on the wait queue to the failfast flush 29293 * queue, including those that do NOT have B_FAILFAST set. 29294 */ 29295 if (un->un_failfast_headp == NULL) { 29296 ASSERT(un->un_failfast_tailp == NULL); 29297 un->un_failfast_headp = un->un_waitq_headp; 29298 } else { 29299 ASSERT(un->un_failfast_tailp != NULL); 29300 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 29301 } 29302 29303 un->un_failfast_tailp = un->un_waitq_tailp; 29304 29305 /* update kstat for each bp moved out of the waitq */ 29306 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 29307 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 29308 } 29309 29310 /* empty the waitq */ 29311 un->un_waitq_headp = un->un_waitq_tailp = NULL; 29312 29313 } else { 29314 /* 29315 * Go through the wait queue, pick off all entries with 29316 * B_FAILFAST set, and move these onto the failfast queue. 29317 */ 29318 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 29319 /* 29320 * Save the pointer to the next bp on the wait queue, 29321 * so we get to it on the next iteration of this loop. 29322 */ 29323 next_waitq_bp = bp->av_forw; 29324 29325 /* 29326 * If this bp from the wait queue does NOT have 29327 * B_FAILFAST set, just move on to the next element 29328 * in the wait queue. Note, this is the only place 29329 * where it is correct to set prev_waitq_bp. 29330 */ 29331 if ((bp->b_flags & B_FAILFAST) == 0) { 29332 prev_waitq_bp = bp; 29333 continue; 29334 } 29335 29336 /* 29337 * Remove the bp from the wait queue. 29338 */ 29339 if (bp == un->un_waitq_headp) { 29340 /* The bp is the first element of the waitq. */ 29341 un->un_waitq_headp = next_waitq_bp; 29342 if (un->un_waitq_headp == NULL) { 29343 /* The wait queue is now empty */ 29344 un->un_waitq_tailp = NULL; 29345 } 29346 } else { 29347 /* 29348 * The bp is either somewhere in the middle 29349 * or at the end of the wait queue. 29350 */ 29351 ASSERT(un->un_waitq_headp != NULL); 29352 ASSERT(prev_waitq_bp != NULL); 29353 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 29354 == 0); 29355 if (bp == un->un_waitq_tailp) { 29356 /* bp is the last entry on the waitq. */ 29357 ASSERT(next_waitq_bp == NULL); 29358 un->un_waitq_tailp = prev_waitq_bp; 29359 } 29360 prev_waitq_bp->av_forw = next_waitq_bp; 29361 } 29362 bp->av_forw = NULL; 29363 29364 /* 29365 * update kstat since the bp is moved out of 29366 * the waitq 29367 */ 29368 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 29369 29370 /* 29371 * Now put the bp onto the failfast queue. 29372 */ 29373 if (un->un_failfast_headp == NULL) { 29374 /* failfast queue is currently empty */ 29375 ASSERT(un->un_failfast_tailp == NULL); 29376 un->un_failfast_headp = 29377 un->un_failfast_tailp = bp; 29378 } else { 29379 /* Add the bp to the end of the failfast q */ 29380 ASSERT(un->un_failfast_tailp != NULL); 29381 ASSERT(un->un_failfast_tailp->b_flags & 29382 B_FAILFAST); 29383 un->un_failfast_tailp->av_forw = bp; 29384 un->un_failfast_tailp = bp; 29385 } 29386 } 29387 } 29388 29389 /* 29390 * Now return all bp's on the failfast queue to their owners. 29391 */ 29392 while ((bp = un->un_failfast_headp) != NULL) { 29393 29394 un->un_failfast_headp = bp->av_forw; 29395 if (un->un_failfast_headp == NULL) { 29396 un->un_failfast_tailp = NULL; 29397 } 29398 29399 /* 29400 * We want to return the bp with a failure error code, but 29401 * we do not want a call to sd_start_cmds() to occur here, 29402 * so use sd_return_failed_command_no_restart() instead of 29403 * sd_return_failed_command(). 29404 */ 29405 sd_return_failed_command_no_restart(un, bp, EIO); 29406 } 29407 29408 /* Flush the xbuf queues if required. */ 29409 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) { 29410 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback); 29411 } 29412 29413 SD_TRACE(SD_LOG_IO_FAILFAST, un, 29414 "sd_failfast_flushq: exit: un:0x%p\n", un); 29415 } 29416 29417
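/*
 * Note: the following is a minimal sketch (illustrative only, not part of
 * the driver) of how an upper layer opts an I/O into the failfast
 * treatment implemented above: it sets B_FAILFAST in b_flags before the
 * buf is handed to the driver. Once this instance enters
 * SD_FAILFAST_ACTIVE, sd_failfast_flushq() returns such queued bufs
 * immediately with EIO instead of retrying them. The dev, blkno and
 * iobuf values below are hypothetical.
 *
 *	struct buf *bp = getrbuf(KM_SLEEP);
 *
 *	bp->b_flags = B_READ | B_BUSY | B_FAILFAST;
 *	bp->b_edev = dev;
 *	bp->b_blkno = blkno;
 *	bp->b_bcount = DEV_BSIZE;
 *	bp->b_un.b_addr = iobuf;
 *	(void) bdev_strategy(bp);
 *	if (biowait(bp) != 0)
 *		(the request failed fast rather than stalling on retries)
 *	freerbuf(bp);
 */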
29418 /* 29419 * Function: sd_failfast_flushq_callback 29420 * 29421 * Description: Return TRUE if the given bp meets the criteria for failfast 29422 * flushing. Used with ddi_xbuf_flushq(9F). 29423 * 29424 * Arguments: bp - ptr to buf struct to be examined. 29425 * 29426 * Context: Any 29427 */ 29428 29429 static int 29430 sd_failfast_flushq_callback(struct buf *bp) 29431 { 29432 /* 29433 * Return TRUE if (1) we want to flush ALL bufs when the failfast 29434 * state is entered; OR (2) the given bp has B_FAILFAST set. 29435 */ 29436 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) || 29437 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE); 29438 } 29439 29440 29441 29442 /* 29443 * Function: sd_setup_next_xfer 29444 * 29445 * Description: Prepare the next I/O operation using DMA_PARTIAL 29446 * 29447 */ 29448 29449 static int 29450 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 29451 struct scsi_pkt *pkt, struct sd_xbuf *xp) 29452 { 29453 ssize_t num_blks_not_xfered; 29454 daddr_t strt_blk_num; 29455 ssize_t bytes_not_xfered; 29456 int rval; 29457 29458 ASSERT(pkt->pkt_resid == 0); 29459 29460 /* 29461 * Calculate the next block number and amount to be transferred. 29462 * 29463 * How much data has NOT been transferred to the HBA yet. 29464 */ 29465 bytes_not_xfered = xp->xb_dma_resid; 29466 29467 /* 29468 * Figure out how many blocks have NOT been transferred to the HBA yet. 29469 */ 29470 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered); 29471 29472 /* 29473 * Set the starting block number to the end of what WAS transferred. 29474 */ 29475 strt_blk_num = xp->xb_blkno + 29476 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered); 29477 29478 /* 29479 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt 29480 * will call scsi_init_pkt with NULL_FUNC so we do not have to release 29481 * the disk mutex here. 29482 */ 29483 rval = sd_setup_next_rw_pkt(un, pkt, bp, 29484 strt_blk_num, num_blks_not_xfered); 29485 29486 if (rval == 0) { 29487 29488 /* 29489 * Success. 29490 * 29491 * Adjust things if there are still more blocks to be 29492 * transferred. 29493 */ 29494 xp->xb_dma_resid = pkt->pkt_resid; 29495 pkt->pkt_resid = 0; 29496 29497 return (1); 29498 } 29499 29500 /* 29501 * There's really only one possible error return from 29502 * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt 29503 * returns NULL. 29504 */ 29505 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 29506 29507 bp->b_resid = bp->b_bcount; 29508 bp->b_flags |= B_ERROR; 29509 29510 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29511 "Error setting up next portion of DMA transfer\n"); 29512 29513 return (0); 29514 } 29515
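/*
 * Note: the following is a minimal sketch (illustrative only, not part of
 * the driver) of the PKT_DMA_PARTIAL pattern that sd_setup_next_xfer()
 * serves: when a packet is created with PKT_DMA_PARTIAL, scsi_init_pkt(9F)
 * may bind only part of the buffer and reports the unbound byte count in
 * pkt_resid. The target driver saves that residual and, as each partial
 * command completes, advances the packet window until nothing remains.
 *
 *	(at initial packet setup)
 *	xp->xb_dma_resid = pkt->pkt_resid;	(bytes not yet bound)
 *	pkt->pkt_resid = 0;
 *
 *	(in the command-completion path)
 *	if (xp->xb_dma_resid != 0 &&
 *	    sd_setup_next_xfer(un, bp, pkt, xp) != 0) {
 *		(re-issue pkt for the next window via scsi_transport())
 *	}
 */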
29516 /* 29517 * Function: sd_panic_for_res_conflict 29518 * 29519 * Description: Call panic with a string formatted with "Reservation Conflict" 29520 * and a human-readable identifier indicating the SD instance 29521 * that experienced the reservation conflict. 29522 * 29523 * Arguments: un - pointer to the soft state struct for the instance. 29524 * 29525 * Context: may execute in interrupt context. 29526 */ 29527 29528 #define SD_RESV_CONFLICT_FMT_LEN 40 29529 void 29530 sd_panic_for_res_conflict(struct sd_lun *un) 29531 { 29532 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN]; 29533 char path_str[MAXPATHLEN]; 29534 29535 (void) snprintf(panic_str, sizeof (panic_str), 29536 "Reservation Conflict\nDisk: %s", 29537 ddi_pathname(SD_DEVINFO(un), path_str)); 29538 29539 panic(panic_str); 29540 } 29541 29542 /* 29543 * Note: The following sd_faultinjection_ioctl() routines implement 29544 * driver support for fault injection, allowing error analysis by 29545 * causing faults in multiple layers of the driver. 29546 * 29547 */ 29548 29549 #ifdef SD_FAULT_INJECTION 29550 static uint_t sd_fault_injection_on = 0; 29551 29552 /* 29553 * Function: sd_faultinjection_ioctl() 29554 * 29555 * Description: This routine is the driver entry point for handling 29556 * fault-injection ioctls used to inject errors into the 29557 * layer model 29558 * 29559 * Arguments: cmd - the ioctl cmd received 29560 * arg - the argument from the user, also used to return data 29561 */ 29562 29563 static void 29564 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { 29565 29566 uint_t i = 0; 29567 uint_t rval; 29568 29569 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 29570 29571 mutex_enter(SD_MUTEX(un)); 29572 29573 switch (cmd) { 29574 case SDIOCRUN: 29575 /* Allow pushed faults to be injected */ 29576 SD_INFO(SD_LOG_SDTEST, un, 29577 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 29578 29579 sd_fault_injection_on = 1; 29580 29581 SD_INFO(SD_LOG_IOERR, un, 29582 "sd_faultinjection_ioctl: run finished\n"); 29583 break; 29584 29585 case SDIOCSTART: 29586 /* Start Injection Session */ 29587 SD_INFO(SD_LOG_SDTEST, un, 29588 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 29589 29590 sd_fault_injection_on = 0; 29591 un->sd_injection_mask = 0xFFFFFFFF; 29592 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 29593 un->sd_fi_fifo_pkt[i] = NULL; 29594 un->sd_fi_fifo_xb[i] = NULL; 29595 un->sd_fi_fifo_un[i] = NULL; 29596 un->sd_fi_fifo_arq[i] = NULL; 29597 } 29598 un->sd_fi_fifo_start = 0; 29599 un->sd_fi_fifo_end = 0; 29600 29601 mutex_enter(&(un->un_fi_mutex)); 29602 un->sd_fi_log[0] = '\0'; 29603 un->sd_fi_buf_len = 0; 29604 mutex_exit(&(un->un_fi_mutex)); 29605 29606 SD_INFO(SD_LOG_IOERR, un, 29607 "sd_faultinjection_ioctl: start finished\n"); 29608 break; 29609 29610 case SDIOCSTOP: 29611 /* Stop Injection Session */ 29612 SD_INFO(SD_LOG_SDTEST, un, 29613 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 29614 sd_fault_injection_on = 0; 29615 un->sd_injection_mask = 0x0; 29616 29617 /* Empty stray or unused structs from the fifo */ 29618 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 29619 if (un->sd_fi_fifo_pkt[i] != NULL) { 29620 kmem_free(un->sd_fi_fifo_pkt[i], 29621 sizeof (struct sd_fi_pkt)); 29622 } 29623 if (un->sd_fi_fifo_xb[i] != NULL) { 29624 kmem_free(un->sd_fi_fifo_xb[i], 29625 sizeof (struct sd_fi_xb)); 29626 } 29627 if (un->sd_fi_fifo_un[i] != NULL) { 29628 kmem_free(un->sd_fi_fifo_un[i], 29629 sizeof (struct sd_fi_un)); 29630 } 29631 if (un->sd_fi_fifo_arq[i] != NULL) { 29632 kmem_free(un->sd_fi_fifo_arq[i], 29633 sizeof (struct sd_fi_arq)); 29634 } 29635 un->sd_fi_fifo_pkt[i] = NULL; 29636 un->sd_fi_fifo_un[i] = NULL; 29637 un->sd_fi_fifo_xb[i] = NULL; 29638 un->sd_fi_fifo_arq[i] = NULL; 29639 } 29640 un->sd_fi_fifo_start = 0; 29641 un->sd_fi_fifo_end = 0; 29642 29643 SD_INFO(SD_LOG_IOERR, un, 29644 "sd_faultinjection_ioctl: stop finished\n"); 29645 break; 29646 29647 case SDIOCINSERTPKT: 29648 /*
Store a packet struct to be pushed onto the fifo */ 29649 SD_INFO(SD_LOG_SDTEST, un, 29650 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 29651 29652 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 29653 29654 sd_fault_injection_on = 0; 29655 29656 /* No more than SD_FI_MAX_ERROR entries are allowed in the queue */ 29657 if (un->sd_fi_fifo_pkt[i] != NULL) { 29658 kmem_free(un->sd_fi_fifo_pkt[i], 29659 sizeof (struct sd_fi_pkt)); 29660 } 29661 if (arg != NULL) { 29662 un->sd_fi_fifo_pkt[i] = 29663 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 29664 if (un->sd_fi_fifo_pkt[i] == NULL) { 29665 /* Alloc failed; don't store anything */ 29666 break; 29667 } 29668 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 29669 sizeof (struct sd_fi_pkt), 0); 29670 if (rval == -1) { 29671 kmem_free(un->sd_fi_fifo_pkt[i], 29672 sizeof (struct sd_fi_pkt)); 29673 un->sd_fi_fifo_pkt[i] = NULL; 29674 } 29675 } else { 29676 SD_INFO(SD_LOG_IOERR, un, 29677 "sd_faultinjection_ioctl: pkt null\n"); 29678 } 29679 break; 29680 29681 case SDIOCINSERTXB: 29682 /* Store an xb struct to be pushed onto the fifo */ 29683 SD_INFO(SD_LOG_SDTEST, un, 29684 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 29685 29686 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 29687 29688 sd_fault_injection_on = 0; 29689 29690 if (un->sd_fi_fifo_xb[i] != NULL) { 29691 kmem_free(un->sd_fi_fifo_xb[i], 29692 sizeof (struct sd_fi_xb)); 29693 un->sd_fi_fifo_xb[i] = NULL; 29694 } 29695 if (arg != NULL) { 29696 un->sd_fi_fifo_xb[i] = 29697 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 29698 if (un->sd_fi_fifo_xb[i] == NULL) { 29699 /* Alloc failed; don't store anything */ 29700 break; 29701 } 29702 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 29703 sizeof (struct sd_fi_xb), 0); 29704 29705 if (rval == -1) { 29706 kmem_free(un->sd_fi_fifo_xb[i], 29707 sizeof (struct sd_fi_xb)); 29708 un->sd_fi_fifo_xb[i] = NULL; 29709 } 29710 } else { 29711 SD_INFO(SD_LOG_IOERR, un, 29712 "sd_faultinjection_ioctl: xb null\n"); 29713 } 29714 break; 29715 29716 case SDIOCINSERTUN: 29717 /* Store a un struct to be pushed onto the fifo */ 29718 SD_INFO(SD_LOG_SDTEST, un, 29719 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 29720 29721 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 29722 29723 sd_fault_injection_on = 0; 29724 29725 if (un->sd_fi_fifo_un[i] != NULL) { 29726 kmem_free(un->sd_fi_fifo_un[i], 29727 sizeof (struct sd_fi_un)); 29728 un->sd_fi_fifo_un[i] = NULL; 29729 } 29730 if (arg != NULL) { 29731 un->sd_fi_fifo_un[i] = 29732 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 29733 if (un->sd_fi_fifo_un[i] == NULL) { 29734 /* Alloc failed; don't store anything */ 29735 break; 29736 } 29737 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 29738 sizeof (struct sd_fi_un), 0); 29739 if (rval == -1) { 29740 kmem_free(un->sd_fi_fifo_un[i], 29741 sizeof (struct sd_fi_un)); 29742 un->sd_fi_fifo_un[i] = NULL; 29743 } 29744 29745 } else { 29746 SD_INFO(SD_LOG_IOERR, un, 29747 "sd_faultinjection_ioctl: un null\n"); 29748 } 29749 29750 break; 29751 29752 case SDIOCINSERTARQ: 29753 /* Store an arq struct to be pushed onto the fifo */ 29754 SD_INFO(SD_LOG_SDTEST, un, 29755 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 29756 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 29757 29758 sd_fault_injection_on = 0; 29759 29760 if (un->sd_fi_fifo_arq[i] != NULL) { 29761 kmem_free(un->sd_fi_fifo_arq[i], 29762 sizeof (struct sd_fi_arq)); 29763 un->sd_fi_fifo_arq[i] = NULL; 29764 } 29765 if (arg != NULL) { 29766 un->sd_fi_fifo_arq[i] = 29767 kmem_alloc(sizeof (struct
sd_fi_arq), KM_NOSLEEP); 29768 if (un->sd_fi_fifo_arq[i] == NULL) { 29769 /* Alloc failed; don't store anything */ 29770 break; 29771 } 29772 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i], 29773 sizeof (struct sd_fi_arq), 0); 29774 if (rval == -1) { 29775 kmem_free(un->sd_fi_fifo_arq[i], 29776 sizeof (struct sd_fi_arq)); 29777 un->sd_fi_fifo_arq[i] = NULL; 29778 } 29779 29780 } else { 29781 SD_INFO(SD_LOG_IOERR, un, 29782 "sd_faultinjection_ioctl: arq null\n"); 29783 } 29784 29785 break; 29786 29787 case SDIOCPUSH: 29788 /* Push stored xb, pkt, un, and arq onto the fifo */ 29789 sd_fault_injection_on = 0; 29790 29791 if (arg != NULL) { 29792 rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0); 29793 if (rval != -1 && 29794 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 29795 un->sd_fi_fifo_end += i; 29796 } 29797 } else { 29798 SD_INFO(SD_LOG_IOERR, un, 29799 "sd_faultinjection_ioctl: push arg null\n"); 29800 if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 29801 un->sd_fi_fifo_end++; 29802 } 29803 } 29804 SD_INFO(SD_LOG_IOERR, un, 29805 "sd_faultinjection_ioctl: push to end=%d\n", 29806 un->sd_fi_fifo_end); 29807 break; 29808 29809 case SDIOCRETRIEVE: 29810 /* Return the log buffer from the injection session */ 29811 SD_INFO(SD_LOG_SDTEST, un, 29812 "sd_faultinjection_ioctl: Injecting Fault Retrieve"); 29813 29814 sd_fault_injection_on = 0; 29815 29816 mutex_enter(&(un->un_fi_mutex)); 29817 rval = ddi_copyout(un->sd_fi_log, (void *)arg, 29818 un->sd_fi_buf_len+1, 0); 29819 mutex_exit(&(un->un_fi_mutex)); 29820 29821 if (rval == -1) { 29822 /* 29823 * arg is possibly invalid; set 29824 * it to NULL for the return 29825 */ 29826 arg = NULL; 29827 } 29828 break; 29829 } 29830 29831 mutex_exit(SD_MUTEX(un)); 29832 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:" 29833 " exit\n"); 29834 } 29835 29836 29837 /* 29838 * Function: sd_injection_log() 29839 * 29840 * Description: This routine adds buf to the already existing injection log 29841 * for retrieval via sd_faultinjection_ioctl for use in fault 29842 * detection and recovery 29843 * 29844 * Arguments: buf - the string to add to the log 29845 */ 29846 29847 static void 29848 sd_injection_log(char *buf, struct sd_lun *un) 29849 { 29850 uint_t len; 29851 29852 ASSERT(un != NULL); 29853 ASSERT(buf != NULL); 29854 29855 mutex_enter(&(un->un_fi_mutex)); 29856 29857 len = min(strlen(buf), 255); 29858 /* Add the logged value to the injection log to be returned later */ 29859 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) { 29860 uint_t offset = strlen((char *)un->sd_fi_log); 29861 char *destp = (char *)un->sd_fi_log + offset; 29862 int i; 29863 for (i = 0; i < len; i++) { 29864 *destp++ = *buf++; 29865 } 29866 un->sd_fi_buf_len += len; 29867 un->sd_fi_log[un->sd_fi_buf_len] = '\0'; 29868 } 29869 29870 mutex_exit(&(un->un_fi_mutex)); 29871 } 29872 29873
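/*
 * Note: the following is a minimal userland sketch (illustrative only;
 * the fd, the injected fault value, and the log buffer size are
 * hypothetical) of driving the fault-injection ioctls above: start a
 * session, stage an error in the fifo, push it, then arm injection so
 * sd_faultinjection() applies it to a subsequent command.
 *
 *	struct sd_fi_pkt fi_pkt = { 0 };
 *	uint_t npush = 1;
 *	char log[4096];
 *
 *	fi_pkt.pkt_reason = CMD_TRAN_ERR;		(fault to inject)
 *	(void) ioctl(fd, SDIOCSTART, NULL);		(reset fifo and log)
 *	(void) ioctl(fd, SDIOCINSERTPKT, &fi_pkt);	(stage pkt struct)
 *	(void) ioctl(fd, SDIOCPUSH, &npush);		(push staged entry)
 *	(void) ioctl(fd, SDIOCRUN, NULL);		(arm injection)
 *	(run I/O against the device, then fetch the injection log)
 *	(void) ioctl(fd, SDIOCRETRIEVE, log);
 */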
29874 /* 29875 * Function: sd_faultinjection() 29876 * 29877 * Description: This routine takes the pkt and changes its 29878 * content based on the error injection scenario. 29879 * 29880 * Arguments: pktp - packet to be changed 29881 */ 29882 29883 static void 29884 sd_faultinjection(struct scsi_pkt *pktp) 29885 { 29886 uint_t i; 29887 struct sd_fi_pkt *fi_pkt; 29888 struct sd_fi_xb *fi_xb; 29889 struct sd_fi_un *fi_un; 29890 struct sd_fi_arq *fi_arq; 29891 struct buf *bp; 29892 struct sd_xbuf *xb; 29893 struct sd_lun *un; 29894 29895 ASSERT(pktp != NULL); 29896 29897 /* pull bp, xb, and un from pktp */ 29898 bp = (struct buf *)pktp->pkt_private; 29899 xb = SD_GET_XBUF(bp); 29900 un = SD_GET_UN(bp); 29901 29902 ASSERT(un != NULL); 29903 29904 mutex_enter(SD_MUTEX(un)); 29905 29906 SD_TRACE(SD_LOG_SDTEST, un, 29907 "sd_faultinjection: entry Injection from sdintr\n"); 29908 29909 /* if injection is off, return */ 29910 if (sd_fault_injection_on == 0 || 29911 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 29912 mutex_exit(SD_MUTEX(un)); 29913 return; 29914 } 29915 29916 SD_INFO(SD_LOG_SDTEST, un, 29917 "sd_faultinjection: is working for copying\n"); 29918 29919 /* take the next set off the fifo */ 29920 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 29921 29922 fi_pkt = un->sd_fi_fifo_pkt[i]; 29923 fi_xb = un->sd_fi_fifo_xb[i]; 29924 fi_un = un->sd_fi_fifo_un[i]; 29925 fi_arq = un->sd_fi_fifo_arq[i]; 29926 29927 29928 /* set variables accordingly */ 29929 /* set pkt if it was on the fifo */ 29930 if (fi_pkt != NULL) { 29931 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 29932 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 29933 if (fi_pkt->pkt_cdbp != 0xff) 29934 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 29935 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 29936 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 29937 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 29938 29939 } 29940 /* set xb if it was on the fifo */ 29941 if (fi_xb != NULL) { 29942 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 29943 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 29944 if (fi_xb->xb_retry_count != 0) 29945 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 29946 SD_CONDSET(xb, xb, xb_victim_retry_count, 29947 "xb_victim_retry_count"); 29948 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 29949 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 29950 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 29951 29952 /* copy in block data from sense */ 29953 /* 29954 * if (fi_xb->xb_sense_data[0] != -1) { 29955 * bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 29956 * SENSE_LENGTH); 29957 * } 29958 */ 29959 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH); 29960 29961 /* copy in extended sense codes */ 29962 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29963 xb, es_code, "es_code"); 29964 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29965 xb, es_key, "es_key"); 29966 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29967 xb, es_add_code, "es_add_code"); 29968 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29969 xb, es_qual_code, "es_qual_code"); 29970 struct scsi_extended_sense *esp; 29971 esp = (struct scsi_extended_sense *)xb->xb_sense_data; 29972 esp->es_class = CLASS_EXTENDED_SENSE; 29973 } 29974 29975 /* set un if it was on the fifo */ 29976 if (fi_un != NULL) { 29977 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 29978 SD_CONDSET(un, un, un_ctype, "un_ctype"); 29979 SD_CONDSET(un, un, un_reset_retry_count, 29980 "un_reset_retry_count"); 29981 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 29982 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 29983 SD_CONDSET(un, un,
un_f_arq_enabled, "un_f_arq_enabled"); 29984 SD_CONDSET(un, un, un_f_allow_bus_device_reset, 29985 "un_f_allow_bus_device_reset"); 29986 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing"); 29987 29988 } 29989 29990 /* copy in auto request sense if it was on the fifo */ 29991 if (fi_arq != NULL) { 29992 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq)); 29993 } 29994 29995 /* free structs */ 29996 if (un->sd_fi_fifo_pkt[i] != NULL) { 29997 kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt)); 29998 } 29999 if (un->sd_fi_fifo_xb[i] != NULL) { 30000 kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb)); 30001 } 30002 if (un->sd_fi_fifo_un[i] != NULL) { 30003 kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un)); 30004 } 30005 if (un->sd_fi_fifo_arq[i] != NULL) { 30006 kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq)); 30007 } 30008 30009 /* 30010 * kmem_free does not guarantee that the pointer is set to NULL. 30011 * Since we use these pointers to determine whether we have set 30012 * values or not, let's make sure they are always 30013 * NULL after the free. 30014 */ 30015 un->sd_fi_fifo_pkt[i] = NULL; 30016 un->sd_fi_fifo_un[i] = NULL; 30017 un->sd_fi_fifo_xb[i] = NULL; 30018 un->sd_fi_fifo_arq[i] = NULL; 30019 30020 un->sd_fi_fifo_start++; 30021 30022 mutex_exit(SD_MUTEX(un)); 30023 30024 SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n"); 30025 } 30026 30027 #endif /* SD_FAULT_INJECTION */ 30028 30029 /* 30030 * This routine is invoked in sd_unit_attach(). Before calling it, the 30031 * properties in the conf file should already have been processed, 30032 * including the "hotpluggable" property. 30033 * 30034 * The sd driver distinguishes three different types of devices: removable 30035 * media, non-removable media, and hotpluggable. The differences are defined below: 30036 * 30037 * 1. Device ID 30038 * 30039 * The device ID of a device is used to identify this device. Refer to 30040 * ddi_devid_register(9F). 30041 * 30042 * For a non-removable media disk device which can provide 0x80 or 0x83 30043 * VPD page (refer to INQUIRY command of SCSI SPC specification), a unique 30044 * device ID is created to identify this device. For other non-removable 30045 * media devices, a default device ID is created only if this device has 30046 * at least two alternate cylinders. Otherwise, this device has no devid. 30047 * 30048 * ------------------------------------------------------- 30049 * removable media hotpluggable | Can Have Device ID 30050 * ------------------------------------------------------- 30051 * false false | Yes 30052 * false true | Yes 30053 * true x | No 30054 * ------------------------------------------------------ 30055 * 30056 * 30057 * 2. SCSI group 4 commands 30058 * 30059 * In the SCSI specs, only some commands in the group 4 command set can use 30060 * 8-byte addresses that can be used to access >2TB storage spaces. 30061 * Other commands have no such capability. Without supporting group 4, 30062 * it is impossible to make full use of the storage space of a disk with 30063 * a capacity larger than 2TB. 30064 * 30065 * ----------------------------------------------- 30066 * removable media hotpluggable LP64 | Group 30067 * ----------------------------------------------- 30068 * false false false | 1 30069 * false false true | 4 30070 * false true false | 1 30071 * false true true | 4 30072 * true x x | 5 30073 * ----------------------------------------------- 30074 * 30075 * 30076 * 3.
Check for VTOC Label 30077 * 30078 * If a direct-access disk has no EFI label, sd will check if it has a 30079 * valid VTOC label. Now, sd also does that check for removable media 30080 * and hotpluggable devices. 30081 * 30082 * -------------------------------------------------------------- 30083 * Direct-Access removable media hotpluggable | Check Label 30084 * ------------------------------------------------------------- 30085 * false false false | No 30086 * false false true | No 30087 * false true false | Yes 30088 * false true true | Yes 30089 * true x x | Yes 30090 * -------------------------------------------------------------- 30091 * 30092 * 30093 * 4. Building default VTOC label 30094 * 30095 * As section 3 says, sd checks whether some kinds of devices have a VTOC label. 30096 * If those devices have no valid VTOC label, sd(7d) will attempt to 30097 * create a default VTOC for them. Currently sd creates a default VTOC label 30098 * for all devices on the x86 platform (VTOC_16), but only for removable 30099 * media devices on SPARC (VTOC_8). 30100 * 30101 * ----------------------------------------------------------- 30102 * removable media hotpluggable platform | Default Label 30103 * ----------------------------------------------------------- 30104 * false false sparc | No 30105 * false true x86 | Yes 30106 * false true sparc | Yes 30107 * true x x | Yes 30108 * ---------------------------------------------------------- 30109 * 30110 * 30111 * 5. Supported blocksizes of target devices 30112 * 30113 * Sd supports a non-512-byte blocksize for removable media devices only. 30114 * For other devices, only the 512-byte blocksize is supported. This may 30115 * change in the near future because some RAID devices require a non-512-byte 30116 * blocksize. 30117 * 30118 * ----------------------------------------------------------- 30119 * removable media hotpluggable | non-512-byte blocksize 30120 * ----------------------------------------------------------- 30121 * false false | No 30122 * false true | No 30123 * true x | Yes 30124 * ----------------------------------------------------------- 30125 * 30126 * 30127 * 6. Automatic mount & unmount 30128 * 30129 * The sd(7d) driver provides the DKIOCREMOVABLE ioctl. This ioctl is used to query 30130 * whether a device is a removable media device. It returns 1 for removable media 30131 * devices, and 0 for others. 30132 * 30133 * The automatic mounting subsystem should distinguish between the types 30134 * of devices and apply automounting policies to each. 30135 * 30136 * 30137 * 7. fdisk partition management 30138 * 30139 * Fdisk is the traditional partitioning method on the x86 platform. The sd(7d) 30140 * driver supports fdisk partitions only on x86. On the SPARC platform, sd 30141 * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize 30142 * fdisk partitions on both the x86 and SPARC platforms. 30143 * 30144 * ----------------------------------------------------------- 30145 * platform removable media USB/1394 | fdisk supported 30146 * ----------------------------------------------------------- 30147 * x86 X X | true 30148 * ------------------------------------------------------------ 30149 * sparc X X | false 30150 * ------------------------------------------------------------ 30151 * 30152 * 30153 * 8. MBOOT/MBR 30154 * 30155 * Although sd(7d) doesn't support fdisk on the SPARC platform, it does support 30156 * reading/writing the mboot for removable media devices on SPARC.
30157 * 30158 * ----------------------------------------------------------- 30159 * platform removable media USB/1394 | mboot supported 30160 * ----------------------------------------------------------- 30161 * x86 X X | true 30162 * ------------------------------------------------------------ 30163 * sparc false false | false 30164 * sparc false true | true 30165 * sparc true false | true 30166 * sparc true true | true 30167 * ------------------------------------------------------------ 30168 * 30169 * 30170 * 9. Error handling when opening the device 30171 * 30172 * If opening a disk device fails, an errno is returned. For some kinds 30173 * of errors, a different errno is returned depending on whether the device 30174 * is a removable media device. This brings USB/1394 hard disks in line with 30175 * expected hard disk behavior. It is not expected that this breaks any 30176 * application. 30177 * 30178 * ------------------------------------------------------ 30179 * removable media hotpluggable | errno 30180 * ------------------------------------------------------ 30181 * false false | EIO 30182 * false true | EIO 30183 * true x | ENXIO 30184 * ------------------------------------------------------ 30185 * 30186 * 30187 * 10. ioctls: DKIOCEJECT, CDROMEJECT 30188 * 30189 * These IOCTLs are applicable only to removable media devices. 30190 * 30191 * ----------------------------------------------------------- 30192 * removable media hotpluggable | DKIOCEJECT, CDROMEJECT 30193 * ----------------------------------------------------------- 30194 * false false | No 30195 * false true | No 30196 * true x | Yes 30197 * ----------------------------------------------------------- 30198 * 30199 * 30200 * 11. Kstats for partitions 30201 * 30202 * sd creates partition kstats for non-removable media devices. USB and 30203 * Firewire hard disks now have partition kstats. 30204 * 30205 * ------------------------------------------------------ 30206 * removable media hotpluggable | kstat 30207 * ------------------------------------------------------ 30208 * false false | Yes 30209 * false true | Yes 30210 * true x | No 30211 * ------------------------------------------------------ 30212 * 30213 * 30214 * 12. Removable media & hotpluggable properties 30215 * 30216 * The sd driver creates a "removable-media" property for removable media 30217 * devices. A parent nexus driver creates a "hotpluggable" property if 30218 * it supports hotplugging. 30219 * 30220 * --------------------------------------------------------------------- 30221 * removable media hotpluggable | "removable-media" "hotpluggable" 30222 * --------------------------------------------------------------------- 30223 * false false | No No 30224 * false true | No Yes 30225 * true false | Yes No 30226 * true true | Yes Yes 30227 * --------------------------------------------------------------------- 30228 * 30229 * 30230 * 13. Power Management 30231 * 30232 * sd only power manages removable media devices or devices that support 30233 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250). 30234 * 30235 * A parent nexus that supports hotplugging can also set "pm-capable" 30236 * if the disk can be power managed.
30237 * 30238 * ------------------------------------------------------------ 30239 * removable media hotpluggable pm-capable | power manage 30240 * ------------------------------------------------------------ 30241 * false false false | No 30242 * false false true | Yes 30243 * false true false | No 30244 * false true true | Yes 30245 * true x x | Yes 30246 * ------------------------------------------------------------ 30247 * 30248 * USB and firewire hard disks can now be power managed independently 30249 * of the framebuffer. 30250 * 30251 * 30252 * 14. Support for USB disks with capacity larger than 1TB 30253 * 30254 * Currently, sd doesn't permit a fixed disk device with a capacity 30255 * larger than 1TB to be used in a 32-bit operating system environment. 30256 * However, sd doesn't enforce that for removable media devices. Instead, it 30257 * assumes that removable media devices cannot have a capacity larger 30258 * than 1TB. Therefore, using those devices on a 32-bit system is only partially 30259 * supported, which can cause some unexpected results. 30260 * 30261 * --------------------------------------------------------------------- 30262 * removable media USB/1394 | Capacity > 1TB | Used in 32-bit env 30263 * --------------------------------------------------------------------- 30264 * false false | true | no 30265 * false true | true | no 30266 * true false | true | Yes 30267 * true true | true | Yes 30268 * --------------------------------------------------------------------- 30269 * 30270 * 30271 * 15. Check write-protection at open time 30272 * 30273 * When a removable media device is opened for writing without the NDELAY 30274 * flag, sd will check whether the device is writable. If a write-protected 30275 * device is opened for writing without the NDELAY flag, the open will abort. 30276 * 30277 * ------------------------------------------------------------ 30278 * removable media USB/1394 | WP Check 30279 * ------------------------------------------------------------ 30280 * false false | No 30281 * false true | No 30282 * true false | Yes 30283 * true true | Yes 30284 * ------------------------------------------------------------ 30285 * 30286 * 30287 * 16. syslog when a corrupted VTOC is encountered 30288 * 30289 * Currently, if an invalid VTOC is encountered, sd prints a syslog message 30290 * only for fixed SCSI disks. 30291 * ------------------------------------------------------------ 30292 * removable media USB/1394 | print syslog 30293 * ------------------------------------------------------------ 30294 * false false | Yes 30295 * false true | No 30296 * true false | No 30297 * true true | No 30298 * ------------------------------------------------------------ 30299 */
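/*
 * Note: the following is a minimal userland sketch (illustrative only;
 * the device path is hypothetical) of the DKIOCREMOVABLE query described
 * in section 6 above, as an automounter would use it to choose a mount
 * policy for the device.
 *
 *	int removable = 0;
 *	int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY | O_NDELAY);
 *
 *	if (fd >= 0 && ioctl(fd, DKIOCREMOVABLE, &removable) == 0 &&
 *	    removable != 0) {
 *		(removable media: apply the removable automount policy)
 *	}
 */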
30300 static void 30301 sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi) 30302 { 30303 int pm_capable_prop; 30304 30305 ASSERT(un->un_sd); 30306 ASSERT(un->un_sd->sd_inq); 30307 30308 /* 30309 * Enable SYNC CACHE support for all devices. 30310 */ 30311 un->un_f_sync_cache_supported = TRUE; 30312 30313 /* 30314 * Set the sync cache required flag to false. 30315 * This ensures that no SYNC CACHE 30316 * is sent when there are no writes 30317 */ 30318 un->un_f_sync_cache_required = FALSE; 30319 30320 if (un->un_sd->sd_inq->inq_rmb) { 30321 /* 30322 * The media of this device is removable. For this kind 30323 * of device, it is possible to change the medium after 30324 * opening it. Thus we should support this operation. 30325 */ 30326 un->un_f_has_removable_media = TRUE; 30327 30328 /* 30329 * Support the non-512-byte blocksize of removable media devices 30330 */ 30331 un->un_f_non_devbsize_supported = TRUE; 30332 30333 /* 30334 * Assume that all removable media devices support DOOR_LOCK 30335 */ 30336 un->un_f_doorlock_supported = TRUE; 30337 30338 /* 30339 * A removable media device may be opened 30340 * with the NDELAY flag when there is no media in the drive; in 30341 * that case we don't care if the device is writable. But without 30342 * the NDELAY flag, we need to check if the media is write-protected. 30343 */ 30344 un->un_f_chk_wp_open = TRUE; 30345 30346 /* 30347 * We need to start a SCSI watch thread to monitor media state; 30348 * when media is being inserted or ejected, notify syseventd. 30349 */ 30350 un->un_f_monitor_media_state = TRUE; 30351 30352 /* 30353 * Some devices don't support the START_STOP_UNIT command, 30354 * so we check whether a device supports it 30355 * before sending it. 30356 */ 30357 un->un_f_check_start_stop = TRUE; 30358 30359 /* 30360 * Support the eject media ioctls: 30361 * FDEJECT, DKIOCEJECT, CDROMEJECT 30362 */ 30363 un->un_f_eject_media_supported = TRUE; 30364 30365 /* 30366 * Because many removable-media devices don't support 30367 * LOG_SENSE, we cannot use that command to check whether 30368 * a removable media device supports power management. 30369 * We assume that they support power management via the 30370 * START_STOP_UNIT command and can be spun up and down 30371 * without limitations. 30372 */ 30373 un->un_f_pm_supported = TRUE; 30374 30375 /* 30376 * We need to create a zero-length (Boolean) property 30377 * removable-media for the removable media devices. 30378 * Note that the return value of the property is not being 30379 * checked, since if we are unable to create the property 30380 * we do not want the attach to fail altogether. This is consistent 30381 * with other property creation in attach. 30382 */ 30383 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 30384 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 30385 30386 } else { 30387 /* 30388 * Create a device ID for the device 30389 */ 30390 un->un_f_devid_supported = TRUE; 30391 30392 /* 30393 * Spin up non-removable-media devices when they are attached 30394 */ 30395 un->un_f_attach_spinup = TRUE; 30396 30397 /* 30398 * According to the SCSI specification, sense data has two kinds of 30399 * format: fixed format and descriptor format. At present, we 30400 * don't support descriptor format sense data for removable 30401 * media. 30402 */ 30403 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) { 30404 un->un_f_descr_format_supported = TRUE; 30405 } 30406 30407 /* 30408 * kstats are created only for non-removable media devices. 30409 * 30410 * Set this in sd.conf to 0 in order to disable kstats. The 30411 * default is 1, so they are enabled by default. 30412 */ 30413 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 30414 SD_DEVINFO(un), DDI_PROP_DONTPASS, 30415 "enable-partition-kstats", 1)); 30416 30417 /* 30418 * Check if the HBA has set the "pm-capable" property. 30419 * If "pm-capable" exists and is non-zero then we can 30420 * power manage the device without checking the start/stop 30421 * cycle count log sense page. 30422 * 30423 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0) 30424 * then we should not power manage the device. 30425 * 30426 * If "pm-capable" doesn't exist then pm_capable_prop will 30427 * be set to SD_PM_CAPABLE_UNDEFINED (-1).
In this case, 30428 * sd will check the start/stop cycle count log sense page 30429 * and power manage the device if the cycle count limit has 30430 * not been exceeded. 30431 */ 30432 pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi, 30433 DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED); 30434 if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) { 30435 un->un_f_log_sense_supported = TRUE; 30436 } else { 30437 /* 30438 * pm-capable property exists. 30439 * 30440 * Convert "TRUE" values for pm_capable_prop to 30441 * SD_PM_CAPABLE_TRUE (1) to make it easier to check 30442 * later. "TRUE" values are any values except 30443 * SD_PM_CAPABLE_FALSE (0) and 30444 * SD_PM_CAPABLE_UNDEFINED (-1) 30445 */ 30446 if (pm_capable_prop == SD_PM_CAPABLE_FALSE) { 30447 un->un_f_log_sense_supported = FALSE; 30448 } else { 30449 un->un_f_pm_supported = TRUE; 30450 } 30451 30452 SD_INFO(SD_LOG_ATTACH_DETACH, un, 30453 "sd_unit_attach: un:0x%p pm-capable " 30454 "property set to %d.\n", un, un->un_f_pm_supported); 30455 } 30456 } 30457 30458 if (un->un_f_is_hotpluggable) { 30459 30460 /* 30461 * Have to watch hotpluggable devices as well, since 30462 * that's the only way for userland applications to 30463 * detect hot removal while device is busy/mounted. 30464 */ 30465 un->un_f_monitor_media_state = TRUE; 30466 30467 un->un_f_check_start_stop = TRUE; 30468 30469 } 30470 } 30471 30472 /* 30473 * sd_tg_rdwr: 30474 * Provides rdwr access for cmlb via sd_tgops. The start_block is 30475 * in sys block size, req_length in bytes. 30476 * 30477 */ 30478 static int 30479 sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 30480 diskaddr_t start_block, size_t reqlength, void *tg_cookie) 30481 { 30482 struct sd_lun *un; 30483 int path_flag = (int)(uintptr_t)tg_cookie; 30484 char *dkl = NULL; 30485 diskaddr_t real_addr = start_block; 30486 diskaddr_t first_byte, end_block; 30487 30488 size_t buffer_size = reqlength; 30489 int rval = 0; 30490 diskaddr_t cap; 30491 uint32_t lbasize; 30492 sd_ssc_t *ssc; 30493 30494 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 30495 if (un == NULL) 30496 return (ENXIO); 30497 30498 if (cmd != TG_READ && cmd != TG_WRITE) 30499 return (EINVAL); 30500 30501 ssc = sd_ssc_init(un); 30502 mutex_enter(SD_MUTEX(un)); 30503 if (un->un_f_tgt_blocksize_is_valid == FALSE) { 30504 mutex_exit(SD_MUTEX(un)); 30505 rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap, 30506 &lbasize, path_flag); 30507 if (rval != 0) 30508 goto done1; 30509 mutex_enter(SD_MUTEX(un)); 30510 sd_update_block_info(un, lbasize, cap); 30511 if ((un->un_f_tgt_blocksize_is_valid == FALSE)) { 30512 mutex_exit(SD_MUTEX(un)); 30513 rval = EIO; 30514 goto done; 30515 } 30516 } 30517 30518 if (NOT_DEVBSIZE(un)) { 30519 /* 30520 * sys_blocksize != tgt_blocksize, need to re-adjust 30521 * blkno and save the index to beginning of dk_label 30522 */ 30523 first_byte = SD_SYSBLOCKS2BYTES(start_block); 30524 real_addr = first_byte / un->un_tgt_blocksize; 30525 30526 end_block = (first_byte + reqlength + 30527 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize; 30528 30529 /* round up buffer size to multiple of target block size */ 30530 buffer_size = (end_block - real_addr) * un->un_tgt_blocksize; 30531 30532 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr", 30533 "label_addr: 0x%x allocation size: 0x%x\n", 30534 real_addr, buffer_size); 30535 30536 if (((first_byte % un->un_tgt_blocksize) != 0) || 30537 (reqlength % un->un_tgt_blocksize) != 0) 30538 /* the request is not aligned */ 30539 dkl = 
kmem_zalloc(buffer_size, KM_SLEEP); 30540 } 30541 30542 /* 30543 * The MMC standard allows READ CAPACITY to be 30544 * inaccurate by a bounded amount (in the interest of 30545 * response latency). As a result, failed READs are 30546 * commonplace (due to the reading of metadata and not 30547 * data). Depending on the per-Vendor/drive Sense data, 30548 * the failed READ can cause many (unnecessary) retries. 30549 */ 30550 30551 if (ISCD(un) && (cmd == TG_READ) && 30552 (un->un_f_blockcount_is_valid == TRUE) && 30553 ((start_block == (un->un_blockcount - 1)) || 30554 (start_block == (un->un_blockcount - 2)))) { 30555 path_flag = SD_PATH_DIRECT_PRIORITY; 30556 } 30557 30558 mutex_exit(SD_MUTEX(un)); 30559 if (cmd == TG_READ) { 30560 rval = sd_send_scsi_READ(ssc, (dkl != NULL) ? dkl : bufaddr, 30561 buffer_size, real_addr, path_flag); 30562 if (dkl != NULL) 30563 bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block, 30564 real_addr), bufaddr, reqlength); 30565 } else { 30566 if (dkl) { 30567 rval = sd_send_scsi_READ(ssc, dkl, buffer_size, 30568 real_addr, path_flag); 30569 if (rval) { 30570 goto done1; 30571 } 30572 bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block, 30573 real_addr), reqlength); 30574 } 30575 rval = sd_send_scsi_WRITE(ssc, (dkl != NULL) ? dkl : bufaddr, 30576 buffer_size, real_addr, path_flag); 30577 } 30578 30579 done1: 30580 if (dkl != NULL) 30581 kmem_free(dkl, buffer_size); 30582 30583 if (rval != 0) { 30584 if (rval == EIO) 30585 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 30586 else 30587 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 30588 } 30589 done: 30590 sd_ssc_fini(ssc); 30591 return (rval); 30592 } 30593 30594 30595 static int 30596 sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie) 30597 { 30598 30599 struct sd_lun *un; 30600 diskaddr_t cap; 30601 uint32_t lbasize; 30602 int path_flag = (int)(uintptr_t)tg_cookie; 30603 int ret = 0; 30604 30605 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 30606 if (un == NULL) 30607 return (ENXIO); 30608 30609 switch (cmd) { 30610 case TG_GETPHYGEOM: 30611 case TG_GETVIRTGEOM: 30612 case TG_GETCAPACITY: 30613 case TG_GETBLOCKSIZE: 30614 mutex_enter(SD_MUTEX(un)); 30615 30616 if ((un->un_f_blockcount_is_valid == TRUE) && 30617 (un->un_f_tgt_blocksize_is_valid == TRUE)) { 30618 cap = un->un_blockcount; 30619 lbasize = un->un_tgt_blocksize; 30620 mutex_exit(SD_MUTEX(un)); 30621 } else { 30622 sd_ssc_t *ssc; 30623 mutex_exit(SD_MUTEX(un)); 30624 ssc = sd_ssc_init(un); 30625 ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap, 30626 &lbasize, path_flag); 30627 if (ret != 0) { 30628 if (ret == EIO) 30629 sd_ssc_assessment(ssc, 30630 SD_FMT_STATUS_CHECK); 30631 else 30632 sd_ssc_assessment(ssc, 30633 SD_FMT_IGNORE); 30634 sd_ssc_fini(ssc); 30635 return (ret); 30636 } 30637 sd_ssc_fini(ssc); 30638 mutex_enter(SD_MUTEX(un)); 30639 sd_update_block_info(un, lbasize, cap); 30640 if ((un->un_f_blockcount_is_valid == FALSE) || 30641 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 30642 mutex_exit(SD_MUTEX(un)); 30643 return (EIO); 30644 } 30645 mutex_exit(SD_MUTEX(un)); 30646 } 30647 30648 if (cmd == TG_GETCAPACITY) { 30649 *(diskaddr_t *)arg = cap; 30650 return (0); 30651 } 30652 30653 if (cmd == TG_GETBLOCKSIZE) { 30654 *(uint32_t *)arg = lbasize; 30655 return (0); 30656 } 30657 30658 if (cmd == TG_GETPHYGEOM) 30659 ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg, 30660 cap, lbasize, path_flag); 30661 else 30662 /* TG_GETVIRTGEOM */ 30663 ret = sd_get_virtual_geometry(un, 30664 (cmlb_geom_t *)arg, cap, lbasize); 30665 30666 return (ret); 30667 30668 case TG_GETATTR: 30669 mutex_enter(SD_MUTEX(un)); 30670 ((tg_attribute_t *)arg)->media_is_writable = 30671 un->un_f_mmc_writable_media; 30672 mutex_exit(SD_MUTEX(un)); 30673 return (0); 30674 default: 30675 return (ENOTTY); 30676 30677 } 30678 }
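/*
 * Note: the following is a minimal sketch (illustrative only, not part of
 * the driver) of how the cmlb label code conceptually consumes the tg_ops
 * interface implemented by sd_tg_rdwr() and sd_tg_getinfo() above; the
 * labelbuf buffer is hypothetical, and tg_cookie carries the sd path flag.
 *
 *	diskaddr_t cap;
 *	uint32_t lbasize;
 *
 *	if (sd_tg_getinfo(devi, TG_GETCAPACITY, &cap, tg_cookie) == 0 &&
 *	    sd_tg_getinfo(devi, TG_GETBLOCKSIZE, &lbasize,
 *	    tg_cookie) == 0) {
 *		(read the label area from the start of the device)
 *		(void) sd_tg_rdwr(devi, TG_READ, labelbuf, 0,
 *		    lbasize, tg_cookie);
 *	}
 */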
30679 30680 /* 30681 * Function: sd_ssc_ereport_post 30682 * 30683 * Description: Will be called when the SD driver needs to post an ereport. 30684 * 30685 * Context: Kernel thread or interrupt context. 30686 */ 30687 static void 30688 sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess) 30689 { 30690 int uscsi_path_instance = 0; 30691 uchar_t uscsi_pkt_reason; 30692 uint32_t uscsi_pkt_state; 30693 uint32_t uscsi_pkt_statistics; 30694 uint64_t uscsi_ena; 30695 uchar_t op_code; 30696 uint8_t *sensep; 30697 union scsi_cdb *cdbp; 30698 uint_t cdblen = 0; 30699 uint_t senlen = 0; 30700 struct sd_lun *un; 30701 dev_info_t *dip; 30702 char *devid; 30703 int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON | 30704 SSC_FLAGS_INVALID_STATUS | 30705 SSC_FLAGS_INVALID_SENSE | 30706 SSC_FLAGS_INVALID_DATA; 30707 char assessment[16]; 30708 30709 ASSERT(ssc != NULL); 30710 ASSERT(ssc->ssc_uscsi_cmd != NULL); 30711 ASSERT(ssc->ssc_uscsi_info != NULL); 30712 30713 un = ssc->ssc_un; 30714 ASSERT(un != NULL); 30715 30716 dip = un->un_sd->sd_dev; 30717 30718 /* 30719 * Get the devid: 30720 * devid will only be passed to non-transport error reports. 30721 */ 30722 devid = DEVI(dip)->devi_devid_str; 30723 30724 /* 30725 * If we are syncing or dumping, the command will not be executed, 30726 * so we bypass this situation. 30727 */ 30728 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 30729 (un->un_state == SD_STATE_DUMPING)) 30730 return; 30731 30732 uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason; 30733 uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance; 30734 uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state; 30735 uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics; 30736 uscsi_ena = ssc->ssc_uscsi_info->ui_ena; 30737 30738 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 30739 cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb; 30740 30741 /* In rare cases, e.g. DOORLOCK, the cdb could be NULL */ 30742 if (cdbp == NULL) { 30743 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 30744 "sd_ssc_ereport_post encountered an empty cdb\n"); 30745 return; 30746 } 30747 30748 op_code = cdbp->scc_cmd; 30749 30750 cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen; 30751 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 30752 ssc->ssc_uscsi_cmd->uscsi_rqresid); 30753 30754 if (senlen > 0) 30755 ASSERT(sensep != NULL); 30756 30757 /* 30758 * Initialize drv_assess to corresponding values. 30759 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending 30760 * on the sense-key returned back. 30761 */ 30762 switch (drv_assess) { 30763 case SD_FM_DRV_RECOVERY: 30764 (void) sprintf(assessment, "%s", "recovered"); 30765 break; 30766 case SD_FM_DRV_RETRY: 30767 (void) sprintf(assessment, "%s", "retry"); 30768 break; 30769 case SD_FM_DRV_NOTICE: 30770 (void) sprintf(assessment, "%s", "info"); 30771 break; 30772 case SD_FM_DRV_FATAL: 30773 default: 30774 (void) sprintf(assessment, "%s", "unknown"); 30775 } 30776 /* 30777 * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered 30778 * command; we will post ereport.io.scsi.cmd.disk.recovered. 30779 * driver-assessment will always be "recovered" here.
30780 */ 30781 if (drv_assess == SD_FM_DRV_RECOVERY) { 30782 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30783 "cmd.disk.recovered", uscsi_ena, devid, DDI_NOSLEEP, 30784 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30785 "driver-assessment", DATA_TYPE_STRING, assessment, 30786 "op-code", DATA_TYPE_UINT8, op_code, 30787 "cdb", DATA_TYPE_UINT8_ARRAY, 30788 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30789 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30790 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30791 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics, 30792 NULL); 30793 return; 30794 } 30795 30796 /* 30797 * If there is unexpected or undecodable data, we should post 30798 * ereport.io.scsi.cmd.disk.dev.uderr. 30799 * driver-assessment will be set based on the parameter drv_assess. 30800 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back. 30801 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered. 30802 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered. 30803 * SSC_FLAGS_INVALID_DATA - invalid data sent back. 30804 */ 30805 if (ssc->ssc_flags & ssc_invalid_flags) { 30806 if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) { 30807 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30808 "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP, 30809 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30810 "driver-assessment", DATA_TYPE_STRING, 30811 drv_assess == SD_FM_DRV_FATAL ? 30812 "fail" : assessment, 30813 "op-code", DATA_TYPE_UINT8, op_code, 30814 "cdb", DATA_TYPE_UINT8_ARRAY, 30815 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30816 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30817 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30818 "pkt-stats", DATA_TYPE_UINT32, 30819 uscsi_pkt_statistics, 30820 "stat-code", DATA_TYPE_UINT8, 30821 ssc->ssc_uscsi_cmd->uscsi_status, 30822 "un-decode-info", DATA_TYPE_STRING, 30823 ssc->ssc_info, 30824 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 30825 senlen, sensep, 30826 NULL); 30827 } else { 30828 /* 30829 * For other types of invalid data, the 30830 * un-decode-value field would be empty because the 30831 * undecodable content could be seen from the upper 30832 * level payload or inside un-decode-info. 30833 */ 30834 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30835 "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP, 30836 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30837 "driver-assessment", DATA_TYPE_STRING, 30838 drv_assess == SD_FM_DRV_FATAL ? 30839 "fail" : assessment, 30840 "op-code", DATA_TYPE_UINT8, op_code, 30841 "cdb", DATA_TYPE_UINT8_ARRAY, 30842 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30843 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30844 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30845 "pkt-stats", DATA_TYPE_UINT32, 30846 uscsi_pkt_statistics, 30847 "stat-code", DATA_TYPE_UINT8, 30848 ssc->ssc_uscsi_cmd->uscsi_status, 30849 "un-decode-info", DATA_TYPE_STRING, 30850 ssc->ssc_info, 30851 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 30852 0, NULL, 30853 NULL); 30854 } 30855 ssc->ssc_flags &= ~ssc_invalid_flags; 30856 return; 30857 } 30858 30859 if (uscsi_pkt_reason != CMD_CMPLT || 30860 (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) { 30861 /* 30862 * pkt-reason != CMD_CMPLT or SSC_FLAGS_TRAN_ABORT was 30863 * set inside sd_start_cmds due to errors (bad packet or 30864 * fatal transport error); we should treat it as a 30865 * transport error, so we post ereport.io.scsi.cmd.disk.tran. 30866 * driver-assessment will be set based on drv_assess.
30867 * We will set devid to NULL because it is a transport 30868 * error. 30869 */ 30870 if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT) 30871 ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT; 30872 30873 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30874 "cmd.disk.tran", uscsi_ena, NULL, DDI_NOSLEEP, FM_VERSION, 30875 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30876 "driver-assessment", DATA_TYPE_STRING, 30877 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment, 30878 "op-code", DATA_TYPE_UINT8, op_code, 30879 "cdb", DATA_TYPE_UINT8_ARRAY, 30880 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30881 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30882 "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state, 30883 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics, 30884 NULL); 30885 } else { 30886 /* 30887 * If we got here, we have a completed command, and we need 30888 * to further investigate the sense data to see what kind 30889 * of ereport we should post. 30890 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr 30891 * if sense-key == 0x3. 30892 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise. 30893 * driver-assessment will be set based on the parameter 30894 * drv_assess. 30895 */ 30896 if (senlen > 0) { 30897 /* 30898 * Here we have sense data available. 30899 */ 30900 uint8_t sense_key; 30901 sense_key = scsi_sense_key(sensep); 30902 if (sense_key == 0x3) { 30903 /* 30904 * sense-key == 0x3(medium error), 30905 * driver-assessment should be "fatal" if 30906 * drv_assess is SD_FM_DRV_FATAL. 30907 */ 30908 scsi_fm_ereport_post(un->un_sd, 30909 uscsi_path_instance, 30910 "cmd.disk.dev.rqs.merr", 30911 uscsi_ena, devid, DDI_NOSLEEP, FM_VERSION, 30912 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30913 "driver-assessment", 30914 DATA_TYPE_STRING, 30915 drv_assess == SD_FM_DRV_FATAL ? 30916 "fatal" : assessment, 30917 "op-code", 30918 DATA_TYPE_UINT8, op_code, 30919 "cdb", 30920 DATA_TYPE_UINT8_ARRAY, cdblen, 30921 ssc->ssc_uscsi_cmd->uscsi_cdb, 30922 "pkt-reason", 30923 DATA_TYPE_UINT8, uscsi_pkt_reason, 30924 "pkt-state", 30925 DATA_TYPE_UINT8, uscsi_pkt_state, 30926 "pkt-stats", 30927 DATA_TYPE_UINT32, 30928 uscsi_pkt_statistics, 30929 "stat-code", 30930 DATA_TYPE_UINT8, 30931 ssc->ssc_uscsi_cmd->uscsi_status, 30932 "key", 30933 DATA_TYPE_UINT8, 30934 scsi_sense_key(sensep), 30935 "asc", 30936 DATA_TYPE_UINT8, 30937 scsi_sense_asc(sensep), 30938 "ascq", 30939 DATA_TYPE_UINT8, 30940 scsi_sense_ascq(sensep), 30941 "sense-data", 30942 DATA_TYPE_UINT8_ARRAY, 30943 senlen, sensep, 30944 "lba", 30945 DATA_TYPE_UINT64, 30946 ssc->ssc_uscsi_info->ui_lba, 30947 NULL); 30948 } else { 30949 /* 30950 * if sense-key == 0x4(hardware 30951 * error), driver-assessment should 30952 * be "fatal" if drv_assess is 30953 * SD_FM_DRV_FATAL. 30954 */ 30955 scsi_fm_ereport_post(un->un_sd, 30956 uscsi_path_instance, 30957 "cmd.disk.dev.rqs.derr", 30958 uscsi_ena, devid, DDI_NOSLEEP, 30959 FM_VERSION, 30960 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30961 "driver-assessment", 30962 DATA_TYPE_STRING, 30963 drv_assess == SD_FM_DRV_FATAL ? 30964 (sense_key == 0x4 ? 
30965 "fatal" : "fail") : assessment, 30966 "op-code", 30967 DATA_TYPE_UINT8, op_code, 30968 "cdb", 30969 DATA_TYPE_UINT8_ARRAY, cdblen, 30970 ssc->ssc_uscsi_cmd->uscsi_cdb, 30971 "pkt-reason", 30972 DATA_TYPE_UINT8, uscsi_pkt_reason, 30973 "pkt-state", 30974 DATA_TYPE_UINT8, uscsi_pkt_state, 30975 "pkt-stats", 30976 DATA_TYPE_UINT32, 30977 uscsi_pkt_statistics, 30978 "stat-code", 30979 DATA_TYPE_UINT8, 30980 ssc->ssc_uscsi_cmd->uscsi_status, 30981 "key", 30982 DATA_TYPE_UINT8, 30983 scsi_sense_key(sensep), 30984 "asc", 30985 DATA_TYPE_UINT8, 30986 scsi_sense_asc(sensep), 30987 "ascq", 30988 DATA_TYPE_UINT8, 30989 scsi_sense_ascq(sensep), 30990 "sense-data", 30991 DATA_TYPE_UINT8_ARRAY, 30992 senlen, sensep, 30993 NULL); 30994 } 30995 } else { 30996 /* 30997 * For stat_code == STATUS_GOOD, this is not a 30998 * hardware error. 30999 */ 31000 if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) 31001 return; 31002 31003 /* 31004 * Post ereport.io.scsi.cmd.disk.dev.serr if we got the 31005 * stat-code but with sense data unavailable. 31006 * driver-assessment will be set based on parameter 31007 * drv_assess. 31008 */ 31009 scsi_fm_ereport_post(un->un_sd, 31010 uscsi_path_instance, "cmd.disk.dev.serr", uscsi_ena, 31011 devid, DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 31012 FM_EREPORT_VERS0, 31013 "driver-assessment", DATA_TYPE_STRING, 31014 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment, 31015 "op-code", DATA_TYPE_UINT8, op_code, 31016 "cdb", 31017 DATA_TYPE_UINT8_ARRAY, 31018 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 31019 "pkt-reason", 31020 DATA_TYPE_UINT8, uscsi_pkt_reason, 31021 "pkt-state", 31022 DATA_TYPE_UINT8, uscsi_pkt_state, 31023 "pkt-stats", 31024 DATA_TYPE_UINT32, uscsi_pkt_statistics, 31025 "stat-code", 31026 DATA_TYPE_UINT8, 31027 ssc->ssc_uscsi_cmd->uscsi_status, 31028 NULL); 31029 } 31030 } 31031 } 31032 31033 /* 31034 * Function: sd_ssc_extract_info 31035 * 31036 * Description: Extract information available to help generate ereport. 31037 * 31038 * Context: Kernel thread or interrupt context. 31039 */ 31040 static void 31041 sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp, 31042 struct buf *bp, struct sd_xbuf *xp) 31043 { 31044 size_t senlen = 0; 31045 union scsi_cdb *cdbp; 31046 int path_instance; 31047 /* 31048 * Need scsi_cdb_size array to determine the cdb length. 31049 */ 31050 extern uchar_t scsi_cdb_size[]; 31051 31052 ASSERT(un != NULL); 31053 ASSERT(pktp != NULL); 31054 ASSERT(bp != NULL); 31055 ASSERT(xp != NULL); 31056 ASSERT(ssc != NULL); 31057 ASSERT(mutex_owned(SD_MUTEX(un))); 31058 31059 /* 31060 * Transfer the cdb buffer pointer here. 31061 */ 31062 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 31063 31064 ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)]; 31065 ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp; 31066 31067 /* 31068 * Transfer the sense data buffer pointer if sense data is available, 31069 * calculate the sense data length first. 31070 */ 31071 if ((xp->xb_sense_state & STATE_XARQ_DONE) || 31072 (xp->xb_sense_state & STATE_ARQ_DONE)) { 31073 /* 31074 * For arq case, we will enter here. 31075 */ 31076 if (xp->xb_sense_state & STATE_XARQ_DONE) { 31077 senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid; 31078 } else { 31079 senlen = SENSE_LENGTH; 31080 } 31081 } else { 31082 /* 31083 * For non-arq case, we will enter this branch. 
31084 */ 31085 if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK && 31086 (xp->xb_sense_state & STATE_XFERRED_DATA)) { 31087 senlen = SENSE_LENGTH - xp->xb_sense_resid; 31088 } 31089 31090 } 31091 31092 ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff); 31093 ssc->ssc_uscsi_cmd->uscsi_rqresid = 0; 31094 ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data; 31095 31096 ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 31097 31098 /* 31099 * Only transfer path_instance when the scsi_pkt was properly allocated. 31100 */ 31101 path_instance = pktp->pkt_path_instance; 31102 if (scsi_pkt_allocated_correctly(pktp) && path_instance) 31103 ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance; 31104 else 31105 ssc->ssc_uscsi_cmd->uscsi_path_instance = 0; 31106 31107 /* 31108 * Copy in the other fields we may need when posting an ereport. 31109 */ 31110 ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason; 31111 ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state; 31112 ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics; 31113 ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp); 31114 31115 /* 31116 * For a partially completed read/write command, we will not create an 31117 * ena, in case a successful command would be recognized as recovered. 31118 */ 31119 if ((pktp->pkt_reason == CMD_CMPLT) && 31120 (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) && 31121 (senlen == 0)) { 31122 return; 31123 } 31124 31125 /* 31126 * To associate the ereports of a single command execution flow, we 31127 * need a shared ena for a specific command. 31128 */ 31129 if (xp->xb_ena == 0) 31130 xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1); 31131 ssc->ssc_uscsi_info->ui_ena = xp->xb_ena; 31132 } 31133
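/*
 * Note: the following is a minimal sketch (illustrative only, not part of
 * the driver) of the ena-sharing convention noted above: the first ereport
 * in a command's execution flow generates the ena with fm_ena_generate(9F),
 * and every later ereport for the same command reuses it so that the
 * diagnosis engine can correlate them into a single fault event.
 *
 *	if (xp->xb_ena == 0)
 *		xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
 *	ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
 *	(sd_ssc_ereport_post() then passes ui_ena as the uscsi_ena argument
 *	of each scsi_fm_ereport_post(9F) call made for this command.)
 */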