/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 */

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>

#include <sys/fm/protocol.h>

/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver"
char _depends_on[] = "misc/scsi misc/cmlb drv/fcp";
#else /* !__fibre */
#define	SD_MODULE_NAME	"SCSI Disk Driver"
char _depends_on[] = "misc/scsi misc/cmlb";
#endif /* !__fibre */

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect_type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
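
/*
 * Illustrative sketch of the policy above (an assumption, not the
 * driver's actual attach-time code; the helper name is made up, but
 * the "interconnect-type" property, the INTERCONNECT_* values and
 * ddi_prop_get_int(9F) are real, and the SD_INTERCONNECT_* values
 * are defined later in this file):
 *
 *	static int
 *	sd_resolve_interconnect(dev_info_t *devi)
 *	{
 *		switch (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0,
 *		    "interconnect-type", -1)) {
 *		case INTERCONNECT_FIBRE:
 *		case INTERCONNECT_SSA:
 *			return (SD_INTERCONNECT_FIBRE);
 *		case INTERCONNECT_FABRIC:
 *			return (SD_INTERCONNECT_FABRIC);
 *		default:
 *			return (SD_DEFAULT_INTERCONNECT_TYPE);
 *		}
 *	}
 */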

/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#define	sd_ssc_init			ssd_ssc_init
#define	sd_ssc_send			ssd_ssc_send
#define	sd_ssc_fini			ssd_ssc_fini
#define	sd_ssc_assessment		ssd_ssc_assessment
#define	sd_ssc_post			ssd_ssc_post
#define	sd_ssc_print			ssd_ssc_print
#define	sd_ssc_ereport_post		ssd_ssc_ereport_post
#define	sd_ssc_set_info			ssd_ssc_set_info
#define	sd_ssc_extract_info		ssd_ssc_extract_info

#endif

#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int	sd_qfull_throttle_enable	= TRUE;

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;
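
/*
 * For example (illustrative values only), the most verbose output for
 * every component could be requested for the sd module via /etc/system:
 *
 *	set sd:sd_component_mask = 0xffffffff
 *	set sd:sd_level_mask = 0x7
 *
 * Leaving sd_debug_un at NULL applies the masks to every unit; pointing
 * it at a specific soft state structure restricts output to that unit.
 */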

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel scsi device only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identify device only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;
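
/*
 * A minimal sketch of a cache lookup (illustrative assumptions; the
 * real logic lives in sd_scsi_probe_with_cache() below, and the exact
 * cached result value that short-circuits the probe is an assumption
 * here):
 *
 *	struct sd_scsi_probe_cache *cp;
 *
 *	mutex_enter(&sd_scsi_probe_cache_mutex);
 *	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
 *		if (cp->pdip == pdip)
 *			break;
 *	}
 *	if (cp != NULL && cp->cache[tgt] == SCSIPROBE_NORESP) {
 *		mutex_exit(&sd_scsi_probe_cache_mutex);
 *		return (SCSIPROBE_NORESP);	(skip the slow probe)
 *	}
 *	mutex_exit(&sd_scsi_probe_cache_mutex);
 *	return (scsi_probe(devp, fn));		(and cache the result)
 */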

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))

/*
 * Power attribute table
 */
static sd_power_attr_ss sd_pwr_ss = {
	{ "NAME=spindle-motor", "0=off", "1=on", NULL },
	{0, 100},
	{30, 0},
	{20000, 0}
};

static sd_power_attr_pc sd_pwr_pc = {
	{ "NAME=spindle-motor", "0=stopped", "1=standby", "2=idle",
		"3=active", NULL },
	{0, 0, 0, 100},
	{90, 90, 20, 0},
	{15000, 15000, 1000, 0}
};

/*
 * Power level to power condition
 */
static int sd_pl2pc[] = {
	SD_TARGET_START_VALID,
	SD_TARGET_STANDBY,
	SD_TARGET_IDLE,
	SD_TARGET_ACTIVE
};

/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of  "NEC       CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
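
/*
 * Worked examples of the matching rules described above (illustrative):
 *
 *	"SEAGATE ST34371FC"	exact-prefix match: "SEAGATE " must fill
 *				the 8-byte vid field and "ST34371FC" must
 *				begin the pid field of the inquiry data.
 *	"*CSM200_*"		wildcard match: "CSM200_" may appear
 *				anywhere in the 16-byte pid field.
 *	" NEC CDROM "		blank-compressed match: equivalent to any
 *				id whose non-blank tokens are "NEC" and
 *				"CDROM", e.g. "NEC       CDROM".
 */
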
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties }, 639 { "IBM DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties }, 640 { "IBM IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties }, 641 { "IBM IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties }, 642 { "IBM 1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 643 { "IBM 1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 644 { "IBM 1726-22x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 645 { "IBM 1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 646 { "IBM 1726-42x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 647 { "IBM 1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 648 { "IBM 3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 649 { "IBM 3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 650 { "IBM 3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 651 { "IBM 1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 652 { "IBM 1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 653 { "IBM 1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 654 { "IBM FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 655 { "IBM 1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 656 { "IBM 1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 657 { "IBM 1818", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 658 { "DELL MD3000", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 659 { "DELL MD3000i", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 660 { "LSI INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 661 { "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 662 { "SGI TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 663 { "SGI IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 664 { "*CSM100_*", SD_CONF_BSET_NRR_COUNT | 665 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties }, 666 { "*CSM200_*", SD_CONF_BSET_NRR_COUNT | 667 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties }, 668 { "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties }, 669 { "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties }, 670 { "SUN T3", SD_CONF_BSET_THROTTLE | 671 SD_CONF_BSET_BSY_RETRY_COUNT| 672 SD_CONF_BSET_RST_RETRIES| 673 SD_CONF_BSET_RSV_REL_TIME, 674 &purple_properties }, 675 { "SUN SESS01", SD_CONF_BSET_THROTTLE | 676 SD_CONF_BSET_BSY_RETRY_COUNT| 677 SD_CONF_BSET_RST_RETRIES| 678 SD_CONF_BSET_RSV_REL_TIME| 679 SD_CONF_BSET_MIN_THROTTLE| 680 SD_CONF_BSET_DISKSORT_DISABLED, 681 &sve_properties }, 682 { "SUN T4", SD_CONF_BSET_THROTTLE | 683 SD_CONF_BSET_BSY_RETRY_COUNT| 684 SD_CONF_BSET_RST_RETRIES| 685 SD_CONF_BSET_RSV_REL_TIME, 686 &purple_properties }, 687 { "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED | 688 SD_CONF_BSET_LUN_RESET_ENABLED, 689 &maserati_properties }, 690 { "SUN SE6920", SD_CONF_BSET_THROTTLE | 691 SD_CONF_BSET_NRR_COUNT| 692 SD_CONF_BSET_BSY_RETRY_COUNT| 693 SD_CONF_BSET_RST_RETRIES| 694 SD_CONF_BSET_MIN_THROTTLE| 695 SD_CONF_BSET_DISKSORT_DISABLED| 696 SD_CONF_BSET_LUN_RESET_ENABLED, 697 &pirus_properties }, 698 { "SUN SE6940", SD_CONF_BSET_THROTTLE | 699 SD_CONF_BSET_NRR_COUNT| 700 SD_CONF_BSET_BSY_RETRY_COUNT| 701 SD_CONF_BSET_RST_RETRIES| 702 SD_CONF_BSET_MIN_THROTTLE| 703 SD_CONF_BSET_DISKSORT_DISABLED| 704 SD_CONF_BSET_LUN_RESET_ENABLED, 705 &pirus_properties }, 706 { "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE | 707 SD_CONF_BSET_NRR_COUNT| 708 SD_CONF_BSET_BSY_RETRY_COUNT| 709 SD_CONF_BSET_RST_RETRIES| 710 SD_CONF_BSET_MIN_THROTTLE| 711 SD_CONF_BSET_DISKSORT_DISABLED| 712 SD_CONF_BSET_LUN_RESET_ENABLED, 713 &pirus_properties }, 714 { "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE | 
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SUN     SUN_6180", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) ||\
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
				    | SD_CONF_BSET_READSUB_BCD
				    | SD_CONF_BSET_READ_TOC_ADDR_BCD
				    | SD_CONF_BSET_NO_READ_HEADER
				    | SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
				    | SD_CONF_BSET_READSUB_BCD
				    | SD_CONF_BSET_READ_TOC_ADDR_BCD
				    | SD_CONF_BSET_NO_READ_HEADER
				    | SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
				| SD_CONF_BSET_CTYPE
				| SD_CONF_BSET_NRR_COUNT
				| SD_CONF_BSET_FAB_DEVID
				| SD_CONF_BSET_NOCACHE
				| SD_CONF_BSET_BSY_RETRY_COUNT
				| SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_TRK_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4
				| SD_CONF_BSET_RST_RETRIES
				| SD_CONF_BSET_RSV_REL_TIME
				| SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table)/ sizeof (sd_disk_config_t);

/*
 * Emulation mode disk drive VID/PID table
 */
static char sd_flash_dev_table[][25] = {
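
/*
 * Illustrative use of the state macros above (assumes the caller holds
 * SD_MUTEX(un) and that the SD_STATE_* values come from sddef.h):
 *
 *	New_state(un, SD_STATE_RWAIT);	(un_last_state remembers the
 *					state being left)
 *	...wait for retry resources...
 *	Restore_state(un);		(swap back to un_last_state)
 */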
MARVELL SD88SA02", 806 "MARVELL SD88SA02", 807 "TOSHIBA THNSNV05", 808 }; 809 810 static const int sd_flash_dev_table_size = 811 sizeof (sd_flash_dev_table) / sizeof (sd_flash_dev_table[0]); 812 813 #define SD_INTERCONNECT_PARALLEL 0 814 #define SD_INTERCONNECT_FABRIC 1 815 #define SD_INTERCONNECT_FIBRE 2 816 #define SD_INTERCONNECT_SSA 3 817 #define SD_INTERCONNECT_SATA 4 818 #define SD_INTERCONNECT_SAS 5 819 820 #define SD_IS_PARALLEL_SCSI(un) \ 821 ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL) 822 #define SD_IS_SERIAL(un) \ 823 (((un)->un_interconnect_type == SD_INTERCONNECT_SATA) ||\ 824 ((un)->un_interconnect_type == SD_INTERCONNECT_SAS)) 825 826 /* 827 * Definitions used by device id registration routines 828 */ 829 #define VPD_HEAD_OFFSET 3 /* size of head for vpd page */ 830 #define VPD_PAGE_LENGTH 3 /* offset for pge length data */ 831 #define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */ 832 833 static kmutex_t sd_sense_mutex = {0}; 834 835 /* 836 * Macros for updates of the driver state 837 */ 838 #define New_state(un, s) \ 839 (un)->un_last_state = (un)->un_state, (un)->un_state = (s) 840 #define Restore_state(un) \ 841 { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); } 842 843 static struct sd_cdbinfo sd_cdbtab[] = { 844 { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, }, 845 { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, }, 846 { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, }, 847 { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, }, 848 }; 849 850 /* 851 * Specifies the number of seconds that must have elapsed since the last 852 * cmd. has completed for a device to be declared idle to the PM framework. 853 */ 854 static int sd_pm_idletime = 1; 855 856 /* 857 * Internal function prototypes 858 */ 859 860 #if (defined(__fibre)) 861 /* 862 * These #defines are to avoid namespace collisions that occur because this 863 * code is currently used to compile two separate driver modules: sd and ssd. 864 * All function names need to be treated this way (even if declared static) 865 * in order to allow the debugger to resolve the names properly. 866 * It is anticipated that in the near future the ssd module will be obsoleted, 867 * at which time this ugliness should go away. 
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties
#define	sd_check_solid_state		ssd_check_solid_state
#define	sd_check_emulation_mode		ssd_check_emulation_mode

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_pm_state_change		ssd_pm_state_change
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_target_change_task		ssd_target_change_task
#define	sd_log_dev_status_event		ssd_log_dev_status_event
#define	sd_log_lun_expansion_event	ssd_log_lun_expansion_event
#define	sd_log_eject_request_event	ssd_log_eject_request_event
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION	\
				ssd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
#define	sd_gesn_media_data_valid	ssd_gesn_media_data_valid
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info_com		ssd_get_media_info_com
#define	sd_get_media_info		ssd_get_media_info
#define	sd_get_media_info_ext		ssd_get_media_info_ext
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_nvpair_str_decode		ssd_nvpair_str_decode
#define	sd_strtok_r			ssd_strtok_r
#define	sd_set_properties		ssd_set_properties
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_watch_request_submit		ssd_watch_request_submit
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo
#define	sd_rmw_msg_print_handler	ssd_rmw_msg_print_handler

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);
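
/*
 * A sketch of the conventional loadable-module plumbing behind these
 * three entry points (the real bodies appear later in this file; the
 * details below are assumptions, not a copy of them):
 *
 *	int
 *	_init(void)
 *	{
 *		int err;
 *
 *		err = ddi_soft_state_init(&sd_state,
 *		    sizeof (struct sd_lun), SD_MAXUNIT);
 *		if (err != 0)
 *			return (err);
 *		if ((err = mod_install(&modlinkage)) != 0)
 *			ddi_soft_state_fini(&sd_state);
 *		return (err);
 *	}
 */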

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int sd_spin_up_unit(sd_ssc_t *ssc);

/*
 * Using sd_ssc_init to establish sd_ssc_t struct
 * Using sd_ssc_send to send uscsi internal command
 * Using sd_ssc_fini to free sd_ssc_t struct
 */
static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
    int flag, enum uio_seg dataspace, int path_flag);
static void sd_ssc_fini(sd_ssc_t *ssc);

/*
 * Using sd_ssc_assessment to set correct type-of-assessment
 * Using sd_ssc_post to post ereport & system log
 *	sd_ssc_post will call sd_ssc_print to print system log
 *	sd_ssc_post will call sd_ssc_ereport_post to post ereport
 */
static void sd_ssc_assessment(sd_ssc_t *ssc,
    enum sd_type_assessment tp_assess);

static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
static void sd_ssc_ereport_post(sd_ssc_t *ssc,
    enum sd_driver_assessment drv_assess);
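
/*
 * Typical calling sequence for the sd_ssc_* helpers above (a sketch;
 * the flags and assessments shown are plausible choices, not
 * prescriptions):
 *
 *	sd_ssc_t *ssc = sd_ssc_init(un);
 *	rval = sd_ssc_send(ssc, &ucmd, FKIOCTL, UIO_SYSSPACE,
 *	    SD_PATH_STANDARD);
 *	if (rval == 0)
 *		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
 *	else
 *		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
 *	sd_ssc_fini(ssc);
 */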

/*
 * Using sd_ssc_set_info to mark an un-decodable-data error.
 * Using sd_ssc_extract_info to transfer information from internal
 * data structures to sd_ssc_t.
 */
static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp,
    const char *fmt, ...);
static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

#ifdef _LP64
static void sd_enable_descr_sense(sd_ssc_t *ssc);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(sd_ssc_t *ssc);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
static void sd_set_properties(struct sd_lun *un, char *name, char *value);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(sd_ssc_t *ssc);
static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
static int sd_write_deviceid(sd_ssc_t *ssc);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(sd_ssc_t *ssc);

static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_pm_state_change(struct sd_lun *un, int level, int flag);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
static void sd_get_nv_sup(sd_ssc_t *ssc);
static dev_t sd_make_device(dev_info_t *devi);
static void sd_check_solid_state(sd_ssc_t *ssc);
static void sd_check_emulation_mode(sd_ssc_t *ssc);
static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);
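
/*
 * Example invocation (illustrative): enable the write cache while
 * leaving the read cache setting untouched.
 *
 *	rval = sd_cache_control(ssc, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE);
 */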

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(sd_ssc_t *ssc, int part);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);
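
/*
 * Conceptual sketch of how the layered chains above fit together: each
 * iostart function does its layer's work on the way "down" and hands
 * the buf to the next function in the chain; the matching iodone
 * functions unwind in reverse order on completion. (SD_NEXT_IOSTART
 * and SD_NEXT_IODONE are defined later in this file; the fragment is
 * illustrative.)
 *
 *	static void
 *	sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		...per-layer work on bp...
 *		SD_NEXT_IOSTART(index, un, bp);		(descend one layer)
 *	}
 *
 *	static void
 *	sd_example_iodone(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		...per-layer completion work...
 *		SD_NEXT_IODONE(index, un, bp);		(ascend one layer)
 *	}
 */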

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);
sd_taskq_create(void); 1506 static void sd_taskq_delete(void); 1507 static void sd_target_change_task(void *arg); 1508 static void sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag); 1509 static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag); 1510 static void sd_log_eject_request_event(struct sd_lun *un, int km_flag); 1511 static void sd_media_change_task(void *arg); 1512 1513 static int sd_handle_mchange(struct sd_lun *un); 1514 static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag); 1515 static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, 1516 uint32_t *lbap, int path_flag); 1517 static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, 1518 uint32_t *lbap, uint32_t *psp, int path_flag); 1519 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, 1520 int flag, int path_flag); 1521 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, 1522 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp); 1523 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag); 1524 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, 1525 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp); 1526 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, 1527 uchar_t usr_cmd, uchar_t *usr_bufp); 1528 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, 1529 struct dk_callback *dkc); 1530 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp); 1531 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, 1532 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1533 uchar_t *bufaddr, uint_t buflen, int path_flag); 1534 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 1535 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1536 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag); 1537 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, 1538 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag); 1539 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, 1540 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag); 1541 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 1542 size_t buflen, daddr_t start_block, int path_flag); 1543 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \ 1544 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \ 1545 path_flag) 1546 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\ 1547 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\ 1548 path_flag) 1549 1550 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, 1551 uint16_t buflen, uchar_t page_code, uchar_t page_control, 1552 uint16_t param_ptr, int path_flag); 1553 static int sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc, 1554 uchar_t *bufaddr, size_t buflen, uchar_t class_req); 1555 static boolean_t sd_gesn_media_data_valid(uchar_t *data); 1556 1557 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un); 1558 static void sd_free_rqs(struct sd_lun *un); 1559 1560 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, 1561 uchar_t *data, int len, int fmt); 1562 static void sd_panic_for_res_conflict(struct sd_lun *un); 1563 1564 /* 1565 * Disk Ioctl Function Prototypes 1566 */ 1567 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag); 1568 static int sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag); 1569 static int 
sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag); 1570 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag); 1571 1572 /* 1573 * Multi-host Ioctl Prototypes 1574 */ 1575 static int sd_check_mhd(dev_t dev, int interval); 1576 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1577 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1578 static char *sd_sname(uchar_t status); 1579 static void sd_mhd_resvd_recover(void *arg); 1580 static void sd_resv_reclaim_thread(); 1581 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1582 static int sd_reserve_release(dev_t dev, int cmd); 1583 static void sd_rmv_resv_reclaim_req(dev_t dev); 1584 static void sd_mhd_reset_notify_cb(caddr_t arg); 1585 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1586 mhioc_inkeys_t *usrp, int flag); 1587 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1588 mhioc_inresvs_t *usrp, int flag); 1589 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1590 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1591 static int sd_mhdioc_release(dev_t dev); 1592 static int sd_mhdioc_register_devid(dev_t dev); 1593 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1594 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1595 1596 /* 1597 * SCSI removable prototypes 1598 */ 1599 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1600 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1601 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1602 static int sr_pause_resume(dev_t dev, int mode); 1603 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1604 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1605 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1606 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1607 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1608 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1609 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1610 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1611 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1612 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1613 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1614 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1615 static int sr_eject(dev_t dev); 1616 static void sr_ejected(register struct sd_lun *un); 1617 static int sr_check_wp(dev_t dev); 1618 static opaque_t sd_watch_request_submit(struct sd_lun *un); 1619 static int sd_check_media(dev_t dev, enum dkio_state state); 1620 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1621 static void sd_delayed_cv_broadcast(void *arg); 1622 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1623 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1624 1625 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page); 1626 1627 /* 1628 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 
1629 */
1630 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag);
1631 static int sd_wm_cache_constructor(void *wm, void *un, int flags);
1632 static void sd_wm_cache_destructor(void *wm, void *un);
1633 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb,
1634 daddr_t endb, ushort_t typ);
1635 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb,
1636 daddr_t endb);
1637 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp);
1638 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm);
1639 static void sd_read_modify_write_task(void *arg);
1640 static int
1641 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
1642 struct buf **bpp);
1643
1644
1645 /*
1646 * Function prototypes for failfast support.
1647 */
1648 static void sd_failfast_flushq(struct sd_lun *un);
1649 static int sd_failfast_flushq_callback(struct buf *bp);
1650
1651 /*
1652 * Function prototypes to check for LSI devices
1653 */
1654 static void sd_is_lsi(struct sd_lun *un);
1655
1656 /*
1657 * Function prototypes for partial DMA support
1658 */
1659 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
1660 struct scsi_pkt *pkt, struct sd_xbuf *xp);
1661
1662
1663 /* Function prototypes for cmlb */
1664 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
1665 diskaddr_t start_block, size_t reqlength, void *tg_cookie);
1666
1667 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie);
1668
1669 /*
1670 * For printing the RMW warning message in a timely manner
1671 */
1672 static void sd_rmw_msg_print_handler(void *arg);
1673
1674 /*
1675 * Constants for failfast support:
1676 *
1677 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO
1678 * failfast processing being performed.
1679 *
1680 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing
1681 * failfast processing on all bufs with B_FAILFAST set.
1682 */
1683
1684 #define SD_FAILFAST_INACTIVE 0
1685 #define SD_FAILFAST_ACTIVE 1
1686
1687 /*
1688 * Bitmask to control behavior of buf(9S) flushes when a transition to
1689 * the failfast state occurs. Optional bits include:
1690 *
1691 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that
1692 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will
1693 * be flushed.
1694 *
1695 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the
1696 * driver, in addition to the regular wait queue. This includes the xbuf
1697 * queues. When clear, only the driver's wait queue will be flushed.
1698 */
1699 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01
1700 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02
1701
1702 /*
1703 * The default behavior is to only flush bufs that have B_FAILFAST set, but
1704 * to flush all queues within the driver.
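 *
 * Both behaviors are tunable. For example, to also flush bufs that do NOT
 * have B_FAILFAST set, the SD_FAILFAST_FLUSH_ALL_BUFS bit could be OR'ed in
 * via a hypothetical /etc/system entry (an illustrative sketch only):
 *
 *	set sd:sd_failfast_flushctl = 0x3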
1705 */ 1706 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1707 1708 1709 /* 1710 * SD Testing Fault Injection 1711 */ 1712 #ifdef SD_FAULT_INJECTION 1713 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1714 static void sd_faultinjection(struct scsi_pkt *pktp); 1715 static void sd_injection_log(char *buf, struct sd_lun *un); 1716 #endif 1717 1718 /* 1719 * Device driver ops vector 1720 */ 1721 static struct cb_ops sd_cb_ops = { 1722 sdopen, /* open */ 1723 sdclose, /* close */ 1724 sdstrategy, /* strategy */ 1725 nodev, /* print */ 1726 sddump, /* dump */ 1727 sdread, /* read */ 1728 sdwrite, /* write */ 1729 sdioctl, /* ioctl */ 1730 nodev, /* devmap */ 1731 nodev, /* mmap */ 1732 nodev, /* segmap */ 1733 nochpoll, /* poll */ 1734 sd_prop_op, /* cb_prop_op */ 1735 0, /* streamtab */ 1736 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1737 CB_REV, /* cb_rev */ 1738 sdaread, /* async I/O read entry point */ 1739 sdawrite /* async I/O write entry point */ 1740 }; 1741 1742 struct dev_ops sd_ops = { 1743 DEVO_REV, /* devo_rev, */ 1744 0, /* refcnt */ 1745 sdinfo, /* info */ 1746 nulldev, /* identify */ 1747 sdprobe, /* probe */ 1748 sdattach, /* attach */ 1749 sddetach, /* detach */ 1750 nodev, /* reset */ 1751 &sd_cb_ops, /* driver operations */ 1752 NULL, /* bus operations */ 1753 sdpower, /* power */ 1754 ddi_quiesce_not_needed, /* quiesce */ 1755 }; 1756 1757 /* 1758 * This is the loadable module wrapper. 1759 */ 1760 #include <sys/modctl.h> 1761 1762 #ifndef XPV_HVM_DRIVER 1763 static struct modldrv modldrv = { 1764 &mod_driverops, /* Type of module. This one is a driver */ 1765 SD_MODULE_NAME, /* Module name. */ 1766 &sd_ops /* driver ops */ 1767 }; 1768 1769 static struct modlinkage modlinkage = { 1770 MODREV_1, &modldrv, NULL 1771 }; 1772 1773 #else /* XPV_HVM_DRIVER */ 1774 static struct modlmisc modlmisc = { 1775 &mod_miscops, /* Type of module. This one is a misc */ 1776 "HVM " SD_MODULE_NAME, /* Module name. */ 1777 }; 1778 1779 static struct modlinkage modlinkage = { 1780 MODREV_1, &modlmisc, NULL 1781 }; 1782 1783 #endif /* XPV_HVM_DRIVER */ 1784 1785 static cmlb_tg_ops_t sd_tgops = { 1786 TG_DK_OPS_VERSION_1, 1787 sd_tg_rdwr, 1788 sd_tg_getinfo 1789 }; 1790 1791 static struct scsi_asq_key_strings sd_additional_codes[] = { 1792 0x81, 0, "Logical Unit is Reserved", 1793 0x85, 0, "Audio Address Not Valid", 1794 0xb6, 0, "Media Load Mechanism Failed", 1795 0xB9, 0, "Audio Play Operation Aborted", 1796 0xbf, 0, "Buffer Overflow for Read All Subcodes Command", 1797 0x53, 2, "Medium removal prevented", 1798 0x6f, 0, "Authentication failed during key exchange", 1799 0x6f, 1, "Key not present", 1800 0x6f, 2, "Key not established", 1801 0x6f, 3, "Read without proper authentication", 1802 0x6f, 4, "Mismatched region to this logical unit", 1803 0x6f, 5, "Region reset count error", 1804 0xffff, 0x0, NULL 1805 }; 1806 1807 1808 /* 1809 * Struct for passing printing information for sense data messages 1810 */ 1811 struct sd_sense_info { 1812 int ssi_severity; 1813 int ssi_pfa_flag; 1814 }; 1815 1816 /* 1817 * Table of function pointers for iostart-side routines. Separate "chains" 1818 * of layered function calls are formed by placing the function pointers 1819 * sequentially in the desired order. Functions are called according to an 1820 * incrementing table index ordering. The last function in each chain must 1821 * be sd_core_iostart(). 
The corresponding iodone-side routines are expected
1822 * in the sd_iodone_chain[] array.
1823 *
1824 * Note: It may seem more natural to organize both the iostart and iodone
1825 * functions together, into an array of structures (or some similar
1826 * organization) with a common index, rather than two separate arrays which
1827 * must be maintained in synchronization. The purpose of this division is
1828 * to achieve improved performance: individual arrays allow for more
1829 * effective cache line utilization on certain platforms.
1830 */
1831
1832 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp);
1833
1834
1835 static sd_chain_t sd_iostart_chain[] = {
1836
1837 /* Chain for buf IO for disk drive targets (PM enabled) */
1838 sd_mapblockaddr_iostart, /* Index: 0 */
1839 sd_pm_iostart, /* Index: 1 */
1840 sd_core_iostart, /* Index: 2 */
1841
1842 /* Chain for buf IO for disk drive targets (PM disabled) */
1843 sd_mapblockaddr_iostart, /* Index: 3 */
1844 sd_core_iostart, /* Index: 4 */
1845
1846 /*
1847 * Chain for buf IO for removable-media or large sector size
1848 * disk drive targets with RMW needed (PM enabled)
1849 */
1850 sd_mapblockaddr_iostart, /* Index: 5 */
1851 sd_mapblocksize_iostart, /* Index: 6 */
1852 sd_pm_iostart, /* Index: 7 */
1853 sd_core_iostart, /* Index: 8 */
1854
1855 /*
1856 * Chain for buf IO for removable-media or large sector size
1857 * disk drive targets with RMW needed (PM disabled)
1858 */
1859 sd_mapblockaddr_iostart, /* Index: 9 */
1860 sd_mapblocksize_iostart, /* Index: 10 */
1861 sd_core_iostart, /* Index: 11 */
1862
1863 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1864 sd_mapblockaddr_iostart, /* Index: 12 */
1865 sd_checksum_iostart, /* Index: 13 */
1866 sd_pm_iostart, /* Index: 14 */
1867 sd_core_iostart, /* Index: 15 */
1868
1869 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1870 sd_mapblockaddr_iostart, /* Index: 16 */
1871 sd_checksum_iostart, /* Index: 17 */
1872 sd_core_iostart, /* Index: 18 */
1873
1874 /* Chain for USCSI commands (all targets) */
1875 sd_pm_iostart, /* Index: 19 */
1876 sd_core_iostart, /* Index: 20 */
1877
1878 /* Chain for checksumming USCSI commands (all targets) */
1879 sd_checksum_uscsi_iostart, /* Index: 21 */
1880 sd_pm_iostart, /* Index: 22 */
1881 sd_core_iostart, /* Index: 23 */
1882
1883 /* Chain for "direct" USCSI commands (all targets) */
1884 sd_core_iostart, /* Index: 24 */
1885
1886 /* Chain for "direct priority" USCSI commands (all targets) */
1887 sd_core_iostart, /* Index: 25 */
1888
1889 /*
1890 * Chain for buf IO for large sector size disk drive targets
1891 * with RMW needed with checksumming (PM enabled)
1892 */
1893 sd_mapblockaddr_iostart, /* Index: 26 */
1894 sd_mapblocksize_iostart, /* Index: 27 */
1895 sd_checksum_iostart, /* Index: 28 */
1896 sd_pm_iostart, /* Index: 29 */
1897 sd_core_iostart, /* Index: 30 */
1898
1899 /*
1900 * Chain for buf IO for large sector size disk drive targets
1901 * with RMW needed with checksumming (PM disabled)
1902 */
1903 sd_mapblockaddr_iostart, /* Index: 31 */
1904 sd_mapblocksize_iostart, /* Index: 32 */
1905 sd_checksum_iostart, /* Index: 33 */
1906 sd_core_iostart, /* Index: 34 */
1907
1908 };
1909
1910 /*
1911 * Macros to locate the first function of each iostart chain in the
1912 * sd_iostart_chain[] array. These are located by the index in the array.
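 *
 * For example, dispatching a buf down the plain disk chain (PM enabled)
 * begins at index SD_CHAIN_DISK_IOSTART (0), i.e. at
 * sd_mapblockaddr_iostart(). An illustrative sketch of such a dispatch,
 * using the SD_BEGIN_IOSTART() macro defined later in this file:
 *
 *	SD_BEGIN_IOSTART(SD_CHAIN_DISK_IOSTART, un, bp);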
1913 */
1914 #define SD_CHAIN_DISK_IOSTART 0
1915 #define SD_CHAIN_DISK_IOSTART_NO_PM 3
1916 #define SD_CHAIN_MSS_DISK_IOSTART 5
1917 #define SD_CHAIN_RMMEDIA_IOSTART 5
1918 #define SD_CHAIN_MSS_DISK_IOSTART_NO_PM 9
1919 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9
1920 #define SD_CHAIN_CHKSUM_IOSTART 12
1921 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16
1922 #define SD_CHAIN_USCSI_CMD_IOSTART 19
1923 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21
1924 #define SD_CHAIN_DIRECT_CMD_IOSTART 24
1925 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25
1926 #define SD_CHAIN_MSS_CHKSUM_IOSTART 26
1927 #define SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM 31
1928
1929
1930 /*
1931 * Table of function pointers for the iodone-side routines for the driver-
1932 * internal layering mechanism. The calling sequence for iodone routines
1933 * uses a decrementing table index, so the last routine called in a chain
1934 * must be at the lowest array index location for that chain. The last
1935 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs)
1936 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering
1937 * of the functions in an iodone side chain must correspond to the ordering
1938 * of the iostart routines for that chain. Note that there is no iodone
1939 * side routine that corresponds to sd_core_iostart(), so there is no
1940 * entry in the table for this.
1941 */
1942
1943 static sd_chain_t sd_iodone_chain[] = {
1944
1945 /* Chain for buf IO for disk drive targets (PM enabled) */
1946 sd_buf_iodone, /* Index: 0 */
1947 sd_mapblockaddr_iodone, /* Index: 1 */
1948 sd_pm_iodone, /* Index: 2 */
1949
1950 /* Chain for buf IO for disk drive targets (PM disabled) */
1951 sd_buf_iodone, /* Index: 3 */
1952 sd_mapblockaddr_iodone, /* Index: 4 */
1953
1954 /*
1955 * Chain for buf IO for removable-media or large sector size
1956 * disk drive targets with RMW needed (PM enabled)
1957 */
1958 sd_buf_iodone, /* Index: 5 */
1959 sd_mapblockaddr_iodone, /* Index: 6 */
1960 sd_mapblocksize_iodone, /* Index: 7 */
1961 sd_pm_iodone, /* Index: 8 */
1962
1963 /*
1964 * Chain for buf IO for removable-media or large sector size
1965 * disk drive targets with RMW needed (PM disabled)
1966 */
1967 sd_buf_iodone, /* Index: 9 */
1968 sd_mapblockaddr_iodone, /* Index: 10 */
1969 sd_mapblocksize_iodone, /* Index: 11 */
1970
1971 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1972 sd_buf_iodone, /* Index: 12 */
1973 sd_mapblockaddr_iodone, /* Index: 13 */
1974 sd_checksum_iodone, /* Index: 14 */
1975 sd_pm_iodone, /* Index: 15 */
1976
1977 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1978 sd_buf_iodone, /* Index: 16 */
1979 sd_mapblockaddr_iodone, /* Index: 17 */
1980 sd_checksum_iodone, /* Index: 18 */
1981
1982 /* Chain for USCSI commands (non-checksum targets) */
1983 sd_uscsi_iodone, /* Index: 19 */
1984 sd_pm_iodone, /* Index: 20 */
1985
1986 /* Chain for USCSI commands (checksum targets) */
1987 sd_uscsi_iodone, /* Index: 21 */
1988 sd_checksum_uscsi_iodone, /* Index: 22 */
1989 sd_pm_iodone, /* Index: 23 */
1990
1991 /* Chain for "direct" USCSI commands (all targets) */
1992 sd_uscsi_iodone, /* Index: 24 */
1993
1994 /* Chain for "direct priority" USCSI commands (all targets) */
1995 sd_uscsi_iodone, /* Index: 25 */
1996
1997 /*
1998 * Chain for buf IO for large sector size disk drive targets
1999 * with checksumming (PM enabled)
2000 */
2001 sd_buf_iodone, /* Index: 26 */
2002 sd_mapblockaddr_iodone, /* Index: 27 */
2003 sd_mapblocksize_iodone, /* Index: 28 */
2004 sd_checksum_iodone, /* Index: 29 */
2005 sd_pm_iodone, /* Index: 30 */
2006
2007 /*
2008 * Chain for buf IO for large sector size disk drive targets
2009 * with checksumming (PM disabled)
2010 */
2011 sd_buf_iodone, /* Index: 31 */
2012 sd_mapblockaddr_iodone, /* Index: 32 */
2013 sd_mapblocksize_iodone, /* Index: 33 */
2014 sd_checksum_iodone, /* Index: 34 */
2015 };
2016
2017
2018 /*
2019 * Macros to locate the "first" function in the sd_iodone_chain[] array for
2020 * each iodone-side chain. These are located by the array index, but as the
2021 * iodone side functions are called in a decrementing-index order, the
2022 * highest index number in each chain must be specified (as these correspond
2023 * to the first function in the iodone chain that will be called by the core
2024 * at IO completion time).
2025 */
2026
2027 #define SD_CHAIN_DISK_IODONE 2
2028 #define SD_CHAIN_DISK_IODONE_NO_PM 4
2029 #define SD_CHAIN_RMMEDIA_IODONE 8
2030 #define SD_CHAIN_MSS_DISK_IODONE 8
2031 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11
2032 #define SD_CHAIN_MSS_DISK_IODONE_NO_PM 11
2033 #define SD_CHAIN_CHKSUM_IODONE 15
2034 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18
2035 #define SD_CHAIN_USCSI_CMD_IODONE 20
2036 #define SD_CHAIN_USCSI_CHKSUM_IODONE 23
2037 #define SD_CHAIN_DIRECT_CMD_IODONE 24
2038 #define SD_CHAIN_PRIORITY_CMD_IODONE 25
2039 #define SD_CHAIN_MSS_CHKSUM_IODONE 30
2040 #define SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM 34
2041
2042
2043
2044 /*
2045 * Array to map a layering chain index to the appropriate initpkt routine.
2046 * The redundant entries are present so that the index used for accessing
2047 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2048 * with this table as well.
2049 */
2050 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
2051
2052 static sd_initpkt_t sd_initpkt_map[] = {
2053
2054 /* Chain for buf IO for disk drive targets (PM enabled) */
2055 sd_initpkt_for_buf, /* Index: 0 */
2056 sd_initpkt_for_buf, /* Index: 1 */
2057 sd_initpkt_for_buf, /* Index: 2 */
2058
2059 /* Chain for buf IO for disk drive targets (PM disabled) */
2060 sd_initpkt_for_buf, /* Index: 3 */
2061 sd_initpkt_for_buf, /* Index: 4 */
2062
2063 /*
2064 * Chain for buf IO for removable-media or large sector size
2065 * disk drive targets (PM enabled)
2066 */
2067 sd_initpkt_for_buf, /* Index: 5 */
2068 sd_initpkt_for_buf, /* Index: 6 */
2069 sd_initpkt_for_buf, /* Index: 7 */
2070 sd_initpkt_for_buf, /* Index: 8 */
2071
2072 /*
2073 * Chain for buf IO for removable-media or large sector size
2074 * disk drive targets (PM disabled)
2075 */
2076 sd_initpkt_for_buf, /* Index: 9 */
2077 sd_initpkt_for_buf, /* Index: 10 */
2078 sd_initpkt_for_buf, /* Index: 11 */
2079
2080 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2081 sd_initpkt_for_buf, /* Index: 12 */
2082 sd_initpkt_for_buf, /* Index: 13 */
2083 sd_initpkt_for_buf, /* Index: 14 */
2084 sd_initpkt_for_buf, /* Index: 15 */
2085
2086 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2087 sd_initpkt_for_buf, /* Index: 16 */
2088 sd_initpkt_for_buf, /* Index: 17 */
2089 sd_initpkt_for_buf, /* Index: 18 */
2090
2091 /* Chain for USCSI commands (non-checksum targets) */
2092 sd_initpkt_for_uscsi, /* Index: 19 */
2093 sd_initpkt_for_uscsi, /* Index: 20 */
2094
2095 /* Chain for USCSI commands (checksum targets) */
2096 sd_initpkt_for_uscsi, /* Index: 21 */
2097 sd_initpkt_for_uscsi, /* Index: 22 */
2098 sd_initpkt_for_uscsi, /* Index: 23 */
2099
2100 /* Chain for "direct" USCSI commands (all targets) */
2101 sd_initpkt_for_uscsi, /* Index: 24 */
2102
2103 /* Chain for "direct priority" USCSI commands (all targets) */
2104 sd_initpkt_for_uscsi, /* Index: 25 */
2105
2106 /*
2107 * Chain for buf IO for large sector size disk drive targets
2108 * with checksumming (PM enabled)
2109 */
2110 sd_initpkt_for_buf, /* Index: 26 */
2111 sd_initpkt_for_buf, /* Index: 27 */
2112 sd_initpkt_for_buf, /* Index: 28 */
2113 sd_initpkt_for_buf, /* Index: 29 */
2114 sd_initpkt_for_buf, /* Index: 30 */
2115
2116 /*
2117 * Chain for buf IO for large sector size disk drive targets
2118 * with checksumming (PM disabled)
2119 */
2120 sd_initpkt_for_buf, /* Index: 31 */
2121 sd_initpkt_for_buf, /* Index: 32 */
2122 sd_initpkt_for_buf, /* Index: 33 */
2123 sd_initpkt_for_buf, /* Index: 34 */
2124 };
2125
2126
2127 /*
2128 * Array to map a layering chain index to the appropriate destroypkt routine.
2129 * The redundant entries are present so that the index used for accessing
2130 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2131 * with this table as well.
2132 */
2133 typedef void (*sd_destroypkt_t)(struct buf *);
2134
2135 static sd_destroypkt_t sd_destroypkt_map[] = {
2136
2137 /* Chain for buf IO for disk drive targets (PM enabled) */
2138 sd_destroypkt_for_buf, /* Index: 0 */
2139 sd_destroypkt_for_buf, /* Index: 1 */
2140 sd_destroypkt_for_buf, /* Index: 2 */
2141
2142 /* Chain for buf IO for disk drive targets (PM disabled) */
2143 sd_destroypkt_for_buf, /* Index: 3 */
2144 sd_destroypkt_for_buf, /* Index: 4 */
2145
2146 /*
2147 * Chain for buf IO for removable-media or large sector size
2148 * disk drive targets (PM enabled)
2149 */
2150 sd_destroypkt_for_buf, /* Index: 5 */
2151 sd_destroypkt_for_buf, /* Index: 6 */
2152 sd_destroypkt_for_buf, /* Index: 7 */
2153 sd_destroypkt_for_buf, /* Index: 8 */
2154
2155 /*
2156 * Chain for buf IO for removable-media or large sector size
2157 * disk drive targets (PM disabled)
2158 */
2159 sd_destroypkt_for_buf, /* Index: 9 */
2160 sd_destroypkt_for_buf, /* Index: 10 */
2161 sd_destroypkt_for_buf, /* Index: 11 */
2162
2163 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2164 sd_destroypkt_for_buf, /* Index: 12 */
2165 sd_destroypkt_for_buf, /* Index: 13 */
2166 sd_destroypkt_for_buf, /* Index: 14 */
2167 sd_destroypkt_for_buf, /* Index: 15 */
2168
2169 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2170 sd_destroypkt_for_buf, /* Index: 16 */
2171 sd_destroypkt_for_buf, /* Index: 17 */
2172 sd_destroypkt_for_buf, /* Index: 18 */
2173
2174 /* Chain for USCSI commands (non-checksum targets) */
2175 sd_destroypkt_for_uscsi, /* Index: 19 */
2176 sd_destroypkt_for_uscsi, /* Index: 20 */
2177
2178 /* Chain for USCSI commands (checksum targets) */
2179 sd_destroypkt_for_uscsi, /* Index: 21 */
2180 sd_destroypkt_for_uscsi, /* Index: 22 */
2181 sd_destroypkt_for_uscsi, /* Index: 23 */
2182
2183 /* Chain for "direct" USCSI commands (all targets) */
2184 sd_destroypkt_for_uscsi, /* Index: 24 */
2185
2186 /* Chain for "direct priority" USCSI commands (all targets) */
2187 sd_destroypkt_for_uscsi, /* Index: 25 */
2188
2189 /*
2190 * Chain for buf IO for large sector size disk drive targets
2191 * with checksumming (PM enabled)
2192 */
2193 sd_destroypkt_for_buf, /* Index: 26 */
2194 sd_destroypkt_for_buf, /* Index: 27 */
2195 sd_destroypkt_for_buf, /* Index: 28 */
2196 sd_destroypkt_for_buf, /* Index: 29 */
2197 sd_destroypkt_for_buf, /* Index: 30 */
2198
2199 /*
2200 * Chain for buf IO for large sector size disk drive targets
2201 * with checksumming (PM disabled)
2202 */
2203 sd_destroypkt_for_buf, /* Index: 31 */
2204 sd_destroypkt_for_buf, /* Index: 32 */
2205 sd_destroypkt_for_buf, /* Index: 33 */
2206 sd_destroypkt_for_buf, /* Index: 34 */
2207 };
2208
2209
2210
2211 /*
2212 * Array to map a layering chain index to the appropriate chain "type".
2213 * The chain type indicates a specific property/usage of the chain.
2214 * The redundant entries are present so that the index used for accessing
2215 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2216 * with this table as well.
2217 */
2218
2219 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */
2220 #define SD_CHAIN_BUFIO 1 /* regular buf IO */
2221 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */
2222 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */
2223 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */
2224 /* (for error recovery) */
2225
2226 static int sd_chain_type_map[] = {
2227
2228 /* Chain for buf IO for disk drive targets (PM enabled) */
2229 SD_CHAIN_BUFIO, /* Index: 0 */
2230 SD_CHAIN_BUFIO, /* Index: 1 */
2231 SD_CHAIN_BUFIO, /* Index: 2 */
2232
2233 /* Chain for buf IO for disk drive targets (PM disabled) */
2234 SD_CHAIN_BUFIO, /* Index: 3 */
2235 SD_CHAIN_BUFIO, /* Index: 4 */
2236
2237 /*
2238 * Chain for buf IO for removable-media or large sector size
2239 * disk drive targets (PM enabled)
2240 */
2241 SD_CHAIN_BUFIO, /* Index: 5 */
2242 SD_CHAIN_BUFIO, /* Index: 6 */
2243 SD_CHAIN_BUFIO, /* Index: 7 */
2244 SD_CHAIN_BUFIO, /* Index: 8 */
2245
2246 /*
2247 * Chain for buf IO for removable-media or large sector size
2248 * disk drive targets (PM disabled)
2249 */
2250 SD_CHAIN_BUFIO, /* Index: 9 */
2251 SD_CHAIN_BUFIO, /* Index: 10 */
2252 SD_CHAIN_BUFIO, /* Index: 11 */
2253
2254 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2255 SD_CHAIN_BUFIO, /* Index: 12 */
2256 SD_CHAIN_BUFIO, /* Index: 13 */
2257 SD_CHAIN_BUFIO, /* Index: 14 */
2258 SD_CHAIN_BUFIO, /* Index: 15 */
2259
2260 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2261 SD_CHAIN_BUFIO, /* Index: 16 */
2262 SD_CHAIN_BUFIO, /* Index: 17 */
2263 SD_CHAIN_BUFIO, /* Index: 18 */
2264
2265 /* Chain for USCSI commands (non-checksum targets) */
2266 SD_CHAIN_USCSI, /* Index: 19 */
2267 SD_CHAIN_USCSI, /* Index: 20 */
2268
2269 /* Chain for USCSI commands (checksum targets) */
2270 SD_CHAIN_USCSI, /* Index: 21 */
2271 SD_CHAIN_USCSI, /* Index: 22 */
2272 SD_CHAIN_USCSI, /* Index: 23 */
2273
2274 /* Chain for "direct" USCSI commands (all targets) */
2275 SD_CHAIN_DIRECT, /* Index: 24 */
2276
2277 /* Chain for "direct priority" USCSI commands (all targets) */
2278 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */
2279
2280 /*
2281 * Chain for buf IO for large sector size disk drive targets
2282 * with checksumming (PM enabled)
2283 */
2284 SD_CHAIN_BUFIO, /* Index: 26 */
2285 SD_CHAIN_BUFIO, /* Index: 27 */
2286 SD_CHAIN_BUFIO, /* Index: 28 */
2287 SD_CHAIN_BUFIO, /* Index: 29 */
2288 SD_CHAIN_BUFIO, /* Index: 30 */
2289
2290 /*
2291 * Chain for buf IO for large sector size disk drive targets
2292 * with checksumming (PM disabled)
2293 */
2294 SD_CHAIN_BUFIO, /* Index: 31 */
2295 SD_CHAIN_BUFIO, /* Index: 32 */
2296 SD_CHAIN_BUFIO, /* Index: 33 */
2297 SD_CHAIN_BUFIO, /* Index: 34 */
2298 };
2299
2300
2301 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain.
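 * A usage sketch (illustrative only): completion or retry logic can branch
 * on the originating chain type recorded in an xbuf, e.g.
 *
 *	if (SD_IS_BUFIO(xp)) {
 *		... handle as a buf(9S) IO ...
 *	}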
*/
2302 #define SD_IS_BUFIO(xp) \
2303 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2304
2305 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2306 #define SD_IS_DIRECT_PRIORITY(xp) \
2307 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2308
2309
2310
2311 /*
2312 * Struct, array, and macros to map a specific chain to the appropriate
2313 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2314 *
2315 * The sd_chain_index_map[] array is used at attach time to set the various
2316 * un_xxx_chain type members of the sd_lun softstate to the specific layering
2317 * chain to be used with the instance. This allows different instances to use
2318 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2319 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2320 * values at sd_xbuf init time, this allows (1) layering chains to be changed
2321 * dynamically and without the use of locking, and (2) a layer to update the
2322 * xb_chain_io[start|done] member in a given xbuf with its current index value,
2323 * to allow for deferred processing of an IO within the same chain from a
2324 * different execution context.
2325 */
2326
2327 struct sd_chain_index {
2328 int sci_iostart_index;
2329 int sci_iodone_index;
2330 };
2331
2332 static struct sd_chain_index sd_chain_index_map[] = {
2333 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE },
2334 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM },
2335 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE },
2336 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM },
2337 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE },
2338 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM },
2339 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE },
2340 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE },
2341 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE },
2342 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE },
2343 { SD_CHAIN_MSS_CHKSUM_IOSTART, SD_CHAIN_MSS_CHKSUM_IODONE },
2344 { SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM, SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM },
2345
2346 };
2347
2348
2349 /*
2350 * The following are indexes into the sd_chain_index_map[] array.
2351 */
2352
2353 /* un->un_buf_chain_type must be set to one of these */
2354 #define SD_CHAIN_INFO_DISK 0
2355 #define SD_CHAIN_INFO_DISK_NO_PM 1
2356 #define SD_CHAIN_INFO_RMMEDIA 2
2357 #define SD_CHAIN_INFO_MSS_DISK 2
2358 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3
2359 #define SD_CHAIN_INFO_MSS_DSK_NO_PM 3
2360 #define SD_CHAIN_INFO_CHKSUM 4
2361 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5
2362 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM 10
2363 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM_NO_PM 11
2364
2365 /* un->un_uscsi_chain_type must be set to one of these */
2366 #define SD_CHAIN_INFO_USCSI_CMD 6
2367 /* USCSI with PM disabled is the same as DIRECT */
2368 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8
2369 #define SD_CHAIN_INFO_USCSI_CHKSUM 7
2370
2371 /* un->un_direct_chain_type must be set to one of these */
2372 #define SD_CHAIN_INFO_DIRECT_CMD 8
2373
2374 /* un->un_priority_chain_type must be set to one of these */
2375 #define SD_CHAIN_INFO_PRIORITY_CMD 9
2376
2377 /* size for devid inquiries */
2378 #define MAX_INQUIRY_SIZE 0xF0
2379
2380 /*
2381 * Macros used by functions to pass a given buf(9S) struct along to the
2382 * next function in the layering chain for further processing.
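 *
 * For example, a hypothetical layer routine (an illustrative sketch only,
 * not a function in this driver) would perform its per-layer processing
 * on bp and then hand the buf to the next function in its chain:
 *
 *	static void
 *	sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		... per-layer processing of bp ...
 *		SD_NEXT_IOSTART(index, un, bp);
 *	}
 *
 * The iodone-side counterpart would likewise finish its completion
 * processing and then call SD_NEXT_IODONE(index, un, bp).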
2383 * 2384 * In the following macros, passing more than three arguments to the called 2385 * routines causes the optimizer for the SPARC compiler to stop doing tail 2386 * call elimination which results in significant performance degradation. 2387 */ 2388 #define SD_BEGIN_IOSTART(index, un, bp) \ 2389 ((*(sd_iostart_chain[index]))(index, un, bp)) 2390 2391 #define SD_BEGIN_IODONE(index, un, bp) \ 2392 ((*(sd_iodone_chain[index]))(index, un, bp)) 2393 2394 #define SD_NEXT_IOSTART(index, un, bp) \ 2395 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2396 2397 #define SD_NEXT_IODONE(index, un, bp) \ 2398 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2399 2400 /* 2401 * Function: _init 2402 * 2403 * Description: This is the driver _init(9E) entry point. 2404 * 2405 * Return Code: Returns the value from mod_install(9F) or 2406 * ddi_soft_state_init(9F) as appropriate. 2407 * 2408 * Context: Called when driver module loaded. 2409 */ 2410 2411 int 2412 _init(void) 2413 { 2414 int err; 2415 2416 /* establish driver name from module name */ 2417 sd_label = (char *)mod_modname(&modlinkage); 2418 2419 #ifndef XPV_HVM_DRIVER 2420 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2421 SD_MAXUNIT); 2422 if (err != 0) { 2423 return (err); 2424 } 2425 2426 #else /* XPV_HVM_DRIVER */ 2427 /* Remove the leading "hvm_" from the module name */ 2428 ASSERT(strncmp(sd_label, "hvm_", strlen("hvm_")) == 0); 2429 sd_label += strlen("hvm_"); 2430 2431 #endif /* XPV_HVM_DRIVER */ 2432 2433 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2434 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2435 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2436 2437 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2438 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2439 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2440 2441 /* 2442 * it's ok to init here even for fibre device 2443 */ 2444 sd_scsi_probe_cache_init(); 2445 2446 sd_scsi_target_lun_init(); 2447 2448 /* 2449 * Creating taskq before mod_install ensures that all callers (threads) 2450 * that enter the module after a successful mod_install encounter 2451 * a valid taskq. 2452 */ 2453 sd_taskq_create(); 2454 2455 err = mod_install(&modlinkage); 2456 if (err != 0) { 2457 /* delete taskq if install fails */ 2458 sd_taskq_delete(); 2459 2460 mutex_destroy(&sd_detach_mutex); 2461 mutex_destroy(&sd_log_mutex); 2462 mutex_destroy(&sd_label_mutex); 2463 2464 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2465 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2466 cv_destroy(&sd_tr.srq_inprocess_cv); 2467 2468 sd_scsi_probe_cache_fini(); 2469 2470 sd_scsi_target_lun_fini(); 2471 2472 #ifndef XPV_HVM_DRIVER 2473 ddi_soft_state_fini(&sd_state); 2474 #endif /* !XPV_HVM_DRIVER */ 2475 return (err); 2476 } 2477 2478 return (err); 2479 } 2480 2481 2482 /* 2483 * Function: _fini 2484 * 2485 * Description: This is the driver _fini(9E) entry point. 2486 * 2487 * Return Code: Returns the value from mod_remove(9F) 2488 * 2489 * Context: Called when driver module is unloaded. 
2490 */
2491
2492 int
2493 _fini(void)
2494 {
2495 int err;
2496
2497 if ((err = mod_remove(&modlinkage)) != 0) {
2498 return (err);
2499 }
2500
2501 sd_taskq_delete();
2502
2503 mutex_destroy(&sd_detach_mutex);
2504 mutex_destroy(&sd_log_mutex);
2505 mutex_destroy(&sd_label_mutex);
2506 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2507
2508 sd_scsi_probe_cache_fini();
2509
2510 sd_scsi_target_lun_fini();
2511
2512 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2513 cv_destroy(&sd_tr.srq_inprocess_cv);
2514
2515 #ifndef XPV_HVM_DRIVER
2516 ddi_soft_state_fini(&sd_state);
2517 #endif /* !XPV_HVM_DRIVER */
2518
2519 return (err);
2520 }
2521
2522
2523 /*
2524 * Function: _info
2525 *
2526 * Description: This is the driver _info(9E) entry point.
2527 *
2528 * Arguments: modinfop - pointer to the driver modinfo structure
2529 *
2530 * Return Code: Returns the value from mod_info(9F).
2531 *
2532 * Context: Kernel thread context
2533 */
2534
2535 int
2536 _info(struct modinfo *modinfop)
2537 {
2538 return (mod_info(&modlinkage, modinfop));
2539 }
2540
2541
2542 /*
2543 * The following routines implement the driver message logging facility.
2544 * They provide component- and level-based debug output filtering.
2545 * Output may also be restricted to messages for a single instance by
2546 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2547 * to NULL, then messages for all instances are printed.
2548 *
2549 * These routines have been cloned from each other due to the language
2550 * constraints of macros and variable argument list processing.
2551 */
2552
2553
2554 /*
2555 * Function: sd_log_err
2556 *
2557 * Description: This routine is called by the SD_ERROR macro for debug
2558 * logging of error conditions.
2559 *
2560 * Arguments: comp - driver component being logged
2561 * un - pointer to driver soft state (unit) structure
2562 * fmt - error string and format to be logged
2563 */
2564
2565 static void
2566 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2567 {
2568 va_list ap;
2569 dev_info_t *dev;
2570
2571 ASSERT(un != NULL);
2572 dev = SD_DEVINFO(un);
2573 ASSERT(dev != NULL);
2574
2575 /*
2576 * Filter messages based on the global component and level masks.
2577 * Also print if un matches the value of sd_debug_un, or if
2578 * sd_debug_un is set to NULL.
2579 */
2580 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2581 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2582 mutex_enter(&sd_log_mutex);
2583 va_start(ap, fmt);
2584 (void) vsprintf(sd_log_buf, fmt, ap);
2585 va_end(ap);
2586 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2587 mutex_exit(&sd_log_mutex);
2588 }
2589 #ifdef SD_FAULT_INJECTION
2590 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2591 if (un->sd_injection_mask & comp) {
2592 mutex_enter(&sd_log_mutex);
2593 va_start(ap, fmt);
2594 (void) vsprintf(sd_log_buf, fmt, ap);
2595 va_end(ap);
2596 sd_injection_log(sd_log_buf, un);
2597 mutex_exit(&sd_log_mutex);
2598 }
2599 #endif
2600 }
2601
2602
2603 /*
2604 * Function: sd_log_info
2605 *
2606 * Description: This routine is called by the SD_INFO macro for debug
2607 * logging of general purpose informational conditions.
2608 *
2609 * Arguments: component - driver component being logged
2610 * un - pointer to driver soft state (unit) structure
2611 * fmt - info string and format to be logged
2612 */
2613
2614 static void
2615 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2616 {
2617 va_list ap;
2618 dev_info_t *dev;
2619
2620 ASSERT(un != NULL);
2621 dev = SD_DEVINFO(un);
2622 ASSERT(dev != NULL);
2623
2624 /*
2625 * Filter messages based on the global component and level masks.
2626 * Also print if un matches the value of sd_debug_un, or if
2627 * sd_debug_un is set to NULL.
2628 */
2629 if ((sd_component_mask & component) &&
2630 (sd_level_mask & SD_LOGMASK_INFO) &&
2631 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2632 mutex_enter(&sd_log_mutex);
2633 va_start(ap, fmt);
2634 (void) vsprintf(sd_log_buf, fmt, ap);
2635 va_end(ap);
2636 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2637 mutex_exit(&sd_log_mutex);
2638 }
2639 #ifdef SD_FAULT_INJECTION
2640 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2641 if (un->sd_injection_mask & component) {
2642 mutex_enter(&sd_log_mutex);
2643 va_start(ap, fmt);
2644 (void) vsprintf(sd_log_buf, fmt, ap);
2645 va_end(ap);
2646 sd_injection_log(sd_log_buf, un);
2647 mutex_exit(&sd_log_mutex);
2648 }
2649 #endif
2650 }
2651
2652
2653 /*
2654 * Function: sd_log_trace
2655 *
2656 * Description: This routine is called by the SD_TRACE macro for debug
2657 * logging of trace conditions (i.e. function entry/exit).
2658 *
2659 * Arguments: component - driver component being logged
2660 * un - pointer to driver soft state (unit) structure
2661 * fmt - trace string and format to be logged
2662 */
2663
2664 static void
2665 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2666 {
2667 va_list ap;
2668 dev_info_t *dev;
2669
2670 ASSERT(un != NULL);
2671 dev = SD_DEVINFO(un);
2672 ASSERT(dev != NULL);
2673
2674 /*
2675 * Filter messages based on the global component and level masks.
2676 * Also print if un matches the value of sd_debug_un, or if
2677 * sd_debug_un is set to NULL.
2678 */
2679 if ((sd_component_mask & component) &&
2680 (sd_level_mask & SD_LOGMASK_TRACE) &&
2681 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2682 mutex_enter(&sd_log_mutex);
2683 va_start(ap, fmt);
2684 (void) vsprintf(sd_log_buf, fmt, ap);
2685 va_end(ap);
2686 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2687 mutex_exit(&sd_log_mutex);
2688 }
2689 #ifdef SD_FAULT_INJECTION
2690 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2691 if (un->sd_injection_mask & component) {
2692 mutex_enter(&sd_log_mutex);
2693 va_start(ap, fmt);
2694 (void) vsprintf(sd_log_buf, fmt, ap);
2695 va_end(ap);
2696 sd_injection_log(sd_log_buf, un);
2697 mutex_exit(&sd_log_mutex);
2698 }
2699 #endif
2700 }
2701
2702
2703 /*
2704 * Function: sdprobe
2705 *
2706 * Description: This is the driver probe(9e) entry point function.
2707 *
2708 * Arguments: devi - opaque device info handle
2709 *
2710 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful.
2711 * DDI_PROBE_FAILURE: If the probe failed.
2712 * DDI_PROBE_PARTIAL: If the instance is not present now,
2713 * but may be present in the future.
2714 */
2715
2716 static int
2717 sdprobe(dev_info_t *devi)
2718 {
2719 struct scsi_device *devp;
2720 int rval;
2721 #ifndef XPV_HVM_DRIVER
2722 int instance = ddi_get_instance(devi);
2723 #endif /* !XPV_HVM_DRIVER */
2724
2725 /*
2726 * if it wasn't for pln, sdprobe could actually be nulldev
2727 * in the "__fibre" case.
2728 */
2729 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
2730 return (DDI_PROBE_DONTCARE);
2731 }
2732
2733 devp = ddi_get_driver_private(devi);
2734
2735 if (devp == NULL) {
2736 /* Oops... nexus driver is mis-configured...
*/ 2737 return (DDI_PROBE_FAILURE); 2738 } 2739 2740 #ifndef XPV_HVM_DRIVER 2741 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2742 return (DDI_PROBE_PARTIAL); 2743 } 2744 #endif /* !XPV_HVM_DRIVER */ 2745 2746 /* 2747 * Call the SCSA utility probe routine to see if we actually 2748 * have a target at this SCSI nexus. 2749 */ 2750 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2751 case SCSIPROBE_EXISTS: 2752 switch (devp->sd_inq->inq_dtype) { 2753 case DTYPE_DIRECT: 2754 rval = DDI_PROBE_SUCCESS; 2755 break; 2756 case DTYPE_RODIRECT: 2757 /* CDs etc. Can be removable media */ 2758 rval = DDI_PROBE_SUCCESS; 2759 break; 2760 case DTYPE_OPTICAL: 2761 /* 2762 * Rewritable optical driver HP115AA 2763 * Can also be removable media 2764 */ 2765 2766 /* 2767 * Do not attempt to bind to DTYPE_OPTICAL if 2768 * pre solaris 9 sparc sd behavior is required 2769 * 2770 * If first time through and sd_dtype_optical_bind 2771 * has not been set in /etc/system check properties 2772 */ 2773 2774 if (sd_dtype_optical_bind < 0) { 2775 sd_dtype_optical_bind = ddi_prop_get_int 2776 (DDI_DEV_T_ANY, devi, 0, 2777 "optical-device-bind", 1); 2778 } 2779 2780 if (sd_dtype_optical_bind == 0) { 2781 rval = DDI_PROBE_FAILURE; 2782 } else { 2783 rval = DDI_PROBE_SUCCESS; 2784 } 2785 break; 2786 2787 case DTYPE_NOTPRESENT: 2788 default: 2789 rval = DDI_PROBE_FAILURE; 2790 break; 2791 } 2792 break; 2793 default: 2794 rval = DDI_PROBE_PARTIAL; 2795 break; 2796 } 2797 2798 /* 2799 * This routine checks for resource allocation prior to freeing, 2800 * so it will take care of the "smart probing" case where a 2801 * scsi_probe() may or may not have been issued and will *not* 2802 * free previously-freed resources. 2803 */ 2804 scsi_unprobe(devp); 2805 return (rval); 2806 } 2807 2808 2809 /* 2810 * Function: sdinfo 2811 * 2812 * Description: This is the driver getinfo(9e) entry point function. 2813 * Given the device number, return the devinfo pointer from 2814 * the scsi_device structure or the instance number 2815 * associated with the dev_t. 2816 * 2817 * Arguments: dip - pointer to device info structure 2818 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2819 * DDI_INFO_DEVT2INSTANCE) 2820 * arg - driver dev_t 2821 * resultp - user buffer for request response 2822 * 2823 * Return Code: DDI_SUCCESS 2824 * DDI_FAILURE 2825 */ 2826 /* ARGSUSED */ 2827 static int 2828 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2829 { 2830 struct sd_lun *un; 2831 dev_t dev; 2832 int instance; 2833 int error; 2834 2835 switch (infocmd) { 2836 case DDI_INFO_DEVT2DEVINFO: 2837 dev = (dev_t)arg; 2838 instance = SDUNIT(dev); 2839 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2840 return (DDI_FAILURE); 2841 } 2842 *result = (void *) SD_DEVINFO(un); 2843 error = DDI_SUCCESS; 2844 break; 2845 case DDI_INFO_DEVT2INSTANCE: 2846 dev = (dev_t)arg; 2847 instance = SDUNIT(dev); 2848 *result = (void *)(uintptr_t)instance; 2849 error = DDI_SUCCESS; 2850 break; 2851 default: 2852 error = DDI_FAILURE; 2853 } 2854 return (error); 2855 } 2856 2857 /* 2858 * Function: sd_prop_op 2859 * 2860 * Description: This is the driver prop_op(9e) entry point function. 2861 * Return the number of blocks for the partition in question 2862 * or forward the request to the property facilities. 
2863 * 2864 * Arguments: dev - device number 2865 * dip - pointer to device info structure 2866 * prop_op - property operator 2867 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2868 * name - pointer to property name 2869 * valuep - pointer or address of the user buffer 2870 * lengthp - property length 2871 * 2872 * Return Code: DDI_PROP_SUCCESS 2873 * DDI_PROP_NOT_FOUND 2874 * DDI_PROP_UNDEFINED 2875 * DDI_PROP_NO_MEMORY 2876 * DDI_PROP_BUF_TOO_SMALL 2877 */ 2878 2879 static int 2880 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2881 char *name, caddr_t valuep, int *lengthp) 2882 { 2883 struct sd_lun *un; 2884 2885 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2886 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2887 name, valuep, lengthp)); 2888 2889 return (cmlb_prop_op(un->un_cmlbhandle, 2890 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2891 SDPART(dev), (void *)SD_PATH_DIRECT)); 2892 } 2893 2894 /* 2895 * The following functions are for smart probing: 2896 * sd_scsi_probe_cache_init() 2897 * sd_scsi_probe_cache_fini() 2898 * sd_scsi_clear_probe_cache() 2899 * sd_scsi_probe_with_cache() 2900 */ 2901 2902 /* 2903 * Function: sd_scsi_probe_cache_init 2904 * 2905 * Description: Initializes the probe response cache mutex and head pointer. 2906 * 2907 * Context: Kernel thread context 2908 */ 2909 2910 static void 2911 sd_scsi_probe_cache_init(void) 2912 { 2913 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2914 sd_scsi_probe_cache_head = NULL; 2915 } 2916 2917 2918 /* 2919 * Function: sd_scsi_probe_cache_fini 2920 * 2921 * Description: Frees all resources associated with the probe response cache. 2922 * 2923 * Context: Kernel thread context 2924 */ 2925 2926 static void 2927 sd_scsi_probe_cache_fini(void) 2928 { 2929 struct sd_scsi_probe_cache *cp; 2930 struct sd_scsi_probe_cache *ncp; 2931 2932 /* Clean up our smart probing linked list */ 2933 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2934 ncp = cp->next; 2935 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2936 } 2937 sd_scsi_probe_cache_head = NULL; 2938 mutex_destroy(&sd_scsi_probe_cache_mutex); 2939 } 2940 2941 2942 /* 2943 * Function: sd_scsi_clear_probe_cache 2944 * 2945 * Description: This routine clears the probe response cache. This is 2946 * done when open() returns ENXIO so that when deferred 2947 * attach is attempted (possibly after a device has been 2948 * turned on) we will retry the probe. Since we don't know 2949 * which target we failed to open, we just clear the 2950 * entire cache. 2951 * 2952 * Context: Kernel thread context 2953 */ 2954 2955 static void 2956 sd_scsi_clear_probe_cache(void) 2957 { 2958 struct sd_scsi_probe_cache *cp; 2959 int i; 2960 2961 mutex_enter(&sd_scsi_probe_cache_mutex); 2962 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2963 /* 2964 * Reset all entries to SCSIPROBE_EXISTS. This will 2965 * force probing to be performed the next time 2966 * sd_scsi_probe_with_cache is called. 2967 */ 2968 for (i = 0; i < NTARGETS_WIDE; i++) { 2969 cp->cache[i] = SCSIPROBE_EXISTS; 2970 } 2971 } 2972 mutex_exit(&sd_scsi_probe_cache_mutex); 2973 } 2974 2975 2976 /* 2977 * Function: sd_scsi_probe_with_cache 2978 * 2979 * Description: This routine implements support for a scsi device probe 2980 * with cache. The driver maintains a cache of the target 2981 * responses to scsi probes. 
If we get no response from a 2982 * target during a probe inquiry, we remember that, and we 2983 * avoid additional calls to scsi_probe on non-zero LUNs 2984 * on the same target until the cache is cleared. By doing 2985 * so we avoid the 1/4 sec selection timeout for nonzero 2986 * LUNs. lun0 of a target is always probed. 2987 * 2988 * Arguments: devp - Pointer to a scsi_device(9S) structure 2989 * waitfunc - indicates what the allocator routines should 2990 * do when resources are not available. This value 2991 * is passed on to scsi_probe() when that routine 2992 * is called. 2993 * 2994 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2995 * otherwise the value returned by scsi_probe(9F). 2996 * 2997 * Context: Kernel thread context 2998 */ 2999 3000 static int 3001 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 3002 { 3003 struct sd_scsi_probe_cache *cp; 3004 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 3005 int lun, tgt; 3006 3007 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 3008 SCSI_ADDR_PROP_LUN, 0); 3009 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 3010 SCSI_ADDR_PROP_TARGET, -1); 3011 3012 /* Make sure caching enabled and target in range */ 3013 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 3014 /* do it the old way (no cache) */ 3015 return (scsi_probe(devp, waitfn)); 3016 } 3017 3018 mutex_enter(&sd_scsi_probe_cache_mutex); 3019 3020 /* Find the cache for this scsi bus instance */ 3021 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 3022 if (cp->pdip == pdip) { 3023 break; 3024 } 3025 } 3026 3027 /* If we can't find a cache for this pdip, create one */ 3028 if (cp == NULL) { 3029 int i; 3030 3031 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 3032 KM_SLEEP); 3033 cp->pdip = pdip; 3034 cp->next = sd_scsi_probe_cache_head; 3035 sd_scsi_probe_cache_head = cp; 3036 for (i = 0; i < NTARGETS_WIDE; i++) { 3037 cp->cache[i] = SCSIPROBE_EXISTS; 3038 } 3039 } 3040 3041 mutex_exit(&sd_scsi_probe_cache_mutex); 3042 3043 /* Recompute the cache for this target if LUN zero */ 3044 if (lun == 0) { 3045 cp->cache[tgt] = SCSIPROBE_EXISTS; 3046 } 3047 3048 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 3049 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 3050 return (SCSIPROBE_NORESP); 3051 } 3052 3053 /* Do the actual probe; save & return the result */ 3054 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 3055 } 3056 3057 3058 /* 3059 * Function: sd_scsi_target_lun_init 3060 * 3061 * Description: Initializes the attached lun chain mutex and head pointer. 
3062 *
3063 * Context: Kernel thread context
3064 */
3065
3066 static void
3067 sd_scsi_target_lun_init(void)
3068 {
3069 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
3070 sd_scsi_target_lun_head = NULL;
3071 }
3072
3073
3074 /*
3075 * Function: sd_scsi_target_lun_fini
3076 *
3077 * Description: Frees all resources associated with the attached lun
3078 * chain.
3079 *
3080 * Context: Kernel thread context
3081 */
3082
3083 static void
3084 sd_scsi_target_lun_fini(void)
3085 {
3086 struct sd_scsi_hba_tgt_lun *cp;
3087 struct sd_scsi_hba_tgt_lun *ncp;
3088
3089 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
3090 ncp = cp->next;
3091 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
3092 }
3093 sd_scsi_target_lun_head = NULL;
3094 mutex_destroy(&sd_scsi_target_lun_mutex);
3095 }
3096
3097
3098 /*
3099 * Function: sd_scsi_get_target_lun_count
3100 *
3101 * Description: This routine will check in the attached lun chain to see
3102 * how many luns are attached on the required SCSI controller
3103 * and target. Currently, some capabilities like tagged queueing
3104 * are supported per target by the HBA, so all luns in a
3105 * target have the same capabilities. Based on this assumption,
3106 * sd should only set these capabilities once per target. This
3107 * function is called when sd needs to decide how many luns are
3108 * already attached on a target.
3109 *
3110 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3111 * controller device.
3112 * target - The target ID on the controller's SCSI bus.
3113 *
3114 * Return Code: The number of luns attached on the required target and
3115 * controller.
3116 * -1 if target ID is not in parallel SCSI scope or the given
3117 * dip is not in the chain.
3118 *
3119 * Context: Kernel thread context
3120 */
3121
3122 static int
3123 sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
3124 {
3125 struct sd_scsi_hba_tgt_lun *cp;
3126
3127 if ((target < 0) || (target >= NTARGETS_WIDE)) {
3128 return (-1);
3129 }
3130
3131 mutex_enter(&sd_scsi_target_lun_mutex);
3132
3133 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
3134 if (cp->pdip == dip) {
3135 break;
3136 }
3137 }
3138
3139 mutex_exit(&sd_scsi_target_lun_mutex);
3140
3141 if (cp == NULL) {
3142 return (-1);
3143 }
3144
3145 return (cp->nlun[target]);
3146 }
3147
3148
3149 /*
3150 * Function: sd_scsi_update_lun_on_target
3151 *
3152 * Description: This routine is used to update the attached lun chain when a
3153 * lun is attached or detached on a target.
3154 *
3155 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3156 * controller device.
3157 * target - The target ID on the controller's SCSI bus.
3158 * flag - Indicates whether the lun is attached or detached.
3159 * 3160 * Context: Kernel thread context 3161 */ 3162 3163 static void 3164 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 3165 { 3166 struct sd_scsi_hba_tgt_lun *cp; 3167 3168 mutex_enter(&sd_scsi_target_lun_mutex); 3169 3170 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 3171 if (cp->pdip == dip) { 3172 break; 3173 } 3174 } 3175 3176 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 3177 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 3178 KM_SLEEP); 3179 cp->pdip = dip; 3180 cp->next = sd_scsi_target_lun_head; 3181 sd_scsi_target_lun_head = cp; 3182 } 3183 3184 mutex_exit(&sd_scsi_target_lun_mutex); 3185 3186 if (cp != NULL) { 3187 if (flag == SD_SCSI_LUN_ATTACH) { 3188 cp->nlun[target] ++; 3189 } else { 3190 cp->nlun[target] --; 3191 } 3192 } 3193 } 3194 3195 3196 /* 3197 * Function: sd_spin_up_unit 3198 * 3199 * Description: Issues the following commands to spin-up the device: 3200 * START STOP UNIT, and INQUIRY. 3201 * 3202 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3203 * structure for this target. 3204 * 3205 * Return Code: 0 - success 3206 * EIO - failure 3207 * EACCES - reservation conflict 3208 * 3209 * Context: Kernel thread context 3210 */ 3211 3212 static int 3213 sd_spin_up_unit(sd_ssc_t *ssc) 3214 { 3215 size_t resid = 0; 3216 int has_conflict = FALSE; 3217 uchar_t *bufaddr; 3218 int status; 3219 struct sd_lun *un; 3220 3221 ASSERT(ssc != NULL); 3222 un = ssc->ssc_un; 3223 ASSERT(un != NULL); 3224 3225 /* 3226 * Send a throwaway START UNIT command. 3227 * 3228 * If we fail on this, we don't care presently what precisely 3229 * is wrong. EMC's arrays will also fail this with a check 3230 * condition (0x2/0x4/0x3) if the device is "inactive," but 3231 * we don't want to fail the attach because it may become 3232 * "active" later. 3233 * We don't know if power condition is supported or not at 3234 * this stage, use START STOP bit. 3235 */ 3236 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 3237 SD_TARGET_START, SD_PATH_DIRECT); 3238 3239 if (status != 0) { 3240 if (status == EACCES) 3241 has_conflict = TRUE; 3242 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3243 } 3244 3245 /* 3246 * Send another INQUIRY command to the target. This is necessary for 3247 * non-removable media direct access devices because their INQUIRY data 3248 * may not be fully qualified until they are spun up (perhaps via the 3249 * START command above). Note: This seems to be needed for some 3250 * legacy devices only.) The INQUIRY command should succeed even if a 3251 * Reservation Conflict is present. 3252 */ 3253 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 3254 3255 if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid) 3256 != 0) { 3257 kmem_free(bufaddr, SUN_INQSIZE); 3258 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 3259 return (EIO); 3260 } 3261 3262 /* 3263 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 3264 * Note that this routine does not return a failure here even if the 3265 * INQUIRY command did not return any data. This is a legacy behavior. 3266 */ 3267 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 3268 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 3269 } 3270 3271 kmem_free(bufaddr, SUN_INQSIZE); 3272 3273 /* If we hit a reservation conflict above, tell the caller. 
*/ 3274 if (has_conflict == TRUE) { 3275 return (EACCES); 3276 } 3277 3278 return (0); 3279 } 3280 3281 #ifdef _LP64 3282 /* 3283 * Function: sd_enable_descr_sense 3284 * 3285 * Description: This routine attempts to select descriptor sense format 3286 * using the Control mode page. Devices that support 64 bit 3287 * LBAs (for >2TB luns) should also implement descriptor 3288 * sense data so we will call this function whenever we see 3289 * a lun larger than 2TB. If for some reason the device 3290 * supports 64 bit LBAs but doesn't support descriptor sense 3291 * presumably the mode select will fail. Everything will 3292 * continue to work normally except that we will not get 3293 * complete sense data for commands that fail with an LBA 3294 * larger than 32 bits. 3295 * 3296 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3297 * structure for this target. 3298 * 3299 * Context: Kernel thread context only 3300 */ 3301 3302 static void 3303 sd_enable_descr_sense(sd_ssc_t *ssc) 3304 { 3305 uchar_t *header; 3306 struct mode_control_scsi3 *ctrl_bufp; 3307 size_t buflen; 3308 size_t bd_len; 3309 int status; 3310 struct sd_lun *un; 3311 3312 ASSERT(ssc != NULL); 3313 un = ssc->ssc_un; 3314 ASSERT(un != NULL); 3315 3316 /* 3317 * Read MODE SENSE page 0xA, Control Mode Page 3318 */ 3319 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3320 sizeof (struct mode_control_scsi3); 3321 header = kmem_zalloc(buflen, KM_SLEEP); 3322 3323 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 3324 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT); 3325 3326 if (status != 0) { 3327 SD_ERROR(SD_LOG_COMMON, un, 3328 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3329 goto eds_exit; 3330 } 3331 3332 /* 3333 * Determine size of Block Descriptors in order to locate 3334 * the mode page data. ATAPI devices return 0, SCSI devices 3335 * should return MODE_BLK_DESC_LENGTH. 3336 */ 3337 bd_len = ((struct mode_header *)header)->bdesc_length; 3338 3339 /* Clear the mode data length field for MODE SELECT */ 3340 ((struct mode_header *)header)->length = 0; 3341 3342 ctrl_bufp = (struct mode_control_scsi3 *) 3343 (header + MODE_HEADER_LENGTH + bd_len); 3344 3345 /* 3346 * If the page length is smaller than the expected value, 3347 * the target device doesn't support D_SENSE. Bail out here. 3348 */ 3349 if (ctrl_bufp->mode_page.length < 3350 sizeof (struct mode_control_scsi3) - 2) { 3351 SD_ERROR(SD_LOG_COMMON, un, 3352 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3353 goto eds_exit; 3354 } 3355 3356 /* 3357 * Clear PS bit for MODE SELECT 3358 */ 3359 ctrl_bufp->mode_page.ps = 0; 3360 3361 /* 3362 * Set D_SENSE to enable descriptor sense format. 
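* With D_SENSE set, the target returns sense data in descriptor
* format (response codes 0x72/0x73), whose INFORMATION field can
* carry a full 64-bit LBA; fixed format (0x70/0x71) is limited to
* a 32-bit value, which is why this matters for luns larger than
* 2TB.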
3363 */ 3364 ctrl_bufp->d_sense = 1; 3365 3366 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3367 3368 /* 3369 * Use MODE SELECT to commit the change to the D_SENSE bit 3370 */ 3371 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 3372 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT); 3373 3374 if (status != 0) { 3375 SD_INFO(SD_LOG_COMMON, un, 3376 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3377 } else { 3378 kmem_free(header, buflen); 3379 return; 3380 } 3381 3382 eds_exit: 3383 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3384 kmem_free(header, buflen); 3385 } 3386 3387 /* 3388 * Function: sd_reenable_dsense_task 3389 * 3390 * Description: Re-enable descriptor sense after device or bus reset 3391 * 3392 * Context: Executes in a taskq() thread context 3393 */ 3394 static void 3395 sd_reenable_dsense_task(void *arg) 3396 { 3397 struct sd_lun *un = arg; 3398 sd_ssc_t *ssc; 3399 3400 ASSERT(un != NULL); 3401 3402 ssc = sd_ssc_init(un); 3403 sd_enable_descr_sense(ssc); 3404 sd_ssc_fini(ssc); 3405 } 3406 #endif /* _LP64 */ 3407 3408 /* 3409 * Function: sd_set_mmc_caps 3410 * 3411 * Description: This routine determines if the device is MMC compliant and if 3412 * the device supports CDDA via a mode sense of the CDVD 3413 * capabilities mode page. Also checks if the device is a 3414 * dvdram writable device. 3415 * 3416 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3417 * structure for this target. 3418 * 3419 * Context: Kernel thread context only 3420 */ 3421 3422 static void 3423 sd_set_mmc_caps(sd_ssc_t *ssc) 3424 { 3425 struct mode_header_grp2 *sense_mhp; 3426 uchar_t *sense_page; 3427 caddr_t buf; 3428 int bd_len; 3429 int status; 3430 struct uscsi_cmd com; 3431 int rtn; 3432 uchar_t *out_data_rw, *out_data_hd; 3433 uchar_t *rqbuf_rw, *rqbuf_hd; 3434 uchar_t *out_data_gesn; 3435 int gesn_len; 3436 struct sd_lun *un; 3437 3438 ASSERT(ssc != NULL); 3439 un = ssc->ssc_un; 3440 ASSERT(un != NULL); 3441 3442 /* 3443 * The flags which will be set in this function are - mmc compliant, 3444 * dvdram writable device, cdda support. Initialize them to FALSE 3445 * and if a capability is detected - it will be set to TRUE. 3446 */ 3447 un->un_f_mmc_cap = FALSE; 3448 un->un_f_dvdram_writable_device = FALSE; 3449 un->un_f_cfg_cdda = FALSE; 3450 3451 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3452 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3453 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3454 3455 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3456 3457 if (status != 0) { 3458 /* command failed; just return */ 3459 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3460 return; 3461 } 3462 /* 3463 * If the mode sense request for the CDROM CAPABILITIES 3464 * page (0x2A) succeeds the device is assumed to be MMC. 
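* (Page 0x2A is the CD/DVD Capabilities and Mechanical Status page
* defined by the MMC specification, so a device that serves it is
* taken to implement the MMC command set.)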
3465 */ 3466 un->un_f_mmc_cap = TRUE; 3467 3468 /* See if GET STATUS EVENT NOTIFICATION is supported */ 3469 if (un->un_f_mmc_gesn_polling) { 3470 gesn_len = SD_GESN_HEADER_LEN + SD_GESN_MEDIA_DATA_LEN; 3471 out_data_gesn = kmem_zalloc(gesn_len, KM_SLEEP); 3472 3473 rtn = sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(ssc, 3474 out_data_gesn, gesn_len, 1 << SD_GESN_MEDIA_CLASS); 3475 3476 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3477 3478 if ((rtn != 0) || !sd_gesn_media_data_valid(out_data_gesn)) { 3479 un->un_f_mmc_gesn_polling = FALSE; 3480 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3481 "sd_set_mmc_caps: gesn not supported " 3482 "%d %x %x %x %x\n", rtn, 3483 out_data_gesn[0], out_data_gesn[1], 3484 out_data_gesn[2], out_data_gesn[3]); 3485 } 3486 3487 kmem_free(out_data_gesn, gesn_len); 3488 } 3489 3490 /* Get to the page data */ 3491 sense_mhp = (struct mode_header_grp2 *)buf; 3492 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3493 sense_mhp->bdesc_length_lo; 3494 if (bd_len > MODE_BLK_DESC_LENGTH) { 3495 /* 3496 * We did not get back the expected block descriptor 3497 * length so we cannot determine if the device supports 3498 * CDDA. However, we still indicate the device is MMC 3499 * according to the successful response to the page 3500 * 0x2A mode sense request. 3501 */ 3502 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3503 "sd_set_mmc_caps: Mode Sense returned " 3504 "invalid block descriptor length\n"); 3505 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3506 return; 3507 } 3508 3509 /* See if read CDDA is supported */ 3510 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3511 bd_len); 3512 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3513 3514 /* See if writing DVD RAM is supported. */ 3515 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3516 if (un->un_f_dvdram_writable_device == TRUE) { 3517 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3518 return; 3519 } 3520 3521 /* 3522 * If the device presents DVD or CD capabilities in the mode 3523 * page, we can return here since a RRD will not have 3524 * these capabilities. 3525 */ 3526 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3527 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3528 return; 3529 } 3530 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3531 3532 /* 3533 * If un->un_f_dvdram_writable_device is still FALSE, 3534 * check for a Removable Rigid Disk (RRD). A RRD 3535 * device is identified by the features RANDOM_WRITABLE and 3536 * HARDWARE_DEFECT_MANAGEMENT. 3537 */ 3538 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3539 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3540 3541 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3542 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3543 RANDOM_WRITABLE, SD_PATH_STANDARD); 3544 3545 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3546 3547 if (rtn != 0) { 3548 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3549 kmem_free(rqbuf_rw, SENSE_LENGTH); 3550 return; 3551 } 3552 3553 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3554 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3555 3556 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3557 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3558 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3559 3560 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3561 3562 if (rtn == 0) { 3563 /* 3564 * We have good information, check for random writable 3565 * and hardware defect features. 
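* In a GET CONFIGURATION response the feature header occupies bytes
* 0-7 and the first feature descriptor starts at byte 8, so bytes
* 8-9 hold the big-endian feature code; byte 9 is its low-order
* byte, which is what the tests below examine.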
3566 */
3567 if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3568 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) {
3569 un->un_f_dvdram_writable_device = TRUE;
3570 }
3571 }
3572
3573 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3574 kmem_free(rqbuf_rw, SENSE_LENGTH);
3575 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3576 kmem_free(rqbuf_hd, SENSE_LENGTH);
3577 }
3578
3579 /*
3580 * Function: sd_check_for_writable_cd
3581 *
3582 * Description: This routine determines whether the media in the device is
3583 * writable. It uses the GET CONFIGURATION command (0x46)
3584 * to make this determination.
3585 *
3586 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3587 * structure for this target.
3588 * path_flag - SD_PATH_DIRECT to use the USCSI "direct"
3589 * chain and the normal command waitq, or
3590 * SD_PATH_DIRECT_PRIORITY to use the USCSI
3591 * "direct" chain and bypass the normal command
3592 * waitq.
3593 *
3594 * Context: Never called at interrupt context.
3595 */
3596
3597 static void
3598 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag)
3599 {
3600 struct uscsi_cmd com;
3601 uchar_t *out_data;
3602 uchar_t *rqbuf;
3603 int rtn;
3604 uchar_t *out_data_rw, *out_data_hd;
3605 uchar_t *rqbuf_rw, *rqbuf_hd;
3606 struct mode_header_grp2 *sense_mhp;
3607 uchar_t *sense_page;
3608 caddr_t buf;
3609 int bd_len;
3610 int status;
3611 struct sd_lun *un;
3612
3613 ASSERT(ssc != NULL);
3614 un = ssc->ssc_un;
3615 ASSERT(un != NULL);
3616 ASSERT(mutex_owned(SD_MUTEX(un)));
3617
3618 /*
3619 * Initialize writable media to FALSE; it is set to TRUE only if
3620 * the configuration information indicates the media is writable.
3621 */
3622 un->un_f_mmc_writable_media = FALSE;
3623 mutex_exit(SD_MUTEX(un));
3624
3625 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
3626 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3627
3628 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH,
3629 out_data, SD_PROFILE_HEADER_LEN, path_flag);
3630
3631 if (rtn != 0)
3632 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3633
3634 mutex_enter(SD_MUTEX(un));
3635 if (rtn == 0) {
3636 /*
3637 * We have good information, check for writable DVD.
3638 */
3639 if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
3640 un->un_f_mmc_writable_media = TRUE;
3641 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3642 kmem_free(rqbuf, SENSE_LENGTH);
3643 return;
3644 }
3645 }
3646
3647 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3648 kmem_free(rqbuf, SENSE_LENGTH);
3649
3650 /*
3651 * Determine if this is a RRD type device.
3652 */
3653 mutex_exit(SD_MUTEX(un));
3654 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3655 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
3656 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag);
3657
3658 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3659
3660 mutex_enter(SD_MUTEX(un));
3661 if (status != 0) {
3662 /* command failed; just return */
3663 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3664 return;
3665 }
3666
3667 /* Get to the page data */
3668 sense_mhp = (struct mode_header_grp2 *)buf;
3669 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
3670 if (bd_len > MODE_BLK_DESC_LENGTH) {
3671 /*
3672 * We did not get back the expected block descriptor length so
3673 * we cannot check the mode page.
3674 */
3675 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3676 "sd_check_for_writable_cd: Mode Sense returned "
3677 "invalid block descriptor length\n");
3678 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3679 return;
3680 }
3681
3682 /*
3683 * If the device presents DVD or CD capabilities in the mode
3684 * page, we can return here since a RRD device will not have
3685 * these capabilities.
3686 */
3687 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
3688 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3689 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3690 return;
3691 }
3692 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3693
3694 /*
3695 * If un->un_f_mmc_writable_media is still FALSE,
3696 * check for RRD type media. A RRD device is identified
3697 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
3698 */
3699 mutex_exit(SD_MUTEX(un));
3700 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3701 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3702
3703 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
3704 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3705 RANDOM_WRITABLE, path_flag);
3706
3707 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3708 if (rtn != 0) {
3709 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3710 kmem_free(rqbuf_rw, SENSE_LENGTH);
3711 mutex_enter(SD_MUTEX(un));
3712 return;
3713 }
3714
3715 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3716 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3717
3718 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
3719 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3720 HARDWARE_DEFECT_MANAGEMENT, path_flag);
3721
3722 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3723 mutex_enter(SD_MUTEX(un));
3724 if (rtn == 0) {
3725 /*
3726 * We have good information, check for random writable
3727 * and hardware defect features as current.
3728 */
3729 if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3730 (out_data_rw[10] & 0x1) &&
3731 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
3732 (out_data_hd[10] & 0x1)) {
3733 un->un_f_mmc_writable_media = TRUE;
3734 }
3735 }
3736
3737 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3738 kmem_free(rqbuf_rw, SENSE_LENGTH);
3739 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3740 kmem_free(rqbuf_hd, SENSE_LENGTH);
3741 }
3742
3743 /*
3744 * Function: sd_read_unit_properties
3745 *
3746 * Description: The following implements a property lookup mechanism.
3747 * Properties for particular disks (keyed on vendor, model
3748 * and rev numbers) are sought in the sd.conf file via
3749 * sd_process_sdconf_file(), and if not found there, are
3750 * looked for in a list hardcoded in this driver via
3751 * sd_process_sdconf_table(). Once located, the properties
3752 * are used to update the driver unit structure.
3753 *
3754 * Arguments: un - driver soft state (unit) structure
3755 */
3756
3757 static void
3758 sd_read_unit_properties(struct sd_lun *un)
3759 {
3760 /*
3761 * sd_process_sdconf_file returns SD_FAILURE if it cannot find
3762 * the "sd-config-list" property (from the sd.conf file) or if
3763 * there was not a match for the inquiry vid/pid. If this event
3764 * occurs, the static driver configuration table is searched for
3765 * a match.
3766 */
3767 ASSERT(un != NULL);
3768 if (sd_process_sdconf_file(un) == SD_FAILURE) {
3769 sd_process_sdconf_table(un);
3770 }
3771
3772 /* check for LSI device */
3773 sd_is_lsi(un);
3774
3775 }
3776
3777
3778 /*
3779 * Function: sd_process_sdconf_file
3780 *
3781 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the
3782 * driver's config file (i.e., sd.conf) and update the driver
3783 * soft state structure accordingly.
3784 *
3785 * Arguments: un - driver soft state (unit) structure
3786 *
3787 * Return Code: SD_SUCCESS - The properties were successfully set according
3788 * to the driver configuration file.
3789 * SD_FAILURE - The driver config list was not obtained or
3790 * there was no vid/pid match. This indicates that
3791 * the static config table should be used.
3792 *
3793 * The config file has a property, "sd-config-list". Currently we support
3794 * two kinds of formats. For both formats, the value of this property
3795 * is a list of duplets:
3796 *
3797 * sd-config-list=
3798 * <duplet>
3799 * [, <duplet>]*;
3800 *
3801 * For the improved format, where
3802 *
3803 * <duplet>:= "<vid+pid>","<tunable-list>"
3804 *
3805 * and
3806 *
3807 * <tunable-list>:= <tunable> [, <tunable> ]*;
3808 * <tunable> = <name> : <value>
3809 *
3810 * The <vid+pid> is the string that is returned by the target device on a
3811 * SCSI inquiry command; the <tunable-list> contains one or more tunables
3812 * to apply to all target devices with the specified <vid+pid>.
3813 *
3814 * Each <tunable> is a "<name> : <value>" pair.
3815 *
3816 * For the old format, the structure of each duplet is as follows:
3817 *
3818 * <duplet>:= "<vid+pid>","<data-property-name_list>"
3819 *
3820 * The first entry of the duplet is the device ID string (the concatenated
3821 * vid & pid; not to be confused with a device_id). This is defined in
3822 * the same way as in the sd_disk_table.
3823 *
3824 * The second part of the duplet is a string that identifies a
3825 * data-property-name-list. The data-property-name-list is defined as
3826 * follows:
3827 *
3828 * <data-property-name-list>:=<data-property-name> [<data-property-name>]
3829 *
3830 * The syntax of <data-property-name> depends on the <version> field.
3831 *
3832 * If version = SD_CONF_VERSION_1 we have the following syntax:
3833 *
3834 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
3835 *
3836 * where the prop0 value will be used to set prop0 if bit0 is set in the
3837 * flags, prop1 if bit1 is set, etc., and N = SD_CONF_MAX_ITEMS - 1
3838 *
3839 */
3840
3841 static int
3842 sd_process_sdconf_file(struct sd_lun *un)
3843 {
3844 char **config_list = NULL;
3845 uint_t nelements;
3846 char *vidptr;
3847 int vidlen;
3848 char *dnlist_ptr;
3849 char *dataname_ptr;
3850 char *dataname_lasts;
3851 int *data_list = NULL;
3852 uint_t data_list_len;
3853 int rval = SD_FAILURE;
3854 int i;
3855
3856 ASSERT(un != NULL);
3857
3858 /* Obtain the configuration list associated with the .conf file */
3859 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un),
3860 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list,
3861 &config_list, &nelements) != DDI_PROP_SUCCESS) {
3862 return (SD_FAILURE);
3863 }
3864
3865 /*
3866 * Compare vids in each duplet to the inquiry vid - if a match is
3867 * made, get the data value and update the soft state structure
3868 * accordingly.
3869 *
3870 * Each duplet should appear as a pair of strings; return SD_FAILURE
3871 * otherwise.
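*
* For illustration only (a made-up vid/pid and values, not from the
* original source), an sd.conf entry in the improved format could be:
*
*	sd-config-list = "ACME    SUPERDISK", "retries-busy:6, disksort:false";
*
* and one in the old format, where the second string names an integer
* array property defined elsewhere in sd.conf:
*
*	sd-config-list = "ACME    OLDDISK", "acme-old-disk-data";
*	acme-old-disk-data = 1, 0x4, 0, 0, 6, 0, 0, 0, 0, 0;
*
* i.e. version SD_CONF_VERSION_1, a flags word selecting which
* positional values apply, then the prop0..propN values (abbreviated
* here; the list length must cover SD_CONF_MAX_ITEMS entries).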
3872 */ 3873 if (nelements & 1) { 3874 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3875 "sd-config-list should show as pairs of strings.\n"); 3876 if (config_list) 3877 ddi_prop_free(config_list); 3878 return (SD_FAILURE); 3879 } 3880 3881 for (i = 0; i < nelements; i += 2) { 3882 /* 3883 * Note: The assumption here is that each vid entry is on 3884 * a unique line from its associated duplet. 3885 */ 3886 vidptr = config_list[i]; 3887 vidlen = (int)strlen(vidptr); 3888 if ((vidlen == 0) || 3889 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3890 continue; 3891 } 3892 3893 /* 3894 * dnlist contains 1 or more blank separated 3895 * data-property-name entries 3896 */ 3897 dnlist_ptr = config_list[i + 1]; 3898 3899 if (strchr(dnlist_ptr, ':') != NULL) { 3900 /* 3901 * Decode the improved format sd-config-list. 3902 */ 3903 sd_nvpair_str_decode(un, dnlist_ptr); 3904 } else { 3905 /* 3906 * The old format sd-config-list, loop through all 3907 * data-property-name entries in the 3908 * data-property-name-list 3909 * setting the properties for each. 3910 */ 3911 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t", 3912 &dataname_lasts); dataname_ptr != NULL; 3913 dataname_ptr = sd_strtok_r(NULL, " \t", 3914 &dataname_lasts)) { 3915 int version; 3916 3917 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3918 "sd_process_sdconf_file: disk:%s, " 3919 "data:%s\n", vidptr, dataname_ptr); 3920 3921 /* Get the data list */ 3922 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 3923 SD_DEVINFO(un), 0, dataname_ptr, &data_list, 3924 &data_list_len) != DDI_PROP_SUCCESS) { 3925 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3926 "sd_process_sdconf_file: data " 3927 "property (%s) has no value\n", 3928 dataname_ptr); 3929 continue; 3930 } 3931 3932 version = data_list[0]; 3933 3934 if (version == SD_CONF_VERSION_1) { 3935 sd_tunables values; 3936 3937 /* Set the properties */ 3938 if (sd_chk_vers1_data(un, data_list[1], 3939 &data_list[2], data_list_len, 3940 dataname_ptr) == SD_SUCCESS) { 3941 sd_get_tunables_from_conf(un, 3942 data_list[1], &data_list[2], 3943 &values); 3944 sd_set_vers1_properties(un, 3945 data_list[1], &values); 3946 rval = SD_SUCCESS; 3947 } else { 3948 rval = SD_FAILURE; 3949 } 3950 } else { 3951 scsi_log(SD_DEVINFO(un), sd_label, 3952 CE_WARN, "data property %s version " 3953 "0x%x is invalid.", 3954 dataname_ptr, version); 3955 rval = SD_FAILURE; 3956 } 3957 if (data_list) 3958 ddi_prop_free(data_list); 3959 } 3960 } 3961 } 3962 3963 /* free up the memory allocated by ddi_prop_lookup_string_array(). */ 3964 if (config_list) { 3965 ddi_prop_free(config_list); 3966 } 3967 3968 return (rval); 3969 } 3970 3971 /* 3972 * Function: sd_nvpair_str_decode() 3973 * 3974 * Description: Parse the improved format sd-config-list to get 3975 * each entry of tunable, which includes a name-value pair. 3976 * Then call sd_set_properties() to set the property. 
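*
* For example, a <tunable-list> string of
*	"retries-busy : 6, disksort : false"
* decodes (whitespace stripped) into the pairs (retries-busy, 6) and
* (disksort, false), each handed to sd_set_properties().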
3977 *
3978 * Arguments: un - driver soft state (unit) structure
3979 * nvpair_str - the tunable list
3980 */
3981 static void
3982 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str)
3983 {
3984 char *nv, *name, *value, *token;
3985 char *nv_lasts, *v_lasts, *x_lasts;
3986
3987 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL;
3988 nv = sd_strtok_r(NULL, ",", &nv_lasts)) {
3989 token = sd_strtok_r(nv, ":", &v_lasts);
3990 name = sd_strtok_r(token, " \t", &x_lasts);
3991 token = sd_strtok_r(NULL, ":", &v_lasts);
3992 value = sd_strtok_r(token, " \t", &x_lasts);
3993 if (name == NULL || value == NULL) {
3994 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3995 "sd_nvpair_str_decode: "
3996 "name or value is not valid!\n");
3997 } else {
3998 sd_set_properties(un, name, value);
3999 }
4000 }
4001 }
4002
4003 /*
4004 * Function: sd_strtok_r()
4005 *
4006 * Description: This function uses strpbrk and strspn to break the
4007 * string into tokens on successive calls. It returns NULL
4008 * when no non-separator characters remain. The string
4009 * argument is NULL for all calls after the first.
4010 */
4011 static char *
4012 sd_strtok_r(char *string, const char *sepset, char **lasts)
4013 {
4014 char *q, *r;
4015
4016 /* First or subsequent call */
4017 if (string == NULL)
4018 string = *lasts;
4019
4020 if (string == NULL)
4021 return (NULL);
4022
4023 /* Skip leading separators */
4024 q = string + strspn(string, sepset);
4025
4026 if (*q == '\0')
4027 return (NULL);
4028
4029 if ((r = strpbrk(q, sepset)) == NULL)
4030 *lasts = NULL;
4031 else {
4032 *r = '\0';
4033 *lasts = r + 1;
4034 }
4035 return (q);
4036 }
4037
4038 /*
4039 * Function: sd_set_properties()
4040 *
4041 * Description: Set device properties based on the improved
4042 * format sd-config-list.
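*
* The recognized tunable names (from the checks below) are:
* cache-nonvolatile, controller-type, delay-busy, disksort,
* power-condition, timeout-releasereservation, reset-lun,
* retries-busy, retries-timeout, retries-notready, retries-reset,
* throttle-max, throttle-min, rmw-type and mmc-gesn-polling.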
4043 * 4044 * Arguments: un - driver soft state (unit) structure 4045 * name - supported tunable name 4046 * value - tunable value 4047 */ 4048 static void 4049 sd_set_properties(struct sd_lun *un, char *name, char *value) 4050 { 4051 char *endptr = NULL; 4052 long val = 0; 4053 4054 if (strcasecmp(name, "cache-nonvolatile") == 0) { 4055 if (strcasecmp(value, "true") == 0) { 4056 un->un_f_suppress_cache_flush = TRUE; 4057 } else if (strcasecmp(value, "false") == 0) { 4058 un->un_f_suppress_cache_flush = FALSE; 4059 } else { 4060 goto value_invalid; 4061 } 4062 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4063 "suppress_cache_flush flag set to %d\n", 4064 un->un_f_suppress_cache_flush); 4065 return; 4066 } 4067 4068 if (strcasecmp(name, "controller-type") == 0) { 4069 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4070 un->un_ctype = val; 4071 } else { 4072 goto value_invalid; 4073 } 4074 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4075 "ctype set to %d\n", un->un_ctype); 4076 return; 4077 } 4078 4079 if (strcasecmp(name, "delay-busy") == 0) { 4080 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4081 un->un_busy_timeout = drv_usectohz(val / 1000); 4082 } else { 4083 goto value_invalid; 4084 } 4085 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4086 "busy_timeout set to %d\n", un->un_busy_timeout); 4087 return; 4088 } 4089 4090 if (strcasecmp(name, "disksort") == 0) { 4091 if (strcasecmp(value, "true") == 0) { 4092 un->un_f_disksort_disabled = FALSE; 4093 } else if (strcasecmp(value, "false") == 0) { 4094 un->un_f_disksort_disabled = TRUE; 4095 } else { 4096 goto value_invalid; 4097 } 4098 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4099 "disksort disabled flag set to %d\n", 4100 un->un_f_disksort_disabled); 4101 return; 4102 } 4103 4104 if (strcasecmp(name, "power-condition") == 0) { 4105 if (strcasecmp(value, "true") == 0) { 4106 un->un_f_power_condition_disabled = FALSE; 4107 } else if (strcasecmp(value, "false") == 0) { 4108 un->un_f_power_condition_disabled = TRUE; 4109 } else { 4110 goto value_invalid; 4111 } 4112 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4113 "power condition disabled flag set to %d\n", 4114 un->un_f_power_condition_disabled); 4115 return; 4116 } 4117 4118 if (strcasecmp(name, "timeout-releasereservation") == 0) { 4119 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4120 un->un_reserve_release_time = val; 4121 } else { 4122 goto value_invalid; 4123 } 4124 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4125 "reservation release timeout set to %d\n", 4126 un->un_reserve_release_time); 4127 return; 4128 } 4129 4130 if (strcasecmp(name, "reset-lun") == 0) { 4131 if (strcasecmp(value, "true") == 0) { 4132 un->un_f_lun_reset_enabled = TRUE; 4133 } else if (strcasecmp(value, "false") == 0) { 4134 un->un_f_lun_reset_enabled = FALSE; 4135 } else { 4136 goto value_invalid; 4137 } 4138 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4139 "lun reset enabled flag set to %d\n", 4140 un->un_f_lun_reset_enabled); 4141 return; 4142 } 4143 4144 if (strcasecmp(name, "retries-busy") == 0) { 4145 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4146 un->un_busy_retry_count = val; 4147 } else { 4148 goto value_invalid; 4149 } 4150 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4151 "busy retry count set to %d\n", un->un_busy_retry_count); 4152 return; 4153 } 4154 4155 if (strcasecmp(name, "retries-timeout") == 0) { 4156 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4157 un->un_retry_count = val; 4158 } 
else {
4159 goto value_invalid;
4160 }
4161 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4162 "timeout retry count set to %d\n", un->un_retry_count);
4163 return;
4164 }
4165
4166 if (strcasecmp(name, "retries-notready") == 0) {
4167 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4168 un->un_notready_retry_count = val;
4169 } else {
4170 goto value_invalid;
4171 }
4172 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4173 "notready retry count set to %d\n",
4174 un->un_notready_retry_count);
4175 return;
4176 }
4177
4178 if (strcasecmp(name, "retries-reset") == 0) {
4179 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4180 un->un_reset_retry_count = val;
4181 } else {
4182 goto value_invalid;
4183 }
4184 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4185 "reset retry count set to %d\n",
4186 un->un_reset_retry_count);
4187 return;
4188 }
4189
4190 if (strcasecmp(name, "throttle-max") == 0) {
4191 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4192 un->un_saved_throttle = un->un_throttle = val;
4193 } else {
4194 goto value_invalid;
4195 }
4196 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4197 "throttle set to %d\n", un->un_throttle);
4198 }
4199
4200 if (strcasecmp(name, "throttle-min") == 0) {
4201 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4202 un->un_min_throttle = val;
4203 } else {
4204 goto value_invalid;
4205 }
4206 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4207 "min throttle set to %d\n", un->un_min_throttle);
4208 }
4209
4210 if (strcasecmp(name, "rmw-type") == 0) {
4211 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4212 un->un_f_rmw_type = val;
4213 } else {
4214 goto value_invalid;
4215 }
4216 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4217 "RMW type set to %d\n", un->un_f_rmw_type);
4218 }
4219
4220 /*
4221 * Validate the throttle values.
4222 * If any of the numbers are invalid, set everything to defaults.
4223 */
4224 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
4225 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
4226 (un->un_min_throttle > un->un_throttle)) {
4227 un->un_saved_throttle = un->un_throttle = sd_max_throttle;
4228 un->un_min_throttle = sd_min_throttle;
4229 }
4230
4231 if (strcasecmp(name, "mmc-gesn-polling") == 0) {
4232 if (strcasecmp(value, "true") == 0) {
4233 un->un_f_mmc_gesn_polling = TRUE;
4234 } else if (strcasecmp(value, "false") == 0) {
4235 un->un_f_mmc_gesn_polling = FALSE;
4236 } else {
4237 goto value_invalid;
4238 }
4239 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4240 "mmc-gesn-polling set to %d\n",
4241 un->un_f_mmc_gesn_polling);
4242 }
4243
4244 return;
4245
4246 value_invalid:
4247 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4248 "value of prop %s is invalid\n", name);
4249 }
4250
4251 /*
4252 * Function: sd_get_tunables_from_conf()
4253 *
4254 *
4255 * Description: This function reads the data list from the sd.conf file and
4256 * pulls the values that can have numeric values as arguments,
4257 * placing those values in the appropriate sd_tunables member.
4258 * Since the order of the data list members varies across platforms,
4259 * this function reads them from the data list in a platform-specific
4260 * order and places them into the correct sd_tunables member that is
4261 * consistent across all platforms.
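*
* Note the data list is positional: data_list[i] belongs to flag
* bit i, so entries for unset bits are present in the list but
* ignored (the "case 0: continue" below). For example, with
* flags = 0x5, only data_list[0] and data_list[2] are consumed.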
4262 */
4263 static void
4264 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
4265 sd_tunables *values)
4266 {
4267 int i;
4268 int mask;
4269
4270 bzero(values, sizeof (sd_tunables));
4271
4272 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
4273
4274 mask = 1 << i;
4275 if (mask > flags) {
4276 break;
4277 }
4278
4279 switch (mask & flags) {
4280 case 0: /* This mask bit not set in flags */
4281 continue;
4282 case SD_CONF_BSET_THROTTLE:
4283 values->sdt_throttle = data_list[i];
4284 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4285 "sd_get_tunables_from_conf: throttle = %d\n",
4286 values->sdt_throttle);
4287 break;
4288 case SD_CONF_BSET_CTYPE:
4289 values->sdt_ctype = data_list[i];
4290 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4291 "sd_get_tunables_from_conf: ctype = %d\n",
4292 values->sdt_ctype);
4293 break;
4294 case SD_CONF_BSET_NRR_COUNT:
4295 values->sdt_not_rdy_retries = data_list[i];
4296 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4297 "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
4298 values->sdt_not_rdy_retries);
4299 break;
4300 case SD_CONF_BSET_BSY_RETRY_COUNT:
4301 values->sdt_busy_retries = data_list[i];
4302 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4303 "sd_get_tunables_from_conf: busy_retries = %d\n",
4304 values->sdt_busy_retries);
4305 break;
4306 case SD_CONF_BSET_RST_RETRIES:
4307 values->sdt_reset_retries = data_list[i];
4308 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4309 "sd_get_tunables_from_conf: reset_retries = %d\n",
4310 values->sdt_reset_retries);
4311 break;
4312 case SD_CONF_BSET_RSV_REL_TIME:
4313 values->sdt_reserv_rel_time = data_list[i];
4314 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4315 "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
4316 values->sdt_reserv_rel_time);
4317 break;
4318 case SD_CONF_BSET_MIN_THROTTLE:
4319 values->sdt_min_throttle = data_list[i];
4320 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4321 "sd_get_tunables_from_conf: min_throttle = %d\n",
4322 values->sdt_min_throttle);
4323 break;
4324 case SD_CONF_BSET_DISKSORT_DISABLED:
4325 values->sdt_disk_sort_dis = data_list[i];
4326 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4327 "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
4328 values->sdt_disk_sort_dis);
4329 break;
4330 case SD_CONF_BSET_LUN_RESET_ENABLED:
4331 values->sdt_lun_reset_enable = data_list[i];
4332 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4333 "sd_get_tunables_from_conf: lun_reset_enable = %d"
4334 "\n", values->sdt_lun_reset_enable);
4335 break;
4336 case SD_CONF_BSET_CACHE_IS_NV:
4337 values->sdt_suppress_cache_flush = data_list[i];
4338 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4339 "sd_get_tunables_from_conf: "
4340 "suppress_cache_flush = %d"
4341 "\n", values->sdt_suppress_cache_flush);
4342 break;
4343 case SD_CONF_BSET_PC_DISABLED:
4344 values->sdt_power_condition_dis = data_list[i];
4345 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4346 "sd_get_tunables_from_conf: power_condition_dis = "
4347 "%d\n", values->sdt_power_condition_dis);
4348 break;
4349 }
4350 }
4351 }
4352
4353 /*
4354 * Function: sd_process_sdconf_table
4355 *
4356 * Description: Search the static configuration table for a match on the
4357 * inquiry vid/pid and update the driver soft state structure
4358 * according to the table property values for the device.
4359 * 4360 * The form of a configuration table entry is: 4361 * <vid+pid>,<flags>,<property-data> 4362 * "SEAGATE ST42400N",1,0x40000, 4363 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 4364 * 4365 * Arguments: un - driver soft state (unit) structure 4366 */ 4367 4368 static void 4369 sd_process_sdconf_table(struct sd_lun *un) 4370 { 4371 char *id = NULL; 4372 int table_index; 4373 int idlen; 4374 4375 ASSERT(un != NULL); 4376 for (table_index = 0; table_index < sd_disk_table_size; 4377 table_index++) { 4378 id = sd_disk_table[table_index].device_id; 4379 idlen = strlen(id); 4380 if (idlen == 0) { 4381 continue; 4382 } 4383 4384 /* 4385 * The static configuration table currently does not 4386 * implement version 10 properties. Additionally, 4387 * multiple data-property-name entries are not 4388 * implemented in the static configuration table. 4389 */ 4390 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4391 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4392 "sd_process_sdconf_table: disk %s\n", id); 4393 sd_set_vers1_properties(un, 4394 sd_disk_table[table_index].flags, 4395 sd_disk_table[table_index].properties); 4396 break; 4397 } 4398 } 4399 } 4400 4401 4402 /* 4403 * Function: sd_sdconf_id_match 4404 * 4405 * Description: This local function implements a case sensitive vid/pid 4406 * comparison as well as the boundary cases of wild card and 4407 * multiple blanks. 4408 * 4409 * Note: An implicit assumption made here is that the scsi 4410 * inquiry structure will always keep the vid, pid and 4411 * revision strings in consecutive sequence, so they can be 4412 * read as a single string. If this assumption is not the 4413 * case, a separate string, to be used for the check, needs 4414 * to be built with these strings concatenated. 4415 * 4416 * Arguments: un - driver soft state (unit) structure 4417 * id - table or config file vid/pid 4418 * idlen - length of the vid/pid (bytes) 4419 * 4420 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4421 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4422 */ 4423 4424 static int 4425 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 4426 { 4427 struct scsi_inquiry *sd_inq; 4428 int rval = SD_SUCCESS; 4429 4430 ASSERT(un != NULL); 4431 sd_inq = un->un_sd->sd_inq; 4432 ASSERT(id != NULL); 4433 4434 /* 4435 * We use the inq_vid as a pointer to a buffer containing the 4436 * vid and pid and use the entire vid/pid length of the table 4437 * entry for the comparison. This works because the inq_pid 4438 * data member follows inq_vid in the scsi_inquiry structure. 4439 */ 4440 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 4441 /* 4442 * The user id string is compared to the inquiry vid/pid 4443 * using a case insensitive comparison and ignoring 4444 * multiple spaces. 4445 */ 4446 rval = sd_blank_cmp(un, id, idlen); 4447 if (rval != SD_SUCCESS) { 4448 /* 4449 * User id strings that start and end with a "*" 4450 * are a special case. These do not have a 4451 * specific vendor, and the product string can 4452 * appear anywhere in the 16 byte PID portion of 4453 * the inquiry data. This is a simple strstr() 4454 * type search for the user id in the inquiry data. 
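* For example (a hypothetical entry), an id of "*SUPERDISK*"
* matches any device whose 16-byte inquiry PID contains the
* substring "SUPERDISK", regardless of the vendor field.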
4455 */ 4456 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4457 char *pidptr = &id[1]; 4458 int i; 4459 int j; 4460 int pidstrlen = idlen - 2; 4461 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4462 pidstrlen; 4463 4464 if (j < 0) { 4465 return (SD_FAILURE); 4466 } 4467 for (i = 0; i < j; i++) { 4468 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4469 pidptr, pidstrlen) == 0) { 4470 rval = SD_SUCCESS; 4471 break; 4472 } 4473 } 4474 } 4475 } 4476 } 4477 return (rval); 4478 } 4479 4480 4481 /* 4482 * Function: sd_blank_cmp 4483 * 4484 * Description: If the id string starts and ends with a space, treat 4485 * multiple consecutive spaces as equivalent to a single 4486 * space. For example, this causes a sd_disk_table entry 4487 * of " NEC CDROM " to match a device's id string of 4488 * "NEC CDROM". 4489 * 4490 * Note: The success exit condition for this routine is if 4491 * the pointer to the table entry is '\0' and the cnt of 4492 * the inquiry length is zero. This will happen if the inquiry 4493 * string returned by the device is padded with spaces to be 4494 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4495 * SCSI spec states that the inquiry string is to be padded with 4496 * spaces. 4497 * 4498 * Arguments: un - driver soft state (unit) structure 4499 * id - table or config file vid/pid 4500 * idlen - length of the vid/pid (bytes) 4501 * 4502 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4503 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4504 */ 4505 4506 static int 4507 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4508 { 4509 char *p1; 4510 char *p2; 4511 int cnt; 4512 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4513 sizeof (SD_INQUIRY(un)->inq_pid); 4514 4515 ASSERT(un != NULL); 4516 p2 = un->un_sd->sd_inq->inq_vid; 4517 ASSERT(id != NULL); 4518 p1 = id; 4519 4520 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4521 /* 4522 * Note: string p1 is terminated by a NUL but string p2 4523 * isn't. The end of p2 is determined by cnt. 4524 */ 4525 for (;;) { 4526 /* skip over any extra blanks in both strings */ 4527 while ((*p1 != '\0') && (*p1 == ' ')) { 4528 p1++; 4529 } 4530 while ((cnt != 0) && (*p2 == ' ')) { 4531 p2++; 4532 cnt--; 4533 } 4534 4535 /* compare the two strings */ 4536 if ((cnt == 0) || 4537 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4538 break; 4539 } 4540 while ((cnt > 0) && 4541 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4542 p1++; 4543 p2++; 4544 cnt--; 4545 } 4546 } 4547 } 4548 4549 /* return SD_SUCCESS if both strings match */ 4550 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE); 4551 } 4552 4553 4554 /* 4555 * Function: sd_chk_vers1_data 4556 * 4557 * Description: Verify the version 1 device properties provided by the 4558 * user via the configuration file 4559 * 4560 * Arguments: un - driver soft state (unit) structure 4561 * flags - integer mask indicating properties to be set 4562 * prop_list - integer list of property values 4563 * list_len - number of the elements 4564 * 4565 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 4566 * SD_FAILURE - Indicates the user provided data is invalid 4567 */ 4568 4569 static int 4570 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 4571 int list_len, char *dataname_ptr) 4572 { 4573 int i; 4574 int mask = 1; 4575 int index = 0; 4576 4577 ASSERT(un != NULL); 4578 4579 /* Check for a NULL property name and list */ 4580 if (dataname_ptr == NULL) { 4581 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4582 "sd_chk_vers1_data: NULL data property name."); 4583 return (SD_FAILURE); 4584 } 4585 if (prop_list == NULL) { 4586 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4587 "sd_chk_vers1_data: %s NULL data property list.", 4588 dataname_ptr); 4589 return (SD_FAILURE); 4590 } 4591 4592 /* Display a warning if undefined bits are set in the flags */ 4593 if (flags & ~SD_CONF_BIT_MASK) { 4594 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4595 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 4596 "Properties not set.", 4597 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 4598 return (SD_FAILURE); 4599 } 4600 4601 /* 4602 * Verify the length of the list by identifying the highest bit set 4603 * in the flags and validating that the property list has a length 4604 * up to the index of this bit. 4605 */ 4606 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4607 if (flags & mask) { 4608 index++; 4609 } 4610 mask = 1 << i; 4611 } 4612 if (list_len < (index + 2)) { 4613 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4614 "sd_chk_vers1_data: " 4615 "Data property list %s size is incorrect. " 4616 "Properties not set.", dataname_ptr); 4617 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 4618 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 4619 return (SD_FAILURE); 4620 } 4621 return (SD_SUCCESS); 4622 } 4623 4624 4625 /* 4626 * Function: sd_set_vers1_properties 4627 * 4628 * Description: Set version 1 device properties based on a property list 4629 * retrieved from the driver configuration file or static 4630 * configuration table. Version 1 properties have the format: 4631 * 4632 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 4633 * 4634 * where the prop0 value will be used to set prop0 if bit0 4635 * is set in the flags 4636 * 4637 * Arguments: un - driver soft state (unit) structure 4638 * flags - integer mask indicating properties to be set 4639 * prop_list - integer list of property values 4640 */ 4641 4642 static void 4643 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4644 { 4645 ASSERT(un != NULL); 4646 4647 /* 4648 * Set the flag to indicate cache is to be disabled. An attempt 4649 * to disable the cache via sd_cache_control() will be made 4650 * later during attach once the basic initialization is complete. 
4651 */ 4652 if (flags & SD_CONF_BSET_NOCACHE) { 4653 un->un_f_opt_disable_cache = TRUE; 4654 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4655 "sd_set_vers1_properties: caching disabled flag set\n"); 4656 } 4657 4658 /* CD-specific configuration parameters */ 4659 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4660 un->un_f_cfg_playmsf_bcd = TRUE; 4661 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4662 "sd_set_vers1_properties: playmsf_bcd set\n"); 4663 } 4664 if (flags & SD_CONF_BSET_READSUB_BCD) { 4665 un->un_f_cfg_readsub_bcd = TRUE; 4666 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4667 "sd_set_vers1_properties: readsub_bcd set\n"); 4668 } 4669 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4670 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4671 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4672 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4673 } 4674 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4675 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4676 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4677 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4678 } 4679 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4680 un->un_f_cfg_no_read_header = TRUE; 4681 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4682 "sd_set_vers1_properties: no_read_header set\n"); 4683 } 4684 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4685 un->un_f_cfg_read_cd_xd4 = TRUE; 4686 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4687 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4688 } 4689 4690 /* Support for devices which do not have valid/unique serial numbers */ 4691 if (flags & SD_CONF_BSET_FAB_DEVID) { 4692 un->un_f_opt_fab_devid = TRUE; 4693 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4694 "sd_set_vers1_properties: fab_devid bit set\n"); 4695 } 4696 4697 /* Support for user throttle configuration */ 4698 if (flags & SD_CONF_BSET_THROTTLE) { 4699 ASSERT(prop_list != NULL); 4700 un->un_saved_throttle = un->un_throttle = 4701 prop_list->sdt_throttle; 4702 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4703 "sd_set_vers1_properties: throttle set to %d\n", 4704 prop_list->sdt_throttle); 4705 } 4706 4707 /* Set the per disk retry count according to the conf file or table. 
*/ 4708 if (flags & SD_CONF_BSET_NRR_COUNT) { 4709 ASSERT(prop_list != NULL); 4710 if (prop_list->sdt_not_rdy_retries) { 4711 un->un_notready_retry_count = 4712 prop_list->sdt_not_rdy_retries; 4713 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4714 "sd_set_vers1_properties: not ready retry count" 4715 " set to %d\n", un->un_notready_retry_count); 4716 } 4717 } 4718 4719 /* The controller type is reported for generic disk driver ioctls */ 4720 if (flags & SD_CONF_BSET_CTYPE) { 4721 ASSERT(prop_list != NULL); 4722 switch (prop_list->sdt_ctype) { 4723 case CTYPE_CDROM: 4724 un->un_ctype = prop_list->sdt_ctype; 4725 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4726 "sd_set_vers1_properties: ctype set to " 4727 "CTYPE_CDROM\n"); 4728 break; 4729 case CTYPE_CCS: 4730 un->un_ctype = prop_list->sdt_ctype; 4731 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4732 "sd_set_vers1_properties: ctype set to " 4733 "CTYPE_CCS\n"); 4734 break; 4735 case CTYPE_ROD: /* RW optical */ 4736 un->un_ctype = prop_list->sdt_ctype; 4737 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4738 "sd_set_vers1_properties: ctype set to " 4739 "CTYPE_ROD\n"); 4740 break; 4741 default: 4742 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4743 "sd_set_vers1_properties: Could not set " 4744 "invalid ctype value (%d)", 4745 prop_list->sdt_ctype); 4746 } 4747 } 4748 4749 /* Purple failover timeout */ 4750 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4751 ASSERT(prop_list != NULL); 4752 un->un_busy_retry_count = 4753 prop_list->sdt_busy_retries; 4754 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4755 "sd_set_vers1_properties: " 4756 "busy retry count set to %d\n", 4757 un->un_busy_retry_count); 4758 } 4759 4760 /* Purple reset retry count */ 4761 if (flags & SD_CONF_BSET_RST_RETRIES) { 4762 ASSERT(prop_list != NULL); 4763 un->un_reset_retry_count = 4764 prop_list->sdt_reset_retries; 4765 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4766 "sd_set_vers1_properties: " 4767 "reset retry count set to %d\n", 4768 un->un_reset_retry_count); 4769 } 4770 4771 /* Purple reservation release timeout */ 4772 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4773 ASSERT(prop_list != NULL); 4774 un->un_reserve_release_time = 4775 prop_list->sdt_reserv_rel_time; 4776 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4777 "sd_set_vers1_properties: " 4778 "reservation release timeout set to %d\n", 4779 un->un_reserve_release_time); 4780 } 4781 4782 /* 4783 * Driver flag telling the driver to verify that no commands are pending 4784 * for a device before issuing a Test Unit Ready. This is a workaround 4785 * for a firmware bug in some Seagate eliteI drives. 4786 */ 4787 if (flags & SD_CONF_BSET_TUR_CHECK) { 4788 un->un_f_cfg_tur_check = TRUE; 4789 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4790 "sd_set_vers1_properties: tur queue check set\n"); 4791 } 4792 4793 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4794 un->un_min_throttle = prop_list->sdt_min_throttle; 4795 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4796 "sd_set_vers1_properties: min throttle set to %d\n", 4797 un->un_min_throttle); 4798 } 4799 4800 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4801 un->un_f_disksort_disabled = 4802 (prop_list->sdt_disk_sort_dis != 0) ? 4803 TRUE : FALSE; 4804 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4805 "sd_set_vers1_properties: disksort disabled " 4806 "flag set to %d\n", 4807 prop_list->sdt_disk_sort_dis); 4808 } 4809 4810 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4811 un->un_f_lun_reset_enabled = 4812 (prop_list->sdt_lun_reset_enable != 0) ? 
4813 TRUE : FALSE; 4814 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4815 "sd_set_vers1_properties: lun reset enabled " 4816 "flag set to %d\n", 4817 prop_list->sdt_lun_reset_enable); 4818 } 4819 4820 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4821 un->un_f_suppress_cache_flush = 4822 (prop_list->sdt_suppress_cache_flush != 0) ? 4823 TRUE : FALSE; 4824 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4825 "sd_set_vers1_properties: suppress_cache_flush " 4826 "flag set to %d\n", 4827 prop_list->sdt_suppress_cache_flush); 4828 } 4829 4830 if (flags & SD_CONF_BSET_PC_DISABLED) { 4831 un->un_f_power_condition_disabled = 4832 (prop_list->sdt_power_condition_dis != 0) ? 4833 TRUE : FALSE; 4834 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4835 "sd_set_vers1_properties: power_condition_disabled " 4836 "flag set to %d\n", 4837 prop_list->sdt_power_condition_dis); 4838 } 4839 4840 /* 4841 * Validate the throttle values. 4842 * If any of the numbers are invalid, set everything to defaults. 4843 */ 4844 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4845 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4846 (un->un_min_throttle > un->un_throttle)) { 4847 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4848 un->un_min_throttle = sd_min_throttle; 4849 } 4850 } 4851 4852 /* 4853 * Function: sd_is_lsi() 4854 * 4855 * Description: Check for lsi devices, step through the static device 4856 * table to match vid/pid. 4857 * 4858 * Args: un - ptr to sd_lun 4859 * 4860 * Notes: When creating new LSI property, need to add the new LSI property 4861 * to this function. 4862 */ 4863 static void 4864 sd_is_lsi(struct sd_lun *un) 4865 { 4866 char *id = NULL; 4867 int table_index; 4868 int idlen; 4869 void *prop; 4870 4871 ASSERT(un != NULL); 4872 for (table_index = 0; table_index < sd_disk_table_size; 4873 table_index++) { 4874 id = sd_disk_table[table_index].device_id; 4875 idlen = strlen(id); 4876 if (idlen == 0) { 4877 continue; 4878 } 4879 4880 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4881 prop = sd_disk_table[table_index].properties; 4882 if (prop == &lsi_properties || 4883 prop == &lsi_oem_properties || 4884 prop == &lsi_properties_scsi || 4885 prop == &symbios_properties) { 4886 un->un_f_cfg_is_lsi = TRUE; 4887 } 4888 break; 4889 } 4890 } 4891 } 4892 4893 /* 4894 * Function: sd_get_physical_geometry 4895 * 4896 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4897 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4898 * target, and use this information to initialize the physical 4899 * geometry cache specified by pgeom_p. 4900 * 4901 * MODE SENSE is an optional command, so failure in this case 4902 * does not necessarily denote an error. We want to use the 4903 * MODE SENSE commands to derive the physical geometry of the 4904 * device, but if either command fails, the logical geometry is 4905 * used as the fallback for disk label geometry in cmlb. 4906 * 4907 * This requires that un->un_blockcount and un->un_tgt_blocksize 4908 * have already been initialized for the current target and 4909 * that the current values be passed as args so that we don't 4910 * end up ever trying to use -1 as a valid value. This could 4911 * happen if either value is reset while we're not holding 4912 * the mutex. 4913 * 4914 * Arguments: un - driver soft state (unit) structure 4915 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4916 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4917 * to use the USCSI "direct" chain and bypass the normal 4918 * command waitq. 
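*
* Return Code: 0 - both MODE SENSE pages were retrieved and the
*		physical geometry cache was updated.
*		EIO - the device is a cd/dvd (for which pages 3 and
*		4 are reserved) or a MODE SENSE command failed; the
*		caller falls back to the logical geometry.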
4919 * 4920 * Context: Kernel thread only (can sleep). 4921 */ 4922 4923 static int 4924 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4925 diskaddr_t capacity, int lbasize, int path_flag) 4926 { 4927 struct mode_format *page3p; 4928 struct mode_geometry *page4p; 4929 struct mode_header *headerp; 4930 int sector_size; 4931 int nsect; 4932 int nhead; 4933 int ncyl; 4934 int intrlv; 4935 int spc; 4936 diskaddr_t modesense_capacity; 4937 int rpm; 4938 int bd_len; 4939 int mode_header_length; 4940 uchar_t *p3bufp; 4941 uchar_t *p4bufp; 4942 int cdbsize; 4943 int ret = EIO; 4944 sd_ssc_t *ssc; 4945 int status; 4946 4947 ASSERT(un != NULL); 4948 4949 if (lbasize == 0) { 4950 if (ISCD(un)) { 4951 lbasize = 2048; 4952 } else { 4953 lbasize = un->un_sys_blocksize; 4954 } 4955 } 4956 pgeom_p->g_secsize = (unsigned short)lbasize; 4957 4958 /* 4959 * If the unit is a cd/dvd drive MODE SENSE page three 4960 * and MODE SENSE page four are reserved (see SBC spec 4961 * and MMC spec). To prevent soft errors just return 4962 * using the default LBA size. 4963 */ 4964 if (ISCD(un)) 4965 return (ret); 4966 4967 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4968 4969 /* 4970 * Retrieve MODE SENSE page 3 - Format Device Page 4971 */ 4972 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4973 ssc = sd_ssc_init(un); 4974 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp, 4975 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag); 4976 if (status != 0) { 4977 SD_ERROR(SD_LOG_COMMON, un, 4978 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4979 goto page3_exit; 4980 } 4981 4982 /* 4983 * Determine size of Block Descriptors in order to locate the mode 4984 * page data. ATAPI devices return 0, SCSI devices should return 4985 * MODE_BLK_DESC_LENGTH. 4986 */ 4987 headerp = (struct mode_header *)p3bufp; 4988 if (un->un_f_cfg_is_atapi == TRUE) { 4989 struct mode_header_grp2 *mhp = 4990 (struct mode_header_grp2 *)headerp; 4991 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4992 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4993 } else { 4994 mode_header_length = MODE_HEADER_LENGTH; 4995 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4996 } 4997 4998 if (bd_len > MODE_BLK_DESC_LENGTH) { 4999 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5000 "sd_get_physical_geometry: received unexpected bd_len " 5001 "of %d, page3\n", bd_len); 5002 status = EIO; 5003 goto page3_exit; 5004 } 5005 5006 page3p = (struct mode_format *) 5007 ((caddr_t)headerp + mode_header_length + bd_len); 5008 5009 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 5010 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5011 "sd_get_physical_geometry: mode sense pg3 code mismatch " 5012 "%d\n", page3p->mode_page.code); 5013 status = EIO; 5014 goto page3_exit; 5015 } 5016 5017 /* 5018 * Use this physical geometry data only if BOTH MODE SENSE commands 5019 * complete successfully; otherwise, revert to the logical geometry. 5020 * So, we need to save everything in temporary variables. 
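* (Multi-byte MODE SENSE fields arrive big-endian, hence the
* BE_16() conversions below.)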
5021 */ 5022 sector_size = BE_16(page3p->data_bytes_sect); 5023 5024 /* 5025 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 5026 */ 5027 if (sector_size == 0) { 5028 sector_size = un->un_sys_blocksize; 5029 } else { 5030 sector_size &= ~(un->un_sys_blocksize - 1); 5031 } 5032 5033 nsect = BE_16(page3p->sect_track); 5034 intrlv = BE_16(page3p->interleave); 5035 5036 SD_INFO(SD_LOG_COMMON, un, 5037 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 5038 SD_INFO(SD_LOG_COMMON, un, 5039 " mode page: %d; nsect: %d; sector size: %d;\n", 5040 page3p->mode_page.code, nsect, sector_size); 5041 SD_INFO(SD_LOG_COMMON, un, 5042 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 5043 BE_16(page3p->track_skew), 5044 BE_16(page3p->cylinder_skew)); 5045 5046 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 5047 5048 /* 5049 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 5050 */ 5051 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 5052 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp, 5053 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag); 5054 if (status != 0) { 5055 SD_ERROR(SD_LOG_COMMON, un, 5056 "sd_get_physical_geometry: mode sense page 4 failed\n"); 5057 goto page4_exit; 5058 } 5059 5060 /* 5061 * Determine size of Block Descriptors in order to locate the mode 5062 * page data. ATAPI devices return 0, SCSI devices should return 5063 * MODE_BLK_DESC_LENGTH. 5064 */ 5065 headerp = (struct mode_header *)p4bufp; 5066 if (un->un_f_cfg_is_atapi == TRUE) { 5067 struct mode_header_grp2 *mhp = 5068 (struct mode_header_grp2 *)headerp; 5069 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 5070 } else { 5071 bd_len = ((struct mode_header *)headerp)->bdesc_length; 5072 } 5073 5074 if (bd_len > MODE_BLK_DESC_LENGTH) { 5075 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5076 "sd_get_physical_geometry: received unexpected bd_len of " 5077 "%d, page4\n", bd_len); 5078 status = EIO; 5079 goto page4_exit; 5080 } 5081 5082 page4p = (struct mode_geometry *) 5083 ((caddr_t)headerp + mode_header_length + bd_len); 5084 5085 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 5086 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5087 "sd_get_physical_geometry: mode sense pg4 code mismatch " 5088 "%d\n", page4p->mode_page.code); 5089 status = EIO; 5090 goto page4_exit; 5091 } 5092 5093 /* 5094 * Stash the data now, after we know that both commands completed. 5095 */ 5096 5097 5098 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 5099 spc = nhead * nsect; 5100 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 5101 rpm = BE_16(page4p->rpm); 5102 5103 modesense_capacity = spc * ncyl; 5104 5105 SD_INFO(SD_LOG_COMMON, un, 5106 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 5107 SD_INFO(SD_LOG_COMMON, un, 5108 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 5109 SD_INFO(SD_LOG_COMMON, un, 5110 " computed capacity(h*s*c): %d;\n", modesense_capacity); 5111 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 5112 (void *)pgeom_p, capacity); 5113 5114 /* 5115 * Compensate if the drive's geometry is not rectangular, i.e., 5116 * the product of C * H * S returned by MODE SENSE >= that returned 5117 * by read capacity. This is an idiosyncrasy of the original x86 5118 * disk subsystem. 
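* For example (made-up numbers): nhead = 16 and nsect = 63 give
* spc = 1008, and ncyl = 1000 gives modesense_capacity = 1008000.
* If READ CAPACITY reported 1000000 blocks, then g_acyl =
* (1008000 - 1000000 + 1007) / 1008 = 8 alternate cylinders, and
* g_ncyl = 1000 - 8 = 992.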
5119 */ 5120 if (modesense_capacity >= capacity) { 5121 SD_INFO(SD_LOG_COMMON, un, 5122 "sd_get_physical_geometry: adjusting acyl; " 5123 "old: %d; new: %d\n", pgeom_p->g_acyl, 5124 (modesense_capacity - capacity + spc - 1) / spc); 5125 if (sector_size != 0) { 5126 /* 1243403: NEC D38x7 drives don't support sec size */ 5127 pgeom_p->g_secsize = (unsigned short)sector_size; 5128 } 5129 pgeom_p->g_nsect = (unsigned short)nsect; 5130 pgeom_p->g_nhead = (unsigned short)nhead; 5131 pgeom_p->g_capacity = capacity; 5132 pgeom_p->g_acyl = 5133 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 5134 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 5135 } 5136 5137 pgeom_p->g_rpm = (unsigned short)rpm; 5138 pgeom_p->g_intrlv = (unsigned short)intrlv; 5139 ret = 0; 5140 5141 SD_INFO(SD_LOG_COMMON, un, 5142 "sd_get_physical_geometry: mode sense geometry:\n"); 5143 SD_INFO(SD_LOG_COMMON, un, 5144 " nsect: %d; sector size: %d; interlv: %d\n", 5145 nsect, sector_size, intrlv); 5146 SD_INFO(SD_LOG_COMMON, un, 5147 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 5148 nhead, ncyl, rpm, modesense_capacity); 5149 SD_INFO(SD_LOG_COMMON, un, 5150 "sd_get_physical_geometry: (cached)\n"); 5151 SD_INFO(SD_LOG_COMMON, un, 5152 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 5153 pgeom_p->g_ncyl, pgeom_p->g_acyl, 5154 pgeom_p->g_nhead, pgeom_p->g_nsect); 5155 SD_INFO(SD_LOG_COMMON, un, 5156 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 5157 pgeom_p->g_secsize, pgeom_p->g_capacity, 5158 pgeom_p->g_intrlv, pgeom_p->g_rpm); 5159 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 5160 5161 page4_exit: 5162 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 5163 5164 page3_exit: 5165 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 5166 5167 if (status != 0) { 5168 if (status == EIO) { 5169 /* 5170 * Some disks do not support mode sense(6), we 5171 * should ignore this kind of error(sense key is 5172 * 0x5 - illegal request). 5173 */ 5174 uint8_t *sensep; 5175 int senlen; 5176 5177 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 5178 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 5179 ssc->ssc_uscsi_cmd->uscsi_rqresid); 5180 5181 if (senlen > 0 && 5182 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 5183 sd_ssc_assessment(ssc, 5184 SD_FMT_IGNORE_COMPROMISE); 5185 } else { 5186 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 5187 } 5188 } else { 5189 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5190 } 5191 } 5192 sd_ssc_fini(ssc); 5193 return (ret); 5194 } 5195 5196 /* 5197 * Function: sd_get_virtual_geometry 5198 * 5199 * Description: Ask the controller to tell us about the target device. 5200 * 5201 * Arguments: un - pointer to softstate 5202 * capacity - disk capacity in #blocks 5203 * lbasize - disk block size in bytes 5204 * 5205 * Context: Kernel thread only 5206 */ 5207 5208 static int 5209 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 5210 diskaddr_t capacity, int lbasize) 5211 { 5212 uint_t geombuf; 5213 int spc; 5214 5215 ASSERT(un != NULL); 5216 5217 /* Set sector size, and total number of sectors */ 5218 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 5219 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 5220 5221 /* Let the HBA tell us its geometry */ 5222 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 5223 5224 /* A value of -1 indicates an undefined "geometry" property */ 5225 if (geombuf == (-1)) { 5226 return (EINVAL); 5227 } 5228 5229 /* Initialize the logical geometry cache. 
*/ 5230 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 5231 lgeom_p->g_nsect = geombuf & 0xffff; 5232 lgeom_p->g_secsize = un->un_sys_blocksize; 5233 5234 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 5235 5236 /* 5237 * Note: The driver originally converted the capacity value from 5238 * target blocks to system blocks. However, the capacity value passed 5239 * to this routine is already in terms of system blocks (this scaling 5240 * is done when the READ CAPACITY command is issued and processed). 5241 * This 'error' may have gone undetected because the usage of g_ncyl 5242 * (which is based upon g_capacity) is very limited within the driver. 5243 */ 5244 lgeom_p->g_capacity = capacity; 5245 5246 /* 5247 * Set ncyl to zero if the hba returned a zero nhead or nsect value. The 5248 * hba may return zero values if the device has been removed. 5249 */ 5250 if (spc == 0) { 5251 lgeom_p->g_ncyl = 0; 5252 } else { 5253 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 5254 } 5255 lgeom_p->g_acyl = 0; 5256 5257 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 5258 return (0); 5259 5260 } 5261 /* 5262 * Function: sd_update_block_info 5263 * 5264 * Description: Store the new target sector size and capacity in the 5265 * soft state; for fixed media the system block size tracks the target size. 5266 * 5267 * Arguments: un: unit struct. 5268 * lbasize: new target sector size 5269 * capacity: new target capacity, ie. block count 5270 * 5271 * Context: Kernel thread context 5272 */ 5273 5274 static void 5275 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 5276 { 5277 if (lbasize != 0) { 5278 un->un_tgt_blocksize = lbasize; 5279 un->un_f_tgt_blocksize_is_valid = TRUE; 5280 if (!un->un_f_has_removable_media) { 5281 un->un_sys_blocksize = lbasize; 5282 } 5283 } 5284 5285 if (capacity != 0) { 5286 un->un_blockcount = capacity; 5287 un->un_f_blockcount_is_valid = TRUE; 5288 } 5289 } 5290 5291 5292 /* 5293 * Function: sd_register_devid 5294 * 5295 * Description: This routine will obtain the device id information from the 5296 * target, obtain the serial number, and register the device 5297 * id with the ddi framework. 5298 * 5299 * Arguments: devi - the system's dev_info_t for the device. 5300 * ssc - access state (contains the unit's soft state) 5301 * reservation_flag - indicates if a reservation conflict 5302 * occurred during attach 5303 * 5304 * Context: Kernel Thread 5305 */ 5306 static void 5307 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag) 5308 { 5309 int rval = 0; 5310 uchar_t *inq80 = NULL; 5311 size_t inq80_len = MAX_INQUIRY_SIZE; 5312 size_t inq80_resid = 0; 5313 uchar_t *inq83 = NULL; 5314 size_t inq83_len = MAX_INQUIRY_SIZE; 5315 size_t inq83_resid = 0; 5316 int dlen, len; 5317 char *sn; 5318 struct sd_lun *un; 5319 5320 ASSERT(ssc != NULL); 5321 un = ssc->ssc_un; 5322 ASSERT(un != NULL); 5323 ASSERT(mutex_owned(SD_MUTEX(un))); 5324 ASSERT((SD_DEVINFO(un)) == devi); 5325 5326 5327 /* 5328 * We check the availability of the World Wide Name (0x83) and Unit 5329 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 5330 * un_vpd_page_mask from them, we decide which way to get the WWN. If 5331 * 0x83 is available, that is the best choice. Our next choice is 5332 * 0x80. If neither is available, we munge the devid from the device 5333 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 5334 * to fabricate a devid for non-Sun qualified disks.
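 *
 * The resulting preference is sketched below for illustration; the
 * actual calls appear further down, and ddi_devid_scsi_encode()
 * internally prefers the page 0x83 descriptor over the page 0x80
 * serial number. drvname, inq, len80, len83 and fabricate_devid()
 * are placeholders, not driver identifiers:
 *
 *	rc = ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
 *	    drvname, inq, sizeof (*inq), inq80, len80, inq83, len83,
 *	    &devid);
 *	if (rc != DDI_SUCCESS)
 *		fabricate_devid();	(the DEVID_FAB path, sketched)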
*/ 5336 if (sd_check_vpd_page_support(ssc) == 0) { 5337 /* collect page 80 data if available */ 5338 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 5339 5340 mutex_exit(SD_MUTEX(un)); 5341 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 5342 5343 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len, 5344 0x01, 0x80, &inq80_resid); 5345 5346 if (rval != 0) { 5347 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5348 kmem_free(inq80, inq80_len); 5349 inq80 = NULL; 5350 inq80_len = 0; 5351 } else if (ddi_prop_exists( 5352 DDI_DEV_T_NONE, SD_DEVINFO(un), 5353 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 5354 INQUIRY_SERIAL_NO) == 0) { 5355 /* 5356 * If we don't already have a serial number 5357 * property, do a quick verification of the data 5358 * returned and define the property. 5359 */ 5360 dlen = inq80_len - inq80_resid; 5361 len = (size_t)inq80[3]; 5362 if ((dlen >= 4) && ((len + 4) <= dlen)) { 5363 /* 5364 * Ensure sn termination, skip leading 5365 * blanks, and create property 5366 * 'inquiry-serial-no'. 5367 */ 5368 sn = (char *)&inq80[4]; 5369 sn[len] = 0; 5370 while (*sn && (*sn == ' ')) 5371 sn++; 5372 if (*sn) { 5373 (void) ddi_prop_update_string( 5374 DDI_DEV_T_NONE, 5375 SD_DEVINFO(un), 5376 INQUIRY_SERIAL_NO, sn); 5377 } 5378 } 5379 } 5380 mutex_enter(SD_MUTEX(un)); 5381 } 5382 5383 /* collect page 83 data if available */ 5384 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 5385 mutex_exit(SD_MUTEX(un)); 5386 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 5387 5388 rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len, 5389 0x01, 0x83, &inq83_resid); 5390 5391 if (rval != 0) { 5392 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5393 kmem_free(inq83, inq83_len); 5394 inq83 = NULL; 5395 inq83_len = 0; 5396 } 5397 mutex_enter(SD_MUTEX(un)); 5398 } 5399 } 5400 5401 /* 5402 * If the transport has already registered a devid for this target 5403 * then that takes precedence over the driver's determination 5404 * of the devid. 5405 * 5406 * NOTE: The reason this check is done here instead of at the beginning 5407 * of the function is to allow the code above to create the 5408 * 'inquiry-serial-no' property. 5409 */ 5410 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) { 5411 ASSERT(un->un_devid); 5412 un->un_f_devid_transport_defined = TRUE; 5413 goto cleanup; /* use devid registered by the transport */ 5414 } 5415 5416 /* 5417 * This is the case of antiquated Sun disk drives that have the 5418 * FAB_DEVID property set in the disk_table. These drives 5419 * manage the devids by storing them in the last 2 available sectors 5420 * on the drive and have them fabricated by the ddi layer by calling 5421 * ddi_devid_init and passing the DEVID_FAB flag. 5422 */ 5423 if (un->un_f_opt_fab_devid == TRUE) { 5424 /* 5425 * Relying on EINVAL alone isn't reliable, since a reserved disk 5426 * may result in invalid geometry, so check to make sure a 5427 * reservation conflict did not occur during attach. 5428 */ 5429 if ((sd_get_devid(ssc) == EINVAL) && 5430 (reservation_flag != SD_TARGET_IS_RESERVED)) { 5431 /* 5432 * The devid is invalid AND there is no reservation 5433 * conflict. Fabricate a new devid.
*/ 5435 (void) sd_create_devid(ssc); 5436 } 5437 5438 /* Register the devid if it exists */ 5439 if (un->un_devid != NULL) { 5440 (void) ddi_devid_register(SD_DEVINFO(un), 5441 un->un_devid); 5442 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5443 "sd_register_devid: Devid Fabricated\n"); 5444 } 5445 goto cleanup; 5446 } 5447 5448 /* encode best devid possible based on data available */ 5449 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 5450 (char *)ddi_driver_name(SD_DEVINFO(un)), 5451 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 5452 inq80, inq80_len - inq80_resid, inq83, inq83_len - 5453 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 5454 5455 /* devid successfully encoded, register devid */ 5456 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 5457 5458 } else { 5459 /* 5460 * Unable to encode a devid based on data available. 5461 * This is not a Sun qualified disk. Older Sun disk 5462 * drives that have the SD_FAB_DEVID property 5463 * set in the disk_table and non-Sun qualified 5464 * disks are treated in the same manner. These 5465 * drives manage the devids by storing them in 5466 * the last 2 available sectors on the drive and 5467 * have them fabricated by the ddi layer by 5468 * calling ddi_devid_init and passing the 5469 * DEVID_FAB flag. 5470 * Create a fabricated devid only if one does 5471 * not already exist. 5472 */ 5473 if (sd_get_devid(ssc) == EINVAL) { 5474 (void) sd_create_devid(ssc); 5475 } 5476 un->un_f_opt_fab_devid = TRUE; 5477 5478 /* Register the devid if it exists */ 5479 if (un->un_devid != NULL) { 5480 (void) ddi_devid_register(SD_DEVINFO(un), 5481 un->un_devid); 5482 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5483 "sd_register_devid: devid fabricated using " 5484 "ddi framework\n"); 5485 } 5486 } 5487 5488 cleanup: 5489 /* clean up resources */ 5490 if (inq80 != NULL) { 5491 kmem_free(inq80, inq80_len); 5492 } 5493 if (inq83 != NULL) { 5494 kmem_free(inq83, inq83_len); 5495 } 5496 } 5497 5498 5499 5500 /* 5501 * Function: sd_get_devid 5502 * 5503 * Description: This routine will return 0 if a valid device id has been 5504 * obtained from the target and stored in the soft state. If a 5505 * valid device id has not been previously read and stored, a 5506 * read attempt will be made. 5507 * 5508 * Arguments: un - driver soft state (unit) structure 5509 * 5510 * Return Code: 0 if we successfully get the device id 5511 * 5512 * Context: Kernel Thread 5513 */ 5514 5515 static int 5516 sd_get_devid(sd_ssc_t *ssc) 5517 { 5518 struct dk_devid *dkdevid; 5519 ddi_devid_t tmpid; 5520 uint_t *ip; 5521 size_t sz; 5522 diskaddr_t blk; 5523 int status; 5524 int chksum; 5525 int i; 5526 size_t buffer_size; 5527 struct sd_lun *un; 5528 5529 ASSERT(ssc != NULL); 5530 un = ssc->ssc_un; 5531 ASSERT(un != NULL); 5532 ASSERT(mutex_owned(SD_MUTEX(un))); 5533 5534 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 5535 un); 5536 5537 if (un->un_devid != NULL) { 5538 return (0); 5539 } 5540 5541 mutex_exit(SD_MUTEX(un)); 5542 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5543 (void *)SD_PATH_DIRECT) != 0) { 5544 mutex_enter(SD_MUTEX(un)); 5545 return (EINVAL); 5546 } 5547 5548 /* 5549 * Read and verify device id, stored in the reserved cylinders at the 5550 * end of the disk. The backup label is on the odd sectors of the last 5551 * track of the last cylinder. The device id will be on the track of 5552 * the next-to-last cylinder.
5553 */ 5554 mutex_enter(SD_MUTEX(un)); 5555 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 5556 mutex_exit(SD_MUTEX(un)); 5557 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 5558 status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk, 5559 SD_PATH_DIRECT); 5560 5561 if (status != 0) { 5562 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5563 goto error; 5564 } 5565 5566 /* Validate the revision */ 5567 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 5568 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 5569 status = EINVAL; 5570 goto error; 5571 } 5572 5573 /* Calculate the checksum */ 5574 chksum = 0; 5575 ip = (uint_t *)dkdevid; 5576 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int)); 5577 i++) { 5578 chksum ^= ip[i]; 5579 } 5580 5581 /* Compare the checksums */ 5582 if (DKD_GETCHKSUM(dkdevid) != chksum) { 5583 status = EINVAL; 5584 goto error; 5585 } 5586 5587 /* Validate the device id */ 5588 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 5589 status = EINVAL; 5590 goto error; 5591 } 5592 5593 /* 5594 * Store the device id in the driver soft state 5595 */ 5596 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 5597 tmpid = kmem_alloc(sz, KM_SLEEP); 5598 5599 mutex_enter(SD_MUTEX(un)); 5600 5601 un->un_devid = tmpid; 5602 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 5603 5604 kmem_free(dkdevid, buffer_size); 5605 5606 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 5607 5608 return (status); 5609 error: 5610 mutex_enter(SD_MUTEX(un)); 5611 kmem_free(dkdevid, buffer_size); 5612 return (status); 5613 } 5614 5615 5616 /* 5617 * Function: sd_create_devid 5618 * 5619 * Description: This routine will fabricate the device id and write it 5620 * to the disk. 5621 * 5622 * Arguments: un - driver soft state (unit) structure 5623 * 5624 * Return Code: value of the fabricated device id 5625 * 5626 * Context: Kernel Thread 5627 */ 5628 5629 static ddi_devid_t 5630 sd_create_devid(sd_ssc_t *ssc) 5631 { 5632 struct sd_lun *un; 5633 5634 ASSERT(ssc != NULL); 5635 un = ssc->ssc_un; 5636 ASSERT(un != NULL); 5637 5638 /* Fabricate the devid */ 5639 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 5640 == DDI_FAILURE) { 5641 return (NULL); 5642 } 5643 5644 /* Write the devid to disk */ 5645 if (sd_write_deviceid(ssc) != 0) { 5646 ddi_devid_free(un->un_devid); 5647 un->un_devid = NULL; 5648 } 5649 5650 return (un->un_devid); 5651 } 5652 5653 5654 /* 5655 * Function: sd_write_deviceid 5656 * 5657 * Description: This routine will write the device id to the disk 5658 * reserved sector. 
* 5660 * Arguments: un - driver soft state (unit) structure 5661 * 5662 * Return Code: -1 if the reserved devid block cannot be located 5663 * otherwise the value returned by sd_send_scsi_WRITE 5664 * 5665 * Context: Kernel Thread 5666 */ 5667 5668 static int 5669 sd_write_deviceid(sd_ssc_t *ssc) 5670 { 5671 struct dk_devid *dkdevid; 5672 uchar_t *buf; 5673 diskaddr_t blk; 5674 uint_t *ip, chksum; 5675 int status; 5676 int i; 5677 struct sd_lun *un; 5678 5679 ASSERT(ssc != NULL); 5680 un = ssc->ssc_un; 5681 ASSERT(un != NULL); 5682 ASSERT(mutex_owned(SD_MUTEX(un))); 5683 5684 mutex_exit(SD_MUTEX(un)); 5685 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5686 (void *)SD_PATH_DIRECT) != 0) { 5687 mutex_enter(SD_MUTEX(un)); 5688 return (-1); 5689 } 5690 5691 5692 /* Allocate the buffer */ 5693 buf = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 5694 dkdevid = (struct dk_devid *)buf; 5695 5696 /* Fill in the revision */ 5697 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 5698 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 5699 5700 /* Copy in the device id */ 5701 mutex_enter(SD_MUTEX(un)); 5702 bcopy(un->un_devid, &dkdevid->dkd_devid, 5703 ddi_devid_sizeof(un->un_devid)); 5704 mutex_exit(SD_MUTEX(un)); 5705 5706 /* Calculate the checksum */ 5707 chksum = 0; 5708 ip = (uint_t *)dkdevid; 5709 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int)); 5710 i++) { 5711 chksum ^= ip[i]; 5712 } 5713 5714 /* Fill-in checksum */ 5715 DKD_FORMCHKSUM(chksum, dkdevid); 5716 5717 /* Write the reserved sector */ 5718 status = sd_send_scsi_WRITE(ssc, buf, un->un_sys_blocksize, blk, 5719 SD_PATH_DIRECT); 5720 if (status != 0) 5721 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5722 5723 kmem_free(buf, un->un_sys_blocksize); 5724 5725 mutex_enter(SD_MUTEX(un)); 5726 return (status); 5727 } 5728 5729 5730 /* 5731 * Function: sd_check_vpd_page_support 5732 * 5733 * Description: This routine sends an inquiry command with the EVPD bit set and 5734 * a page code of 0x00 to the device. It is used to determine which 5735 * vital product pages are available to find the devid. We are 5736 * looking for pages 0x83, 0x80, or 0xB1. If we return -1, the 5737 * device does not support VPD pages. 5738 * 5739 * Arguments: un - driver soft state (unit) structure 5740 * 5741 * Return Code: 0 - success 5742 * -1 - VPD pages not supported (or the inquiry failed) 5743 * 5744 * Context: This routine can sleep. 5745 */ 5746 5747 static int 5748 sd_check_vpd_page_support(sd_ssc_t *ssc) 5749 { 5750 uchar_t *page_list = NULL; 5751 uchar_t page_length = 0xff; /* Use max possible length */ 5752 uchar_t evpd = 0x01; /* Set the EVPD bit */ 5753 uchar_t page_code = 0x00; /* Supported VPD Pages */ 5754 int rval = 0; 5755 int counter; 5756 struct sd_lun *un; 5757 5758 ASSERT(ssc != NULL); 5759 un = ssc->ssc_un; 5760 ASSERT(un != NULL); 5761 ASSERT(mutex_owned(SD_MUTEX(un))); 5762 5763 mutex_exit(SD_MUTEX(un)); 5764 5765 /* 5766 * We'll set the page length to the maximum to save figuring it out 5767 * with an additional call. 5768 */ 5769 page_list = kmem_zalloc(page_length, KM_SLEEP); 5770 5771 rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd, 5772 page_code, NULL); 5773 5774 if (rval != 0) 5775 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5776 5777 mutex_enter(SD_MUTEX(un)); 5778 5779 /* 5780 * Now we must validate that the device accepted the command, as some 5781 * drives do not support it. If the drive does support it, we will 5782 * return 0, and the supported pages will be in un_vpd_page_mask. If 5783 * not, we return -1.
*/ 5785 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5786 /* Loop to record the supported pages we care about */ 5787 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5788 5789 /* 5790 * Pages are returned in ascending order, and 0x83 is what we 5791 * are hoping for. 5792 */ 5793 while ((page_list[counter] <= 0xB1) && 5794 (counter <= (page_list[VPD_PAGE_LENGTH] + 5795 VPD_HEAD_OFFSET))) { 5796 /* 5797 * page_list[VPD_PAGE_LENGTH] counts the page codes 5798 * that follow the header, so the last valid index is that count plus VPD_HEAD_OFFSET. 5799 */ 5800 5801 switch (page_list[counter]) { 5802 case 0x00: 5803 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5804 break; 5805 case 0x80: 5806 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5807 break; 5808 case 0x81: 5809 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5810 break; 5811 case 0x82: 5812 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5813 break; 5814 case 0x83: 5815 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5816 break; 5817 case 0x86: 5818 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5819 break; 5820 case 0xB1: 5821 un->un_vpd_page_mask |= SD_VPD_DEV_CHARACTER_PG; 5822 break; 5823 } 5824 counter++; 5825 } 5826 5827 } else { 5828 rval = -1; 5829 5830 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5831 "sd_check_vpd_page_support: This drive does not implement " 5832 "VPD pages.\n"); 5833 } 5834 5835 kmem_free(page_list, page_length); 5836 5837 return (rval); 5838 } 5839 5840 5841 /* 5842 * Function: sd_setup_pm 5843 * 5844 * Description: Initialize Power Management on the device 5845 * 5846 * Context: Kernel Thread 5847 */ 5848 5849 static void 5850 sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi) 5851 { 5852 uint_t log_page_size; 5853 uchar_t *log_page_data; 5854 int rval = 0; 5855 struct sd_lun *un; 5856 5857 ASSERT(ssc != NULL); 5858 un = ssc->ssc_un; 5859 ASSERT(un != NULL); 5860 5861 /* 5862 * Since we are called from attach, holding a mutex for 5863 * un is unnecessary. Because some of the routines called 5864 * from here require SD_MUTEX to not be held, assert this 5865 * right up front. 5866 */ 5867 ASSERT(!mutex_owned(SD_MUTEX(un))); 5868 /* 5869 * Since the sd device does not have the 'reg' property, 5870 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5871 * The following code is to tell cpr that this device 5872 * DOES need to be suspended and resumed. 5873 */ 5874 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5875 "pm-hardware-state", "needs-suspend-resume"); 5876 5877 /* 5878 * This complies with the new power management framework 5879 * for certain desktop machines. Create the pm_components 5880 * property as a string array property. 5881 * If un_f_pm_supported is TRUE, the HBA to which the disk 5882 * is attached has set the "pm-capable" property and 5883 * its value is greater than 0. 5884 */ 5885 if (un->un_f_pm_supported) { 5886 /* 5887 * Not all devices have a motor, so try it first; 5888 * some devices may return ILLEGAL REQUEST and some 5889 * will hang. 5890 * The following START_STOP_UNIT is used to check whether the 5891 * target device has a motor.
*/ 5893 un->un_f_start_stop_supported = TRUE; 5894 5895 if (un->un_f_power_condition_supported) { 5896 rval = sd_send_scsi_START_STOP_UNIT(ssc, 5897 SD_POWER_CONDITION, SD_TARGET_ACTIVE, 5898 SD_PATH_DIRECT); 5899 if (rval != 0) { 5900 un->un_f_power_condition_supported = FALSE; 5901 } 5902 } 5903 if (!un->un_f_power_condition_supported) { 5904 rval = sd_send_scsi_START_STOP_UNIT(ssc, 5905 SD_START_STOP, SD_TARGET_START, SD_PATH_DIRECT); 5906 } 5907 if (rval != 0) { 5908 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5909 un->un_f_start_stop_supported = FALSE; 5910 } 5911 5912 /* 5913 * Create the pm properties anyway; otherwise the parent 5914 * can't go to sleep. 5915 */ 5916 un->un_f_pm_is_enabled = TRUE; 5917 (void) sd_create_pm_components(devi, un); 5918 5919 /* 5920 * If the device claims that log sense is supported, check it out. 5921 */ 5922 if (un->un_f_log_sense_supported) { 5923 rval = sd_log_page_supported(ssc, 5924 START_STOP_CYCLE_PAGE); 5925 if (rval == 1) { 5926 /* Page found, use it. */ 5927 un->un_start_stop_cycle_page = 5928 START_STOP_CYCLE_PAGE; 5929 } else { 5930 /* 5931 * Page not found or log sense is not 5932 * supported. 5933 * Notice we do not check the old style 5934 * START_STOP_CYCLE_VU_PAGE because this 5935 * code path does not apply to old disks. 5936 */ 5937 un->un_f_log_sense_supported = FALSE; 5938 un->un_f_pm_log_sense_smart = FALSE; 5939 } 5940 } 5941 5942 return; 5943 } 5944 5945 /* 5946 * For a disk whose HBA has not set the "pm-capable" 5947 * property, check whether the disk itself supports power management. 5948 */ 5949 if (!un->un_f_log_sense_supported) { 5950 un->un_power_level = SD_SPINDLE_ON; 5951 un->un_f_pm_is_enabled = FALSE; 5952 return; 5953 } 5954 5955 rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE); 5956 5957 #ifdef SDDEBUG 5958 if (sd_force_pm_supported) { 5959 /* Force a successful result */ 5960 rval = 1; 5961 } 5962 #endif 5963 5964 /* 5965 * If the start-stop cycle counter log page is not supported 5966 * or if the pm-capable property is set to false (0), 5967 * then we should not create the pm_components property. 5968 */ 5969 if (rval == -1) { 5970 /* 5971 * Error. 5972 * Reading log sense failed, most likely this is 5973 * an older drive that does not support log sense. 5974 * If this fails auto-pm is not supported. 5975 */ 5976 un->un_power_level = SD_SPINDLE_ON; 5977 un->un_f_pm_is_enabled = FALSE; 5978 5979 } else if (rval == 0) { 5980 /* 5981 * Page not found. 5982 * The start stop cycle counter is implemented as page 5983 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For 5984 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 5985 */ 5986 if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) { 5987 /* 5988 * Page found, use this one. 5989 */ 5990 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 5991 un->un_f_pm_is_enabled = TRUE; 5992 } else { 5993 /* 5994 * Error or page not found. 5995 * auto-pm is not supported for this device. 5996 */ 5997 un->un_power_level = SD_SPINDLE_ON; 5998 un->un_f_pm_is_enabled = FALSE; 5999 } 6000 } else { 6001 /* 6002 * Page found, use it.
*/ 6004 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 6005 un->un_f_pm_is_enabled = TRUE; 6006 } 6007 6008 6009 if (un->un_f_pm_is_enabled == TRUE) { 6010 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 6011 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 6012 6013 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 6014 log_page_size, un->un_start_stop_cycle_page, 6015 0x01, 0, SD_PATH_DIRECT); 6016 6017 if (rval != 0) { 6018 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6019 } 6020 6021 #ifdef SDDEBUG 6022 if (sd_force_pm_supported) { 6023 /* Force a successful result */ 6024 rval = 0; 6025 } 6026 #endif 6027 6028 /* 6029 * If the LOG SENSE for the start/stop cycle counter page 6030 * succeeds, then power management is supported and we can 6031 * enable auto-pm. 6032 */ 6033 if (rval == 0) { 6034 (void) sd_create_pm_components(devi, un); 6035 } else { 6036 un->un_power_level = SD_SPINDLE_ON; 6037 un->un_f_pm_is_enabled = FALSE; 6038 } 6039 6040 kmem_free(log_page_data, log_page_size); 6041 } 6042 } 6043 6044 6045 /* 6046 * Function: sd_create_pm_components 6047 * 6048 * Description: Initialize PM property. 6049 * 6050 * Context: Kernel thread context 6051 */ 6052 6053 static void 6054 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 6055 { 6056 ASSERT(!mutex_owned(SD_MUTEX(un))); 6057 6058 if (un->un_f_power_condition_supported) { 6059 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 6060 "pm-components", sd_pwr_pc.pm_comp, 5) 6061 != DDI_PROP_SUCCESS) { 6062 un->un_power_level = SD_SPINDLE_ACTIVE; 6063 un->un_f_pm_is_enabled = FALSE; 6064 return; 6065 } 6066 } else { 6067 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 6068 "pm-components", sd_pwr_ss.pm_comp, 3) 6069 != DDI_PROP_SUCCESS) { 6070 un->un_power_level = SD_SPINDLE_ON; 6071 un->un_f_pm_is_enabled = FALSE; 6072 return; 6073 } 6074 } 6075 /* 6076 * When components are initially created they are idle; 6077 * power up any non-removables. 6078 * Note: the return value of pm_raise_power can't be used 6079 * for determining if PM should be enabled for this device. 6080 * Even if you check the return values and remove this 6081 * property created above, the PM framework will not honor the 6082 * change after the first call to pm_raise_power. Hence, 6083 * removal of that property does not help if pm_raise_power 6084 * fails. In the case of removable media, the start/stop 6085 * will fail if the media is not present. 6086 */ 6087 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 6088 SD_PM_STATE_ACTIVE(un)) == DDI_SUCCESS)) { 6089 mutex_enter(SD_MUTEX(un)); 6090 un->un_power_level = SD_PM_STATE_ACTIVE(un); 6091 mutex_enter(&un->un_pm_mutex); 6092 /* Set to on and not busy. */ 6093 un->un_pm_count = 0; 6094 } else { 6095 mutex_enter(SD_MUTEX(un)); 6096 un->un_power_level = SD_PM_STATE_STOPPED(un); 6097 mutex_enter(&un->un_pm_mutex); 6098 /* Set to off. */ 6099 un->un_pm_count = -1; 6100 } 6101 mutex_exit(&un->un_pm_mutex); 6102 mutex_exit(SD_MUTEX(un)); 6103 } 6104 6105 6106 /* 6107 * Function: sd_ddi_suspend 6108 * 6109 * Description: Performs system power-down operations. This includes 6110 * setting the drive state to indicate it is suspended so 6111 * that no new commands will be accepted. Also, wait for 6112 * all commands that are in transport or queued to a timer 6113 * for retry to complete. All timeout threads are cancelled.
6114 * 6115 * Return Code: DDI_FAILURE or DDI_SUCCESS 6116 * 6117 * Context: Kernel thread context 6118 */ 6119 6120 static int 6121 sd_ddi_suspend(dev_info_t *devi) 6122 { 6123 struct sd_lun *un; 6124 clock_t wait_cmds_complete; 6125 6126 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 6127 if (un == NULL) { 6128 return (DDI_FAILURE); 6129 } 6130 6131 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 6132 6133 mutex_enter(SD_MUTEX(un)); 6134 6135 /* Return success if the device is already suspended. */ 6136 if (un->un_state == SD_STATE_SUSPENDED) { 6137 mutex_exit(SD_MUTEX(un)); 6138 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6139 "device already suspended, exiting\n"); 6140 return (DDI_SUCCESS); 6141 } 6142 6143 /* Return failure if the device is being used by HA */ 6144 if (un->un_resvd_status & 6145 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 6146 mutex_exit(SD_MUTEX(un)); 6147 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6148 "device in use by HA, exiting\n"); 6149 return (DDI_FAILURE); 6150 } 6151 6152 /* 6153 * Return failure if the device is in a resource wait 6154 * or power changing state. 6155 */ 6156 if ((un->un_state == SD_STATE_RWAIT) || 6157 (un->un_state == SD_STATE_PM_CHANGING)) { 6158 mutex_exit(SD_MUTEX(un)); 6159 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6160 "device in resource wait state, exiting\n"); 6161 return (DDI_FAILURE); 6162 } 6163 6164 6165 un->un_save_state = un->un_last_state; 6166 New_state(un, SD_STATE_SUSPENDED); 6167 6168 /* 6169 * Wait for all commands that are in transport or queued to a timer 6170 * for retry to complete. 6171 * 6172 * While waiting, no new commands will be accepted or sent because of 6173 * the new state we set above. 6174 * 6175 * Wait till current operation has completed. If we are in the resource 6176 * wait state (with an intr outstanding) then we need to wait till the 6177 * intr completes and starts the next cmd. We want to wait for 6178 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 6179 */ 6180 wait_cmds_complete = ddi_get_lbolt() + 6181 (sd_wait_cmds_complete * drv_usectohz(1000000)); 6182 6183 while (un->un_ncmds_in_transport != 0) { 6184 /* 6185 * Fail if commands do not finish in the specified time. 6186 */ 6187 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 6188 wait_cmds_complete) == -1) { 6189 /* 6190 * Undo the state changes made above. Everything 6191 * must go back to it's original value. 6192 */ 6193 Restore_state(un); 6194 un->un_last_state = un->un_save_state; 6195 /* Wake up any threads that might be waiting. 
*/ 6196 cv_broadcast(&un->un_suspend_cv); 6197 mutex_exit(SD_MUTEX(un)); 6198 SD_ERROR(SD_LOG_IO_PM, un, 6199 "sd_ddi_suspend: failed due to outstanding cmds\n"); 6200 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 6201 return (DDI_FAILURE); 6202 } 6203 } 6204 6205 /* 6206 * Cancel SCSI watch thread and timeouts, if any are active 6207 */ 6208 6209 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 6210 opaque_t temp_token = un->un_swr_token; 6211 mutex_exit(SD_MUTEX(un)); 6212 scsi_watch_suspend(temp_token); 6213 mutex_enter(SD_MUTEX(un)); 6214 } 6215 6216 if (un->un_reset_throttle_timeid != NULL) { 6217 timeout_id_t temp_id = un->un_reset_throttle_timeid; 6218 un->un_reset_throttle_timeid = NULL; 6219 mutex_exit(SD_MUTEX(un)); 6220 (void) untimeout(temp_id); 6221 mutex_enter(SD_MUTEX(un)); 6222 } 6223 6224 if (un->un_dcvb_timeid != NULL) { 6225 timeout_id_t temp_id = un->un_dcvb_timeid; 6226 un->un_dcvb_timeid = NULL; 6227 mutex_exit(SD_MUTEX(un)); 6228 (void) untimeout(temp_id); 6229 mutex_enter(SD_MUTEX(un)); 6230 } 6231 6232 mutex_enter(&un->un_pm_mutex); 6233 if (un->un_pm_timeid != NULL) { 6234 timeout_id_t temp_id = un->un_pm_timeid; 6235 un->un_pm_timeid = NULL; 6236 mutex_exit(&un->un_pm_mutex); 6237 mutex_exit(SD_MUTEX(un)); 6238 (void) untimeout(temp_id); 6239 mutex_enter(SD_MUTEX(un)); 6240 } else { 6241 mutex_exit(&un->un_pm_mutex); 6242 } 6243 6244 if (un->un_rmw_msg_timeid != NULL) { 6245 timeout_id_t temp_id = un->un_rmw_msg_timeid; 6246 un->un_rmw_msg_timeid = NULL; 6247 mutex_exit(SD_MUTEX(un)); 6248 (void) untimeout(temp_id); 6249 mutex_enter(SD_MUTEX(un)); 6250 } 6251 6252 if (un->un_retry_timeid != NULL) { 6253 timeout_id_t temp_id = un->un_retry_timeid; 6254 un->un_retry_timeid = NULL; 6255 mutex_exit(SD_MUTEX(un)); 6256 (void) untimeout(temp_id); 6257 mutex_enter(SD_MUTEX(un)); 6258 6259 if (un->un_retry_bp != NULL) { 6260 un->un_retry_bp->av_forw = un->un_waitq_headp; 6261 un->un_waitq_headp = un->un_retry_bp; 6262 if (un->un_waitq_tailp == NULL) { 6263 un->un_waitq_tailp = un->un_retry_bp; 6264 } 6265 un->un_retry_bp = NULL; 6266 un->un_retry_statp = NULL; 6267 } 6268 } 6269 6270 if (un->un_direct_priority_timeid != NULL) { 6271 timeout_id_t temp_id = un->un_direct_priority_timeid; 6272 un->un_direct_priority_timeid = NULL; 6273 mutex_exit(SD_MUTEX(un)); 6274 (void) untimeout(temp_id); 6275 mutex_enter(SD_MUTEX(un)); 6276 } 6277 6278 if (un->un_f_is_fibre == TRUE) { 6279 /* 6280 * Remove callbacks for insert and remove events 6281 */ 6282 if (un->un_insert_event != NULL) { 6283 mutex_exit(SD_MUTEX(un)); 6284 (void) ddi_remove_event_handler(un->un_insert_cb_id); 6285 mutex_enter(SD_MUTEX(un)); 6286 un->un_insert_event = NULL; 6287 } 6288 6289 if (un->un_remove_event != NULL) { 6290 mutex_exit(SD_MUTEX(un)); 6291 (void) ddi_remove_event_handler(un->un_remove_cb_id); 6292 mutex_enter(SD_MUTEX(un)); 6293 un->un_remove_event = NULL; 6294 } 6295 } 6296 6297 mutex_exit(SD_MUTEX(un)); 6298 6299 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 6300 6301 return (DDI_SUCCESS); 6302 } 6303 6304 6305 /* 6306 * Function: sd_ddi_resume 6307 * 6308 * Description: Performs system power-up operations.
6309 * 6310 * Return Code: DDI_SUCCESS 6311 * DDI_FAILURE 6312 * 6313 * Context: Kernel thread context 6314 */ 6315 6316 static int 6317 sd_ddi_resume(dev_info_t *devi) 6318 { 6319 struct sd_lun *un; 6320 6321 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 6322 if (un == NULL) { 6323 return (DDI_FAILURE); 6324 } 6325 6326 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n"); 6327 6328 mutex_enter(SD_MUTEX(un)); 6329 Restore_state(un); 6330 6331 /* 6332 * Restore the state which was saved to give the 6333 * the right state in un_last_state 6334 */ 6335 un->un_last_state = un->un_save_state; 6336 /* 6337 * Note: throttle comes back at full. 6338 * Also note: this MUST be done before calling pm_raise_power 6339 * otherwise the system can get hung in biowait. The scenario where 6340 * this'll happen is under cpr suspend. Writing of the system 6341 * state goes through sddump, which writes 0 to un_throttle. If 6342 * writing the system state then fails, example if the partition is 6343 * too small, then cpr attempts a resume. If throttle isn't restored 6344 * from the saved value until after calling pm_raise_power then 6345 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 6346 * in biowait. 6347 */ 6348 un->un_throttle = un->un_saved_throttle; 6349 6350 /* 6351 * The chance of failure is very rare as the only command done in power 6352 * entry point is START command when you transition from 0->1 or 6353 * unknown->1. Put it to SPINDLE ON state irrespective of the state at 6354 * which suspend was done. Ignore the return value as the resume should 6355 * not be failed. In the case of removable media the media need not be 6356 * inserted and hence there is a chance that raise power will fail with 6357 * media not present. 6358 */ 6359 if (un->un_f_attach_spinup) { 6360 mutex_exit(SD_MUTEX(un)); 6361 (void) pm_raise_power(SD_DEVINFO(un), 0, 6362 SD_PM_STATE_ACTIVE(un)); 6363 mutex_enter(SD_MUTEX(un)); 6364 } 6365 6366 /* 6367 * Don't broadcast to the suspend cv and therefore possibly 6368 * start I/O until after power has been restored. 6369 */ 6370 cv_broadcast(&un->un_suspend_cv); 6371 cv_broadcast(&un->un_state_cv); 6372 6373 /* restart thread */ 6374 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 6375 scsi_watch_resume(un->un_swr_token); 6376 } 6377 6378 #if (defined(__fibre)) 6379 if (un->un_f_is_fibre == TRUE) { 6380 /* 6381 * Add callbacks for insert and remove events 6382 */ 6383 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 6384 sd_init_event_callbacks(un); 6385 } 6386 } 6387 #endif 6388 6389 /* 6390 * Transport any pending commands to the target. 6391 * 6392 * If this is a low-activity device commands in queue will have to wait 6393 * until new commands come in, which may take awhile. Also, we 6394 * specifically don't check un_ncmds_in_transport because we know that 6395 * there really are no commands in progress after the unit was 6396 * suspended and we could have reached the throttle level, been 6397 * suspended, and have no new commands coming in for awhile. Highly 6398 * unlikely, but so is the low-activity disk scenario. 6399 */ 6400 ddi_xbuf_dispatch(un->un_xbuf_attr); 6401 6402 sd_start_cmds(un, NULL); 6403 mutex_exit(SD_MUTEX(un)); 6404 6405 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 6406 6407 return (DDI_SUCCESS); 6408 } 6409 6410 6411 /* 6412 * Function: sd_pm_state_change 6413 * 6414 * Description: Change the driver power state. 6415 * Someone else is required to actually change the driver 6416 * power level. 
6417 * 6418 * Arguments: un - driver soft state (unit) structure 6419 * level - the power level that is changed to 6420 * flag - to decide how to change the power state 6421 * 6422 * Return Code: DDI_SUCCESS 6423 * 6424 * Context: Kernel thread context 6425 */ 6426 static int 6427 sd_pm_state_change(struct sd_lun *un, int level, int flag) 6428 { 6429 ASSERT(un != NULL); 6430 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: entry\n"); 6431 6432 ASSERT(!mutex_owned(SD_MUTEX(un))); 6433 mutex_enter(SD_MUTEX(un)); 6434 6435 if (flag == SD_PM_STATE_ROLLBACK || SD_PM_IS_IO_CAPABLE(un, level)) { 6436 un->un_power_level = level; 6437 ASSERT(!mutex_owned(&un->un_pm_mutex)); 6438 mutex_enter(&un->un_pm_mutex); 6439 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 6440 un->un_pm_count++; 6441 ASSERT(un->un_pm_count == 0); 6442 } 6443 mutex_exit(&un->un_pm_mutex); 6444 } else { 6445 /* 6446 * Exit if power management is not enabled for this device, 6447 * or if the device is being used by HA. 6448 */ 6449 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 6450 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 6451 mutex_exit(SD_MUTEX(un)); 6452 SD_TRACE(SD_LOG_POWER, un, 6453 "sd_pm_state_change: exiting\n"); 6454 return (DDI_FAILURE); 6455 } 6456 6457 SD_INFO(SD_LOG_POWER, un, "sd_pm_state_change: " 6458 "un_ncmds_in_driver=%ld\n", un->un_ncmds_in_driver); 6459 6460 /* 6461 * See if the device is not busy, ie.: 6462 * - we have no commands in the driver for this device 6463 * - not waiting for resources 6464 */ 6465 if ((un->un_ncmds_in_driver == 0) && 6466 (un->un_state != SD_STATE_RWAIT)) { 6467 /* 6468 * The device is not busy, so it is OK to go to low 6469 * power state. Indicate low power, but rely on someone 6470 * else to actually change it. 6471 */ 6472 mutex_enter(&un->un_pm_mutex); 6473 un->un_pm_count = -1; 6474 mutex_exit(&un->un_pm_mutex); 6475 un->un_power_level = level; 6476 } 6477 } 6478 6479 mutex_exit(SD_MUTEX(un)); 6480 6481 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: exit\n"); 6482 6483 return (DDI_SUCCESS); 6484 } 6485 6486 6487 /* 6488 * Function: sd_pm_idletimeout_handler 6489 * 6490 * Description: A timer routine that's active only while a device is busy. 6491 * The purpose is to extend slightly the pm framework's busy 6492 * view of the device to prevent busy/idle thrashing for 6493 * back-to-back commands. Do this by comparing the current time 6494 * to the time at which the last command completed and when the 6495 * difference is greater than sd_pm_idletime, call 6496 * pm_idle_component. In addition to indicating idle to the pm 6497 * framework, update the chain type to again use the internal pm 6498 * layers of the driver. 6499 * 6500 * Arguments: arg - driver soft state (unit) structure 6501 * 6502 * Context: Executes in a timeout(9F) thread context 6503 */ 6504 6505 static void 6506 sd_pm_idletimeout_handler(void *arg) 6507 { 6508 struct sd_lun *un = arg; 6509 6510 time_t now; 6511 6512 mutex_enter(&sd_detach_mutex); 6513 if (un->un_detach_count != 0) { 6514 /* Abort if the instance is detaching */ 6515 mutex_exit(&sd_detach_mutex); 6516 return; 6517 } 6518 mutex_exit(&sd_detach_mutex); 6519 6520 now = ddi_get_time(); 6521 /* 6522 * Grab both mutexes, in the proper order, since we're accessing 6523 * both PM and softstate variables. 
*/ 6525 mutex_enter(SD_MUTEX(un)); 6526 mutex_enter(&un->un_pm_mutex); 6527 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 6528 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 6529 /* 6530 * Update the chain types. 6531 * This takes effect on the next new command received. 6532 */ 6533 if (un->un_f_non_devbsize_supported) { 6534 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 6535 } else { 6536 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 6537 } 6538 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 6539 6540 SD_TRACE(SD_LOG_IO_PM, un, 6541 "sd_pm_idletimeout_handler: idling device\n"); 6542 (void) pm_idle_component(SD_DEVINFO(un), 0); 6543 un->un_pm_idle_timeid = NULL; 6544 } else { 6545 un->un_pm_idle_timeid = 6546 timeout(sd_pm_idletimeout_handler, un, 6547 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 6548 } 6549 mutex_exit(&un->un_pm_mutex); 6550 mutex_exit(SD_MUTEX(un)); 6551 } 6552 6553 6554 /* 6555 * Function: sd_pm_timeout_handler 6556 * 6557 * Description: Callback to tell framework we are idle. 6558 * 6559 * Context: timeout(9f) thread context. 6560 */ 6561 6562 static void 6563 sd_pm_timeout_handler(void *arg) 6564 { 6565 struct sd_lun *un = arg; 6566 6567 (void) pm_idle_component(SD_DEVINFO(un), 0); 6568 mutex_enter(&un->un_pm_mutex); 6569 un->un_pm_timeid = NULL; 6570 mutex_exit(&un->un_pm_mutex); 6571 } 6572 6573 6574 /* 6575 * Function: sdpower 6576 * 6577 * Description: PM entry point. 6578 * 6579 * Return Code: DDI_SUCCESS 6580 * DDI_FAILURE 6581 * 6582 * Context: Kernel thread context 6583 */ 6584 6585 static int 6586 sdpower(dev_info_t *devi, int component, int level) 6587 { 6588 struct sd_lun *un; 6589 int instance; 6590 int rval = DDI_SUCCESS; 6591 uint_t i, log_page_size, maxcycles, ncycles; 6592 uchar_t *log_page_data; 6593 int log_sense_page; 6594 int medium_present; 6595 time_t intvlp; 6596 struct pm_trans_data sd_pm_tran_data; 6597 uchar_t save_state; 6598 int sval; 6599 uchar_t state_before_pm; 6600 int got_semaphore_here; 6601 sd_ssc_t *ssc; 6602 int last_power_level; 6603 6604 instance = ddi_get_instance(devi); 6605 6606 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 6607 !SD_PM_IS_LEVEL_VALID(un, level) || component != 0) { 6608 return (DDI_FAILURE); 6609 } 6610 6611 ssc = sd_ssc_init(un); 6612 6613 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 6614 6615 /* 6616 * Must synchronize power down with close. 6617 * Attempt to decrement/acquire the open/close semaphore, 6618 * but do NOT wait on it. If it's not greater than zero, 6619 * ie. it can't be decremented without waiting, then 6620 * someone else, either open or close, already has it 6621 * and the try returns 0. Use that knowledge here to determine 6622 * if it's OK to change the device power level. 6623 * Also, only increment it on exit if it was decremented, ie. gotten, 6624 * here. 6625 */ 6626 got_semaphore_here = sema_tryp(&un->un_semoclose); 6627 6628 mutex_enter(SD_MUTEX(un)); 6629 6630 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n", 6631 un->un_ncmds_in_driver); 6632 6633 /* 6634 * If un_ncmds_in_driver is non-zero, commands are already being 6635 * processed in the driver; if the semaphore was not gotten here, 6636 * an open or close is being processed. In either case a request 6637 * to go to a lower power level that can't perform I/O cannot be 6638 * allowed, so return failure.
*/ 6641 if ((!SD_PM_IS_IO_CAPABLE(un, level)) && 6642 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) { 6643 mutex_exit(SD_MUTEX(un)); 6644 6645 if (got_semaphore_here != 0) { 6646 sema_v(&un->un_semoclose); 6647 } 6648 SD_TRACE(SD_LOG_IO_PM, un, 6649 "sdpower: exit, device has queued cmds.\n"); 6650 6651 goto sdpower_failed; 6652 } 6653 6654 /* 6655 * If the state is OFFLINE, the disk is completely dead; we would 6656 * have to power it on or off by sending commands, which would 6657 * fail anyway, so return here. 6658 * 6659 * Power changes to a device that's OFFLINE or SUSPENDED 6660 * are not allowed. 6661 */ 6662 if ((un->un_state == SD_STATE_OFFLINE) || 6663 (un->un_state == SD_STATE_SUSPENDED)) { 6664 mutex_exit(SD_MUTEX(un)); 6665 6666 if (got_semaphore_here != 0) { 6667 sema_v(&un->un_semoclose); 6668 } 6669 SD_TRACE(SD_LOG_IO_PM, un, 6670 "sdpower: exit, device is off-line.\n"); 6671 6672 goto sdpower_failed; 6673 } 6674 6675 /* 6676 * Change the device's state to indicate its power level 6677 * is being changed. Do this to prevent a power off in the 6678 * middle of commands, which is especially bad on devices 6679 * that are really powered off instead of just spun down. 6680 */ 6681 state_before_pm = un->un_state; 6682 un->un_state = SD_STATE_PM_CHANGING; 6683 6684 mutex_exit(SD_MUTEX(un)); 6685 6686 /* 6687 * If the log sense command is not supported, bypass the 6688 * following check; otherwise, check the log sense 6689 * information for this device. 6690 */ 6691 if (SD_PM_STOP_MOTOR_NEEDED(un, level) && 6692 un->un_f_log_sense_supported) { 6693 /* 6694 * Get the log sense information to understand whether the 6695 * power-cycle counts have gone beyond the threshold. 6696 */ 6697 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 6698 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 6699 6700 mutex_enter(SD_MUTEX(un)); 6701 log_sense_page = un->un_start_stop_cycle_page; 6702 mutex_exit(SD_MUTEX(un)); 6703 6704 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 6705 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT); 6706 6707 if (rval != 0) { 6708 if (rval == EIO) 6709 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 6710 else 6711 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6712 } 6713 6714 #ifdef SDDEBUG 6715 if (sd_force_pm_supported) { 6716 /* Force a successful result */ 6717 rval = 0; 6718 } 6719 #endif 6720 if (rval != 0) { 6721 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 6722 "Log Sense Failed\n"); 6723 6724 kmem_free(log_page_data, log_page_size); 6725 /* Cannot support power management on those drives */ 6726 6727 if (got_semaphore_here != 0) { 6728 sema_v(&un->un_semoclose); 6729 } 6730 /* 6731 * On exit put the state back to its original value 6732 * and broadcast to anyone waiting for the power 6733 * change completion.
*/ 6735 mutex_enter(SD_MUTEX(un)); 6736 un->un_state = state_before_pm; 6737 cv_broadcast(&un->un_suspend_cv); 6738 mutex_exit(SD_MUTEX(un)); 6739 SD_TRACE(SD_LOG_IO_PM, un, 6740 "sdpower: exit, Log Sense Failed.\n"); 6741 6742 goto sdpower_failed; 6743 } 6744 6745 /* 6746 * From the page data - Convert the essential information to 6747 * pm_trans_data 6748 */ 6749 maxcycles = 6750 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) | 6751 (log_page_data[0x1E] << 8) | log_page_data[0x1F]; 6752 6753 ncycles = 6754 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) | 6755 (log_page_data[0x26] << 8) | log_page_data[0x27]; 6756 6757 if (un->un_f_pm_log_sense_smart) { 6758 sd_pm_tran_data.un.smart_count.allowed = maxcycles; 6759 sd_pm_tran_data.un.smart_count.consumed = ncycles; 6760 sd_pm_tran_data.un.smart_count.flag = 0; 6761 sd_pm_tran_data.format = DC_SMART_FORMAT; 6762 } else { 6763 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles; 6764 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles; 6765 for (i = 0; i < DC_SCSI_MFR_LEN; i++) { 6766 sd_pm_tran_data.un.scsi_cycles.svc_date[i] = 6767 log_page_data[8+i]; 6768 } 6769 sd_pm_tran_data.un.scsi_cycles.flag = 0; 6770 sd_pm_tran_data.format = DC_SCSI_FORMAT; 6771 } 6772 6773 kmem_free(log_page_data, log_page_size); 6774 6775 /* 6776 * Call pm_trans_check routine to get the Ok from 6777 * the global policy 6778 */ 6779 rval = pm_trans_check(&sd_pm_tran_data, &intvlp); 6780 #ifdef SDDEBUG 6781 if (sd_force_pm_supported) { 6782 /* Force a successful result */ 6783 rval = 1; 6784 } 6785 #endif 6786 switch (rval) { 6787 case 0: 6788 /* 6789 * Not OK to power cycle, or an error in the parameters 6790 * passed; pm_trans_check has returned the advised time to 6791 * wait before the next power cycle. Based on the new intvlp parameter we are 6792 * supposed to pretend we are busy so that pm framework 6793 * will never call our power entry point. Because of 6794 * that install a timeout handler and wait for the 6795 * recommended time to elapse so that power management 6796 * can be effective again. 6797 * 6798 * To effect this behavior, call pm_busy_component to 6799 * indicate to the framework this device is busy. 6800 * By not adjusting un_pm_count the rest of PM in 6801 * the driver will function normally, independent 6802 * of this; but because the framework is told the device 6803 * is busy it won't attempt powering down until it gets 6804 * a matching idle. The timeout handler sends this. 6805 * Note: sd_pm_entry can't be called here to do this 6806 * because sdpower may have been called as a result 6807 * of a call to pm_raise_power from within sd_pm_entry. 6808 * 6809 * If a timeout handler is already active then 6810 * don't install another. 6811 */ 6812 mutex_enter(&un->un_pm_mutex); 6813 if (un->un_pm_timeid == NULL) { 6814 un->un_pm_timeid = 6815 timeout(sd_pm_timeout_handler, 6816 un, intvlp * drv_usectohz(1000000)); 6817 mutex_exit(&un->un_pm_mutex); 6818 (void) pm_busy_component(SD_DEVINFO(un), 0); 6819 } else { 6820 mutex_exit(&un->un_pm_mutex); 6821 } 6822 if (got_semaphore_here != 0) { 6823 sema_v(&un->un_semoclose); 6824 } 6825 /* 6826 * On exit put the state back to its original value 6827 * and broadcast to anyone waiting for the power 6828 * change completion.
*/ 6830 mutex_enter(SD_MUTEX(un)); 6831 un->un_state = state_before_pm; 6832 cv_broadcast(&un->un_suspend_cv); 6833 mutex_exit(SD_MUTEX(un)); 6834 6835 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, " 6836 "trans check Failed, not ok to power cycle.\n"); 6837 6838 goto sdpower_failed; 6839 case -1: 6840 if (got_semaphore_here != 0) { 6841 sema_v(&un->un_semoclose); 6842 } 6843 /* 6844 * On exit put the state back to its original value 6845 * and broadcast to anyone waiting for the power 6846 * change completion. 6847 */ 6848 mutex_enter(SD_MUTEX(un)); 6849 un->un_state = state_before_pm; 6850 cv_broadcast(&un->un_suspend_cv); 6851 mutex_exit(SD_MUTEX(un)); 6852 SD_TRACE(SD_LOG_IO_PM, un, 6853 "sdpower: exit, trans check command Failed.\n"); 6854 6855 goto sdpower_failed; 6856 } 6857 } 6858 6859 if (!SD_PM_IS_IO_CAPABLE(un, level)) { 6860 /* 6861 * Save the last state... if the STOP FAILS we need it 6862 * for restoring 6863 */ 6864 mutex_enter(SD_MUTEX(un)); 6865 save_state = un->un_last_state; 6866 last_power_level = un->un_power_level; 6867 /* 6868 * There must not be any cmds getting processed 6869 * in the driver when we get here. Power to the 6870 * device is potentially going off. 6871 */ 6872 ASSERT(un->un_ncmds_in_driver == 0); 6873 mutex_exit(SD_MUTEX(un)); 6874 6875 /* 6876 * For now PM suspend the device completely before spindle is 6877 * turned off 6878 */ 6879 if ((rval = sd_pm_state_change(un, level, SD_PM_STATE_CHANGE)) 6880 == DDI_FAILURE) { 6881 if (got_semaphore_here != 0) { 6882 sema_v(&un->un_semoclose); 6883 } 6884 /* 6885 * On exit put the state back to its original value 6886 * and broadcast to anyone waiting for the power 6887 * change completion. 6888 */ 6889 mutex_enter(SD_MUTEX(un)); 6890 un->un_state = state_before_pm; 6891 un->un_power_level = last_power_level; 6892 cv_broadcast(&un->un_suspend_cv); 6893 mutex_exit(SD_MUTEX(un)); 6894 SD_TRACE(SD_LOG_IO_PM, un, 6895 "sdpower: exit, PM suspend Failed.\n"); 6896 6897 goto sdpower_failed; 6898 } 6899 } 6900 6901 /* 6902 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open, 6903 * close, or strategy. Dump no longer uses this routine; it uses its 6904 * own code so it can be done in polled mode. 6905 */ 6906 6907 medium_present = TRUE; 6908 6909 /* 6910 * When powering up, issue a TUR in case the device is at unit 6911 * attention. Don't do retries. Bypass the PM layer, otherwise 6912 * a deadlock on un_pm_busy_cv will occur. 6913 */ 6914 if (SD_PM_IS_IO_CAPABLE(un, level)) { 6915 sval = sd_send_scsi_TEST_UNIT_READY(ssc, 6916 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 6917 if (sval != 0) 6918 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6919 } 6920 6921 if (un->un_f_power_condition_supported) { 6922 char *pm_condition_name[] = {"STOPPED", "STANDBY", 6923 "IDLE", "ACTIVE"}; 6924 SD_TRACE(SD_LOG_IO_PM, un, 6925 "sdpower: sending \'%s\' power condition", 6926 pm_condition_name[level]); 6927 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION, 6928 sd_pl2pc[level], SD_PATH_DIRECT); 6929 } else { 6930 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 6931 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 6932 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 6933 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : 6934 SD_TARGET_STOP), SD_PATH_DIRECT); 6935 } 6936 if (sval != 0) { 6937 if (sval == EIO) 6938 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 6939 else 6940 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6941 } 6942 6943 /* Command failed, check for media present.
*/ 6944 if ((sval == ENXIO) && un->un_f_has_removable_media) { 6945 medium_present = FALSE; 6946 } 6947 6948 /* 6949 * The conditions of interest here are: 6950 * if a spindle off with media present fails, 6951 * then restore the state and return an error. 6952 * else if a spindle on fails, 6953 * then return an error (there's no state to restore). 6954 * In all other cases we set up for the new state 6955 * and return success. 6956 */ 6957 if (!SD_PM_IS_IO_CAPABLE(un, level)) { 6958 if ((medium_present == TRUE) && (sval != 0)) { 6959 /* The stop command from above failed */ 6960 rval = DDI_FAILURE; 6961 /* 6962 * The stop command failed, and we have media 6963 * present. Put the level back by calling 6964 * sd_pm_state_change() with SD_PM_STATE_ROLLBACK 6965 * and set the state back to its previous value. 6966 */ 6967 (void) sd_pm_state_change(un, last_power_level, 6968 SD_PM_STATE_ROLLBACK); 6969 mutex_enter(SD_MUTEX(un)); 6970 un->un_last_state = save_state; 6971 mutex_exit(SD_MUTEX(un)); 6972 } else if (un->un_f_monitor_media_state) { 6973 /* 6974 * The stop command from above succeeded. 6975 * Terminate watch thread in case of removable media 6976 * devices going into low power state. This is as per 6977 * the requirements of pm framework, otherwise commands 6978 * will be generated for the device (through watch 6979 * thread), even when the device is in low power state. 6980 */ 6981 mutex_enter(SD_MUTEX(un)); 6982 un->un_f_watcht_stopped = FALSE; 6983 if (un->un_swr_token != NULL) { 6984 opaque_t temp_token = un->un_swr_token; 6985 un->un_f_watcht_stopped = TRUE; 6986 un->un_swr_token = NULL; 6987 mutex_exit(SD_MUTEX(un)); 6988 (void) scsi_watch_request_terminate(temp_token, 6989 SCSI_WATCH_TERMINATE_ALL_WAIT); 6990 } else { 6991 mutex_exit(SD_MUTEX(un)); 6992 } 6993 } 6994 } else { 6995 /* 6996 * The level requested is I/O capable. 6997 * Legacy behavior: return success on a failed spinup 6998 * if there is no media in the drive. 6999 * Do this by looking at medium_present here. 7000 */ 7001 if ((sval != 0) && medium_present) { 7002 /* The start command from above failed */ 7003 rval = DDI_FAILURE; 7004 } else { 7005 /* 7006 * The start command from above succeeded. 7007 * PM resume the device now that we have 7008 * started the disk. 7009 */ 7010 (void) sd_pm_state_change(un, level, 7011 SD_PM_STATE_CHANGE); 7012 7013 /* 7014 * Resume the watch thread since it was suspended 7015 * when the device went into low power mode. 7016 */ 7017 if (un->un_f_monitor_media_state) { 7018 mutex_enter(SD_MUTEX(un)); 7019 if (un->un_f_watcht_stopped == TRUE) { 7020 opaque_t temp_token; 7021 7022 un->un_f_watcht_stopped = FALSE; 7023 mutex_exit(SD_MUTEX(un)); 7024 temp_token = 7025 sd_watch_request_submit(un); 7026 mutex_enter(SD_MUTEX(un)); 7027 un->un_swr_token = temp_token; 7028 } 7029 mutex_exit(SD_MUTEX(un)); 7030 } 7031 } 7032 } 7033 7034 if (got_semaphore_here != 0) { 7035 sema_v(&un->un_semoclose); 7036 } 7037 /* 7038 * On exit put the state back to its original value 7039 * and broadcast to anyone waiting for the power 7040 * change completion. 7041 */ 7042 mutex_enter(SD_MUTEX(un)); 7043 un->un_state = state_before_pm; 7044 cv_broadcast(&un->un_suspend_cv); 7045 mutex_exit(SD_MUTEX(un)); 7046 7047 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 7048 7049 sd_ssc_fini(ssc); 7050 return (rval); 7051 7052 sdpower_failed: 7053 7054 sd_ssc_fini(ssc); 7055 return (DDI_FAILURE); 7056 } 7057 7058 7059 7060 /* 7061 * Function: sdattach 7062 * 7063 * Description: Driver's attach(9e) entry point function.
7064 * 7065 * Arguments: devi - opaque device info handle 7066 * cmd - attach type 7067 * 7068 * Return Code: DDI_SUCCESS 7069 * DDI_FAILURE 7070 * 7071 * Context: Kernel thread context 7072 */ 7073 7074 static int 7075 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 7076 { 7077 switch (cmd) { 7078 case DDI_ATTACH: 7079 return (sd_unit_attach(devi)); 7080 case DDI_RESUME: 7081 return (sd_ddi_resume(devi)); 7082 default: 7083 break; 7084 } 7085 return (DDI_FAILURE); 7086 } 7087 7088 7089 /* 7090 * Function: sddetach 7091 * 7092 * Description: Driver's detach(9E) entry point function. 7093 * 7094 * Arguments: devi - opaque device info handle 7095 * cmd - detach type 7096 * 7097 * Return Code: DDI_SUCCESS 7098 * DDI_FAILURE 7099 * 7100 * Context: Kernel thread context 7101 */ 7102 7103 static int 7104 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 7105 { 7106 switch (cmd) { 7107 case DDI_DETACH: 7108 return (sd_unit_detach(devi)); 7109 case DDI_SUSPEND: 7110 return (sd_ddi_suspend(devi)); 7111 default: 7112 break; 7113 } 7114 return (DDI_FAILURE); 7115 } 7116 7117 7118 /* 7119 * Function: sd_sync_with_callback 7120 * 7121 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 7122 * state while the callback routine is active. 7123 * 7124 * Arguments: un: softstate structure for the instance 7125 * 7126 * Context: Kernel thread context 7127 */ 7128 7129 static void 7130 sd_sync_with_callback(struct sd_lun *un) 7131 { 7132 ASSERT(un != NULL); 7133 7134 mutex_enter(SD_MUTEX(un)); 7135 7136 ASSERT(un->un_in_callback >= 0); 7137 7138 while (un->un_in_callback > 0) { 7139 mutex_exit(SD_MUTEX(un)); 7140 delay(2); 7141 mutex_enter(SD_MUTEX(un)); 7142 } 7143 7144 mutex_exit(SD_MUTEX(un)); 7145 } 7146 7147 /* 7148 * Function: sd_unit_attach 7149 * 7150 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 7151 * the soft state structure for the device and performs 7152 * all necessary structure and device initializations. 7153 * 7154 * Arguments: devi: the system's dev_info_t for the device. 7155 * 7156 * Return Code: DDI_SUCCESS if attach is successful. 7157 * DDI_FAILURE if any part of the attach fails. 7158 * 7159 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 7160 * Kernel thread context only. Can sleep. 7161 */ 7162 7163 static int 7164 sd_unit_attach(dev_info_t *devi) 7165 { 7166 struct scsi_device *devp; 7167 struct sd_lun *un; 7168 char *variantp; 7169 char name_str[48]; 7170 int reservation_flag = SD_TARGET_IS_UNRESERVED; 7171 int instance; 7172 int rval; 7173 int wc_enabled; 7174 int tgt; 7175 uint64_t capacity; 7176 uint_t lbasize = 0; 7177 dev_info_t *pdip = ddi_get_parent(devi); 7178 int offbyone = 0; 7179 int geom_label_valid = 0; 7180 sd_ssc_t *ssc; 7181 int status; 7182 struct sd_fm_internal *sfip = NULL; 7183 int max_xfer_size; 7184 7185 /* 7186 * Retrieve the target driver's private data area. This was set 7187 * up by the HBA. 7188 */ 7189 devp = ddi_get_driver_private(devi); 7190 7191 /* 7192 * Retrieve the target ID of the device. 7193 */ 7194 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7195 SCSI_ADDR_PROP_TARGET, -1); 7196 7197 /* 7198 * Since we have no idea what state things were left in by the last 7199 * user of the device, set up some 'default' settings, ie. turn 'em 7200 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 7201 * Do this before the scsi_probe, which sends an inquiry. 7202 * This is a fix for bug (4430280). 7203 * Of special importance is wide-xfer. 
The drive could have been left 7204 * in wide transfer mode by the last driver to communicate with it; 7205 * this includes us. If that's the case, and if the following is not 7206 * set up properly or we don't re-negotiate with the drive prior to 7207 * transferring data to/from the drive, it causes bus parity errors, 7208 * data overruns, and unexpected interrupts. This first occurred when 7209 * the fix for bug (4378686) was made. 7210 */ 7211 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 7212 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 7213 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 7214 7215 /* 7216 * Currently, scsi_ifsetcap sets the tagged-qing capability for all LUNs 7217 * on a target. Setting it per lun instance actually sets the 7218 * capability of this target, which affects those luns already 7219 * attached on the same target. So during attach, we can disable 7220 * this capability only when no other lun has been attached on this 7221 * target. By doing this, we assume a target has the same tagged-qing 7222 * capability for every lun. The condition can be removed when the HBA 7223 * is changed to support a per-lun tagged-qing capability. 7224 */ 7225 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7226 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 7227 } 7228 7229 /* 7230 * Use scsi_probe() to issue an INQUIRY command to the device. 7231 * This call will allocate and fill in the scsi_inquiry structure 7232 * and point the sd_inq member of the scsi_device structure to it. 7233 * If the attach succeeds, then this memory will not be de-allocated 7234 * (via scsi_unprobe()) until the instance is detached. 7235 */ 7236 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 7237 goto probe_failed; 7238 } 7239 7240 /* 7241 * Check the device type as specified in the inquiry data and 7242 * claim it if it is of a type that we support. 7243 */ 7244 switch (devp->sd_inq->inq_dtype) { 7245 case DTYPE_DIRECT: 7246 break; 7247 case DTYPE_RODIRECT: 7248 break; 7249 case DTYPE_OPTICAL: 7250 break; 7251 case DTYPE_NOTPRESENT: 7252 default: 7253 /* Unsupported device type; fail the attach. */ 7254 goto probe_failed; 7255 } 7256 7257 /* 7258 * Allocate the soft state structure for this unit. 7259 * 7260 * We rely upon this memory being set to all zeroes by 7261 * ddi_soft_state_zalloc(). We assume that any member of the 7262 * soft state structure that is not explicitly initialized by 7263 * this routine will have a value of zero. 7264 */ 7265 instance = ddi_get_instance(devp->sd_dev); 7266 #ifndef XPV_HVM_DRIVER 7267 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 7268 goto probe_failed; 7269 } 7270 #endif /* !XPV_HVM_DRIVER */ 7271 7272 /* 7273 * Retrieve a pointer to the newly-allocated soft state. 7274 * 7275 * This should NEVER fail if the ddi_soft_state_zalloc() call above 7276 * was successful, unless something has gone horribly wrong and the 7277 * ddi's soft state internals are corrupt (in which case it is 7278 * probably better to halt here than just fail the attach....) 7279 */ 7280 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 7281 panic("sd_unit_attach: NULL soft state on instance:0x%x", 7282 instance); 7283 /*NOTREACHED*/ 7284 } 7285 7286 /* 7287 * Link the back ptr of the driver soft state to the scsi_device 7288 * struct for this lun. 7289 * Save a pointer to the softstate in the driver-private area of 7290 * the scsi_device struct.
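* This two-way linkage lets the rest of the driver map between the scsi_device and the per-LUN soft state in either direction.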
7291 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 7292 * we first set un->un_sd below. 7293 */ 7294 un->un_sd = devp; 7295 devp->sd_private = (opaque_t)un; 7296 7297 /* 7298 * The following must be after devp is stored in the soft state struct. 7299 */ 7300 #ifdef SDDEBUG 7301 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7302 "%s_unit_attach: un:0x%p instance:%d\n", 7303 ddi_driver_name(devi), un, instance); 7304 #endif 7305 7306 /* 7307 * Set up the device type and node type (for the minor nodes). 7308 * By default we assume that the device can at least support the 7309 * Common Command Set. Call it a CD-ROM if it reports itself 7310 * as a RODIRECT device. 7311 */ 7312 switch (devp->sd_inq->inq_dtype) { 7313 case DTYPE_RODIRECT: 7314 un->un_node_type = DDI_NT_CD_CHAN; 7315 un->un_ctype = CTYPE_CDROM; 7316 break; 7317 case DTYPE_OPTICAL: 7318 un->un_node_type = DDI_NT_BLOCK_CHAN; 7319 un->un_ctype = CTYPE_ROD; 7320 break; 7321 default: 7322 un->un_node_type = DDI_NT_BLOCK_CHAN; 7323 un->un_ctype = CTYPE_CCS; 7324 break; 7325 } 7326 7327 /* 7328 * Try to read the interconnect type from the HBA. 7329 * 7330 * Note: This driver is currently compiled as two binaries, a parallel 7331 * scsi version (sd) and a fibre channel version (ssd). All functional 7332 * differences are determined at compile time. In the future a single 7333 * binary will be provided and the interconnect type will be used to 7334 * differentiate between fibre and parallel scsi behaviors. At that time 7335 * it will be necessary for all fibre channel HBAs to support this 7336 * property. 7337 * 7338 * Set un_f_is_fibre to TRUE (default to fibre). 7339 */ 7340 un->un_f_is_fibre = TRUE; 7341 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 7342 case INTERCONNECT_SSA: 7343 un->un_interconnect_type = SD_INTERCONNECT_SSA; 7344 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7345 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 7346 break; 7347 case INTERCONNECT_PARALLEL: 7348 un->un_f_is_fibre = FALSE; 7349 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7350 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7351 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 7352 break; 7353 case INTERCONNECT_SAS: 7354 un->un_f_is_fibre = FALSE; 7355 un->un_interconnect_type = SD_INTERCONNECT_SAS; 7356 un->un_node_type = DDI_NT_BLOCK_SAS; 7357 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7358 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un); 7359 break; 7360 case INTERCONNECT_SATA: 7361 un->un_f_is_fibre = FALSE; 7362 un->un_interconnect_type = SD_INTERCONNECT_SATA; 7363 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7364 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 7365 break; 7366 case INTERCONNECT_FIBRE: 7367 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 7368 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7369 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 7370 break; 7371 case INTERCONNECT_FABRIC: 7372 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 7373 un->un_node_type = DDI_NT_BLOCK_FABRIC; 7374 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7375 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 7376 break; 7377 default: 7378 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 7379 /* 7380 * The HBA does not support the "interconnect-type" property 7381 * (or did not provide a recognized type). 7382 * 7383 * Note: This will be obsoleted when a single fibre channel 7384 * and parallel scsi driver is delivered.
In the meantime the 7385 * interconnect type will be set to the platform default. If that 7386 * type is not parallel SCSI, it means that we should be 7387 * assuming "ssd" semantics. However, here this also means that 7388 * the FC HBA is not supporting the "interconnect-type" property 7389 * like we expect it to, so log this occurrence. 7390 */ 7391 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 7392 if (!SD_IS_PARALLEL_SCSI(un)) { 7393 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7394 "sd_unit_attach: un:0x%p Assuming " 7395 "INTERCONNECT_FIBRE\n", un); 7396 } else { 7397 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7398 "sd_unit_attach: un:0x%p Assuming " 7399 "INTERCONNECT_PARALLEL\n", un); 7400 un->un_f_is_fibre = FALSE; 7401 } 7402 #else 7403 /* 7404 * Note: This source will be implemented when a single fibre 7405 * channel and parallel scsi driver is delivered. The default 7406 * will be to assume that if a device does not support the 7407 * "interconnect-type" property it is a parallel SCSI HBA and 7408 * we will set the interconnect type for parallel scsi. 7409 */ 7410 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7411 un->un_f_is_fibre = FALSE; 7412 #endif 7413 break; 7414 } 7415 7416 if (un->un_f_is_fibre == TRUE) { 7417 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 7418 SCSI_VERSION_3) { 7419 switch (un->un_interconnect_type) { 7420 case SD_INTERCONNECT_FIBRE: 7421 case SD_INTERCONNECT_SSA: 7422 un->un_node_type = DDI_NT_BLOCK_WWN; 7423 break; 7424 default: 7425 break; 7426 } 7427 } 7428 } 7429 7430 /* 7431 * Initialize the Request Sense command for the target. 7432 */ 7433 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 7434 goto alloc_rqs_failed; 7435 } 7436 7437 /* 7438 * Set un_retry_count to SD_RETRY_COUNT; this is OK for SPARC 7439 * with separate binaries for sd and ssd. 7440 * 7441 * x86 has one binary, and un_retry_count is set based on the 7442 * connection type. The hardcoded values will go away when SPARC 7443 * uses one binary for sd and ssd. These hardcoded values need to 7444 * match SD_RETRY_COUNT in sddef.h. 7445 * The value used is based on the interconnect type: 7446 * fibre = 3, parallel = 5. 7447 */ 7448 #if defined(__i386) || defined(__amd64) 7449 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 7450 #else 7451 un->un_retry_count = SD_RETRY_COUNT; 7452 #endif 7453 7454 /* 7455 * Set the per disk retry count to the default number of retries 7456 * for disks and CDROMs. This value can be overridden by the 7457 * disk property list or an entry in sd.conf. 7458 */ 7459 un->un_notready_retry_count = 7460 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 7461 : DISK_NOT_READY_RETRY_COUNT(un); 7462 7463 /* 7464 * Set the busy retry count to the default value of un_retry_count. 7465 * This can be overridden by entries in sd.conf or the device 7466 * config table. 7467 */ 7468 un->un_busy_retry_count = un->un_retry_count; 7469 7470 /* 7471 * Init the reset threshold for retries. This number determines 7472 * how many retries must be performed before a reset can be issued 7473 * (for certain error conditions). This can be overridden by entries 7474 * in sd.conf or the device config table. 7475 */ 7476 un->un_reset_retry_count = (un->un_retry_count / 2); 7477 7478 /* 7479 * Set the victim_retry_count to twice the default un_retry_count. 7480 */ 7481 un->un_victim_retry_count = (2 * un->un_retry_count); 7482 7483 /* 7484 * Set the reservation release timeout to the default value of 7485 * 5 seconds. This can be overridden by entries in ssd.conf or the 7486 * device config table.
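* As an illustrative sketch only (the property name here is hypothetical; the authoritative keyword set is whatever the static configuration table and sd_read_unit_properties() actually parse), an sd-config-list override of a per-device tunable takes the form: sd-config-list = "VENDOR  PRODUCT", "throttle-max:16";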
7487 */ 7488 un->un_reserve_release_time = 5; 7489 7490 /* 7491 * Set up the default maximum transfer size. Note that this may 7492 * get updated later in the attach, when setting up default wide 7493 * operations for disks. 7494 */ 7495 #if defined(__i386) || defined(__amd64) 7496 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 7497 un->un_partial_dma_supported = 1; 7498 #else 7499 un->un_max_xfer_size = (uint_t)maxphys; 7500 #endif 7501 7502 /* 7503 * Get "allow bus device reset" property (defaults to "enabled" if 7504 * the property was not defined). This is to disable bus resets for 7505 * certain kinds of error recovery. Note: In the future when a run-time 7506 * fibre check is available the soft state flag should default to 7507 * enabled. 7508 */ 7509 if (un->un_f_is_fibre == TRUE) { 7510 un->un_f_allow_bus_device_reset = TRUE; 7511 } else { 7512 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7513 "allow-bus-device-reset", 1) != 0) { 7514 un->un_f_allow_bus_device_reset = TRUE; 7515 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7516 "sd_unit_attach: un:0x%p Bus device reset " 7517 "enabled\n", un); 7518 } else { 7519 un->un_f_allow_bus_device_reset = FALSE; 7520 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7521 "sd_unit_attach: un:0x%p Bus device reset " 7522 "disabled\n", un); 7523 } 7524 } 7525 7526 /* 7527 * Check if this is an ATAPI device. ATAPI devices use Group 1 7528 * Read/Write commands and Group 2 Mode Sense/Select commands. 7529 * 7530 * Note: The "obsolete" way of doing this is to check for the "atapi" 7531 * property. The new "variant" property with a value of "atapi" has been 7532 * introduced so that future 'variants' of standard SCSI behavior (like 7533 * atapi) could be specified by the underlying HBA drivers by supplying 7534 * a new value for the "variant" property, instead of having to define a 7535 * new property. 7536 */ 7537 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7538 un->un_f_cfg_is_atapi = TRUE; 7539 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7540 "sd_unit_attach: un:0x%p Atapi device\n", un); 7541 } 7542 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7543 &variantp) == DDI_PROP_SUCCESS) { 7544 if (strcmp(variantp, "atapi") == 0) { 7545 un->un_f_cfg_is_atapi = TRUE; 7546 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7547 "sd_unit_attach: un:0x%p Atapi device\n", un); 7548 } 7549 ddi_prop_free(variantp); 7550 } 7551 7552 un->un_cmd_timeout = SD_IO_TIME; 7553 7554 un->un_busy_timeout = SD_BSY_TIMEOUT; 7555 7556 /* Info on current states, statuses, etc. (Updated frequently) */ 7557 un->un_state = SD_STATE_NORMAL; 7558 un->un_last_state = SD_STATE_NORMAL; 7559 7560 /* Control & status info for command throttling */ 7561 un->un_throttle = sd_max_throttle; 7562 un->un_saved_throttle = sd_max_throttle; 7563 un->un_min_throttle = sd_min_throttle; 7564 7565 if (un->un_f_is_fibre == TRUE) { 7566 un->un_f_use_adaptive_throttle = TRUE; 7567 } else { 7568 un->un_f_use_adaptive_throttle = FALSE; 7569 } 7570 7571 /* Removable media support. */ 7572 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7573 un->un_mediastate = DKIO_NONE; 7574 un->un_specified_mediastate = DKIO_NONE; 7575 7576 /* CVs for suspend/resume (PM or DR) */ 7577 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7578 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7579 7580 /* Power management support. 
*/ 7581 un->un_power_level = SD_SPINDLE_UNINIT; 7582 7583 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 7584 un->un_f_wcc_inprog = 0; 7585 7586 /* 7587 * The open/close semaphore is used to serialize threads executing 7588 * in the driver's open & close entry point routines for a given 7589 * instance. 7590 */ 7591 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7592 7593 /* 7594 * The conf file entries and softstate variables below are forceful 7595 * overrides, meaning a non-zero value must be entered to change the 7596 * default. 7597 */ 7598 un->un_f_disksort_disabled = FALSE; 7599 un->un_f_rmw_type = SD_RMW_TYPE_DEFAULT; 7600 un->un_f_enable_rmw = FALSE; 7601 7602 /* 7603 * GET EVENT STATUS NOTIFICATION media polling enabled by default, but 7604 * can be overridden via [s]sd-config-list "mmc-gesn-polling" property. 7605 */ 7606 un->un_f_mmc_gesn_polling = TRUE; 7607 7608 /* 7609 * Retrieve the properties from the static driver table or the driver 7610 * configuration file (.conf) for this unit and update the soft state 7611 * for the device as needed for the indicated properties. 7612 * Note: the property configuration needs to occur here as some of the 7613 * following routines may have dependencies on soft state flags set 7614 * as part of the driver property configuration. 7615 */ 7616 sd_read_unit_properties(un); 7617 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7618 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 7619 7620 /* 7621 * A device is treated as hotpluggable only if it has the 7622 * "hotpluggable" property; otherwise it is regarded as 7623 * non-hotpluggable. 7624 */ 7625 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 7626 -1) != -1) { 7627 un->un_f_is_hotpluggable = TRUE; 7628 } 7629 7630 /* 7631 * Set the unit's attributes (flags) according to the "hotpluggable" 7632 * property and the RMB bit in the INQUIRY data. 7633 */ 7634 sd_set_unit_attributes(un, devi); 7635 7636 /* 7637 * By default, we mark the capacity, lbasize, and geometry 7638 * as invalid. Only if we successfully read a valid capacity 7639 * will we update the un_blockcount and un_tgt_blocksize with the 7640 * valid values (the geometry will be validated later). 7641 */ 7642 un->un_f_blockcount_is_valid = FALSE; 7643 un->un_f_tgt_blocksize_is_valid = FALSE; 7644 7645 /* 7646 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7647 * otherwise. 7648 */ 7649 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7650 un->un_blockcount = 0; 7651 7652 /* 7653 * The physical sector size currently defaults to DEV_BSIZE. 7654 */ 7655 un->un_phy_blocksize = DEV_BSIZE; 7656 7657 /* 7658 * Set up the per-instance info needed to determine the correct 7659 * CDBs and other info for issuing commands to the target. 7660 */ 7661 sd_init_cdb_limits(un); 7662 7663 /* 7664 * Set up the IO chains to use, based upon the target type.
7664 */ 7665 if (un->un_f_non_devbsize_supported) { 7666 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7667 } else { 7668 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7669 } 7670 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7671 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7672 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7673 7674 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7675 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7676 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7677 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7678 7679 7680 if (ISCD(un)) { 7681 un->un_additional_codes = sd_additional_codes; 7682 } else { 7683 un->un_additional_codes = NULL; 7684 } 7685 7686 /* 7687 * Create the kstats here so they can be available for attach-time 7688 * routines that send commands to the unit (either polled or via 7689 * sd_send_scsi_cmd). 7690 * 7691 * Note: This is a critical sequence that needs to be maintained: 7692 * 1) Instantiate the kstats here, before any routines using the 7693 * iopath (i.e. sd_send_scsi_cmd). 7694 * 2) Instantiate and initialize the partition stats 7695 * (sd_set_pstats). 7696 * 3) Initialize the error stats (sd_set_errstats), following 7697 * sd_validate_geometry(),sd_register_devid(), 7698 * and sd_cache_control(). 7699 */ 7700 7701 un->un_stats = kstat_create(sd_label, instance, 7702 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7703 if (un->un_stats != NULL) { 7704 un->un_stats->ks_lock = SD_MUTEX(un); 7705 kstat_install(un->un_stats); 7706 } 7707 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7708 "sd_unit_attach: un:0x%p un_stats created\n", un); 7709 7710 sd_create_errstats(un, instance); 7711 if (un->un_errstats == NULL) { 7712 goto create_errstats_failed; 7713 } 7714 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7715 "sd_unit_attach: un:0x%p errstats created\n", un); 7716 7717 /* 7718 * The following if/else code was relocated here from below as part 7719 * of the fix for bug (4430280). However with the default setup added 7720 * on entry to this routine, it's no longer absolutely necessary for 7721 * this to be before the call to sd_spin_up_unit. 7722 */ 7723 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7724 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7725 (devp->sd_inq->inq_ansi == 5)) && 7726 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7727 7728 /* 7729 * If tagged queueing is supported by the target 7730 * and by the host adapter then we will enable it 7731 */ 7732 un->un_tagflags = 0; 7733 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7734 (un->un_f_arq_enabled == TRUE)) { 7735 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7736 1, 1) == 1) { 7737 un->un_tagflags = FLAG_STAG; 7738 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7739 "sd_unit_attach: un:0x%p tag queueing " 7740 "enabled\n", un); 7741 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7742 "untagged-qing", 0) == 1) { 7743 un->un_f_opt_queueing = TRUE; 7744 un->un_saved_throttle = un->un_throttle = 7745 min(un->un_throttle, 3); 7746 } else { 7747 un->un_f_opt_queueing = FALSE; 7748 un->un_saved_throttle = un->un_throttle = 1; 7749 } 7750 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7751 == 1) && (un->un_f_arq_enabled == TRUE)) { 7752 /* The Host Adapter supports internal queueing. 
*/ 7753 un->un_f_opt_queueing = TRUE; 7754 un->un_saved_throttle = un->un_throttle = 7755 min(un->un_throttle, 3); 7756 } else { 7757 un->un_f_opt_queueing = FALSE; 7758 un->un_saved_throttle = un->un_throttle = 1; 7759 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7760 "sd_unit_attach: un:0x%p no tag queueing\n", un); 7761 } 7762 7763 /* 7764 * Enable large transfers for SATA/SAS drives 7765 */ 7766 if (SD_IS_SERIAL(un)) { 7767 un->un_max_xfer_size = 7768 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7769 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7770 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7771 "sd_unit_attach: un:0x%p max transfer " 7772 "size=0x%x\n", un, un->un_max_xfer_size); 7773 7774 } 7775 7776 /* Set up or tear down default wide operations for disks */ 7777 7778 /* 7779 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 7780 * and "ssd_max_xfer_size" to exist simultaneously on the same 7781 * system and be set to different values. In the future this 7782 * code may need to be updated when the ssd module is 7783 * obsoleted and removed from the system. (4299588) 7784 */ 7785 if (SD_IS_PARALLEL_SCSI(un) && 7786 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 7787 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 7788 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7789 1, 1) == 1) { 7790 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7791 "sd_unit_attach: un:0x%p Wide Transfer " 7792 "enabled\n", un); 7793 } 7794 7795 /* 7796 * If tagged queuing has also been enabled, then 7797 * enable large xfers 7798 */ 7799 if (un->un_saved_throttle == sd_max_throttle) { 7800 un->un_max_xfer_size = 7801 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7802 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7803 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7804 "sd_unit_attach: un:0x%p max transfer " 7805 "size=0x%x\n", un, un->un_max_xfer_size); 7806 } 7807 } else { 7808 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7809 0, 1) == 1) { 7810 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7811 "sd_unit_attach: un:0x%p " 7812 "Wide Transfer disabled\n", un); 7813 } 7814 } 7815 } else { 7816 un->un_tagflags = FLAG_STAG; 7817 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7818 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7819 } 7820 7821 /* 7822 * If this target supports LUN reset, try to enable it. 7823 */ 7824 if (un->un_f_lun_reset_enabled) { 7825 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7826 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7827 "un:0x%p lun_reset capability set\n", un); 7828 } else { 7829 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7830 "un:0x%p lun-reset capability not set\n", un); 7831 } 7832 } 7833 7834 /* 7835 * Adjust the maximum transfer size. This is to fix 7836 * the problem of partial DMA support on SPARC. Some 7837 * HBA drivers, like aac, have a very small dma_attr_maxxfer 7838 * size, which requires partial DMA support on SPARC. 7839 * In the future the SPARC pci nexus driver may solve 7840 * the problem instead of this fix.
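* The code below clamps un_max_xfer_size to the HBA's "dma-max" capability and turns on partial DMA support whenever such a clamp is applied.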
7841 */ 7842 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7843 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7844 /* We need DMA partial even on sparc to ensure sddump() works */ 7845 un->un_max_xfer_size = max_xfer_size; 7846 if (un->un_partial_dma_supported == 0) 7847 un->un_partial_dma_supported = 1; 7848 } 7849 if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 7850 DDI_PROP_DONTPASS, "buf_break", 0) == 1) { 7851 if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr, 7852 un->un_max_xfer_size) == 1) { 7853 un->un_buf_breakup_supported = 1; 7854 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7855 "un:0x%p Buf breakup enabled\n", un); 7856 } 7857 } 7858 7859 /* 7860 * Set PKT_DMA_PARTIAL flag. 7861 */ 7862 if (un->un_partial_dma_supported == 1) { 7863 un->un_pkt_flags = PKT_DMA_PARTIAL; 7864 } else { 7865 un->un_pkt_flags = 0; 7866 } 7867 7868 /* Initialize sd_ssc_t for internal uscsi commands */ 7869 ssc = sd_ssc_init(un); 7870 scsi_fm_init(devp); 7871 7872 /* 7873 * Allocate memory for the SCSI FMA structures. 7874 */ 7875 un->un_fm_private = 7876 kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP); 7877 sfip = (struct sd_fm_internal *)un->un_fm_private; 7878 sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd; 7879 sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo; 7880 sfip->fm_ssc.ssc_un = un; 7881 7882 if (ISCD(un) || 7883 un->un_f_has_removable_media || 7884 devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) { 7885 /* 7886 * We don't touch CD-ROMs or DDI_FM_NOT_CAPABLE devices; 7887 * their logging is unchanged. 7888 */ 7889 sfip->fm_log_level = SD_FM_LOG_NSUP; 7890 } else { 7891 /* 7892 * If we get here, this should be a non-CDROM, FM-capable 7893 * device, and it will not keep the old scsi_log output in 7894 * /var/adm/messages as before. However, the property 7895 * "fm-scsi-log" will control whether the FM telemetry will 7896 * be logged in /var/adm/messages. 7897 */ 7898 int fm_scsi_log; 7899 fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 7900 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0); 7901 7902 if (fm_scsi_log) 7903 sfip->fm_log_level = SD_FM_LOG_EREPORT; 7904 else 7905 sfip->fm_log_level = SD_FM_LOG_SILENT; 7906 } 7907 7908 /* 7909 * At this point in the attach, we have enough info in the 7910 * soft state to be able to issue commands to the target. 7911 * 7912 * All command paths used below MUST issue their commands as 7913 * SD_PATH_DIRECT. This is important as intermediate layers 7914 * are not all initialized yet (such as PM). 7915 */ 7916 7917 /* 7918 * Send a TEST UNIT READY command to the device. This should clear 7919 * any outstanding UNIT ATTENTION that may be present. 7920 * 7921 * Note: Don't check for success; just track whether there is a 7922 * reservation. This is a throw-away command to clear any unit 7923 * attentions. 7924 * 7925 * Note: This MUST be the first command issued to the target during 7926 * attach to ensure power on UNIT ATTENTIONS are cleared. 7927 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7928 * with attempts at spinning up a device with no media. 7929 */ 7930 status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 7931 if (status != 0) { 7932 if (status == EACCES) 7933 reservation_flag = SD_TARGET_IS_RESERVED; 7934 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7935 } 7936 7937 /* 7938 * If the device is NOT a removable media device, attempt to spin 7939 * it up (using the START_STOP_UNIT command) and read its capacity 7940 * (using the READ CAPACITY command).
Note, however, that either 7940 * of these could fail and in some cases we would continue with 7941 * the attach despite the failure (see below). 7942 */ 7943 if (un->un_f_descr_format_supported) { 7944 7945 switch (sd_spin_up_unit(ssc)) { 7946 case 0: 7947 /* 7948 * Spin-up was successful; now try to read the 7949 * capacity. If successful then save the results 7950 * and mark the capacity & lbasize as valid. 7951 */ 7952 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7953 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7954 7955 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 7956 &lbasize, SD_PATH_DIRECT); 7957 7958 switch (status) { 7959 case 0: { 7960 if (capacity > DK_MAX_BLOCKS) { 7961 #ifdef _LP64 7962 if ((capacity + 1) > 7963 SD_GROUP1_MAX_ADDRESS) { 7964 /* 7965 * Enable descriptor format 7966 * sense data so that we can 7967 * get 64 bit sense data 7968 * fields. 7969 */ 7970 sd_enable_descr_sense(ssc); 7971 } 7972 #else 7973 /* 32-bit kernels can't handle this */ 7974 scsi_log(SD_DEVINFO(un), 7975 sd_label, CE_WARN, 7976 "disk has %llu blocks, which " 7977 "is too large for a 32-bit " 7978 "kernel", capacity); 7979 7980 #if defined(__i386) || defined(__amd64) 7981 /* 7982 * A 1TB disk was treated as (1T - 512)B 7983 * in the past, so it might have a 7984 * valid VTOC and Solaris partitions; 7985 * we have to allow it to continue to 7986 * work. 7987 */ 7988 if (capacity - 1 > DK_MAX_BLOCKS) 7989 #endif 7990 goto spinup_failed; 7991 #endif 7992 } 7993 7994 /* 7995 * Here it's not necessary to check whether the 7996 * capacity of the device is bigger than what the 7997 * max HBA CDB can support, because 7998 * sd_send_scsi_READ_CAPACITY retrieves the 7999 * capacity by sending a USCSI command, which is 8000 * constrained by the max HBA CDB. In fact, 8001 * sd_send_scsi_READ_CAPACITY returns EINVAL when 8002 * a bigger CDB than the required CDB length would 8003 * be needed; we handle that below in 8004 * "case EINVAL". 8005 */ 8006 8007 /* 8008 * The following relies on 8009 * sd_send_scsi_READ_CAPACITY never 8010 * returning 0 for capacity and/or lbasize. 8011 */ 8012 sd_update_block_info(un, lbasize, capacity); 8013 8014 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8015 "sd_unit_attach: un:0x%p capacity = %ld " 8016 "blocks; lbasize= %ld.\n", un, 8017 un->un_blockcount, un->un_tgt_blocksize); 8018 8019 break; 8020 } 8021 case EINVAL: 8022 /* 8023 * In the case where the max-cdb-length property 8024 * is smaller than the required CDB length for 8025 * a SCSI device, a target driver can fail to 8026 * attach to that device. 8027 */ 8028 scsi_log(SD_DEVINFO(un), 8029 sd_label, CE_WARN, 8030 "disk capacity is too large " 8031 "for current cdb length"); 8032 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8033 8034 goto spinup_failed; 8035 case EACCES: 8036 /* 8037 * Should never get here if the spin-up 8038 * succeeded, but code it in anyway. 8039 * From here, just continue with the attach... 8040 */ 8041 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8042 "sd_unit_attach: un:0x%p " 8043 "sd_send_scsi_READ_CAPACITY " 8044 "returned reservation conflict\n", un); 8045 reservation_flag = SD_TARGET_IS_RESERVED; 8046 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8047 break; 8048 default: 8049 /* 8050 * Likewise, should never get here if the 8051 * spin-up succeeded. Just continue with 8052 * the attach...
8053 */ 8054 if (status == EIO) 8055 sd_ssc_assessment(ssc, 8056 SD_FMT_STATUS_CHECK); 8057 else 8058 sd_ssc_assessment(ssc, 8059 SD_FMT_IGNORE); 8060 break; 8061 } 8062 break; 8063 case EACCES: 8064 /* 8065 * Device is reserved by another host. In this case 8066 * we could not spin it up or read the capacity, but 8067 * we continue with the attach anyway. 8068 */ 8069 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8070 "sd_unit_attach: un:0x%p spin-up reservation " 8071 "conflict.\n", un); 8072 reservation_flag = SD_TARGET_IS_RESERVED; 8073 break; 8074 default: 8075 /* Fail the attach if the spin-up failed. */ 8076 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8077 "sd_unit_attach: un:0x%p spin-up failed.", un); 8078 goto spinup_failed; 8079 } 8080 8081 } 8082 8083 /* 8084 * Check to see if this is a MMC drive 8085 */ 8086 if (ISCD(un)) { 8087 sd_set_mmc_caps(ssc); 8088 } 8089 8090 /* 8091 * Add a zero-length attribute to tell the world we support 8092 * kernel ioctls (for layered drivers) 8093 */ 8094 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8095 DDI_KERNEL_IOCTL, NULL, 0); 8096 8097 /* 8098 * Add a boolean property to tell the world we support 8099 * the B_FAILFAST flag (for layered drivers) 8100 */ 8101 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8102 "ddi-failfast-supported", NULL, 0); 8103 8104 /* 8105 * Initialize power management 8106 */ 8107 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 8108 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 8109 sd_setup_pm(ssc, devi); 8110 if (un->un_f_pm_is_enabled == FALSE) { 8111 /* 8112 * For performance, point to a jump table that does 8113 * not include pm. 8114 * The direct and priority chains don't change with PM. 8115 * 8116 * Note: this is currently done based on individual device 8117 * capabilities. When an interface for determining system 8118 * power enabled state becomes available, or when additional 8119 * layers are added to the command chain, these values will 8120 * have to be re-evaluated for correctness. 8121 */ 8122 if (un->un_f_non_devbsize_supported) { 8123 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 8124 } else { 8125 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 8126 } 8127 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8128 } 8129 8130 /* 8131 * This property is set to 0 by HA software to avoid retries 8132 * on a reserved disk. (The preferred property name is 8133 * "retry-on-reservation-conflict") (1189689) 8134 * 8135 * Note: The use of a global here can have unintended consequences. A 8136 * per instance variable is preferable to match the capabilities of 8137 * different underlying hba's (4402600) 8138 */ 8139 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 8140 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 8141 sd_retry_on_reservation_conflict); 8142 if (sd_retry_on_reservation_conflict != 0) { 8143 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 8144 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 8145 sd_retry_on_reservation_conflict); 8146 } 8147 8148 /* Set up options for QFULL handling. 
*/ 8149 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8150 "qfull-retries", -1)) != -1) { 8151 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 8152 rval, 1); 8153 } 8154 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8155 "qfull-retry-interval", -1)) != -1) { 8156 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 8157 rval, 1); 8158 } 8159 8160 /* 8161 * This just prints a message that announces the existence of the 8162 * device. The message is always printed in the system logfile, but 8163 * only appears on the console if the system is booted with the 8164 * -v (verbose) argument. 8165 */ 8166 ddi_report_dev(devi); 8167 8168 un->un_mediastate = DKIO_NONE; 8169 8170 /* 8171 * Check if this is a SSD(Solid State Drive). 8172 */ 8173 sd_check_solid_state(ssc); 8174 8175 /* 8176 * Check whether the drive is in emulation mode. 8177 */ 8178 sd_check_emulation_mode(ssc); 8179 8180 cmlb_alloc_handle(&un->un_cmlbhandle); 8181 8182 #if defined(__i386) || defined(__amd64) 8183 /* 8184 * On x86, compensate for off-by-1 legacy error 8185 */ 8186 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 8187 (lbasize == un->un_sys_blocksize)) 8188 offbyone = CMLB_OFF_BY_ONE; 8189 #endif 8190 8191 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 8192 VOID2BOOLEAN(un->un_f_has_removable_media != 0), 8193 VOID2BOOLEAN(un->un_f_is_hotpluggable != 0), 8194 un->un_node_type, offbyone, un->un_cmlbhandle, 8195 (void *)SD_PATH_DIRECT) != 0) { 8196 goto cmlb_attach_failed; 8197 } 8198 8199 8200 /* 8201 * Read and validate the device's geometry (ie, disk label) 8202 * A new unformatted drive will not have a valid geometry, but 8203 * the driver needs to successfully attach to this device so 8204 * the drive can be formatted via ioctls. 8205 */ 8206 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 8207 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 8208 8209 mutex_enter(SD_MUTEX(un)); 8210 8211 /* 8212 * Read and initialize the devid for the unit. 8213 */ 8214 if (un->un_f_devid_supported) { 8215 sd_register_devid(ssc, devi, reservation_flag); 8216 } 8217 mutex_exit(SD_MUTEX(un)); 8218 8219 #if (defined(__fibre)) 8220 /* 8221 * Register callbacks for fibre only. You can't do this solely 8222 * on the basis of the devid_type because this is hba specific. 8223 * We need to query our hba capabilities to find out whether to 8224 * register or not. 8225 */ 8226 if (un->un_f_is_fibre) { 8227 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 8228 sd_init_event_callbacks(un); 8229 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8230 "sd_unit_attach: un:0x%p event callbacks inserted", 8231 un); 8232 } 8233 } 8234 #endif 8235 8236 if (un->un_f_opt_disable_cache == TRUE) { 8237 /* 8238 * Disable both read cache and write cache. This is 8239 * the historic behavior of the keywords in the config file. 8240 */ 8241 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 8242 0) { 8243 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8244 "sd_unit_attach: un:0x%p Could not disable " 8245 "caching", un); 8246 goto devid_failed; 8247 } 8248 } 8249 8250 /* 8251 * Check the value of the WCE bit now and 8252 * set un_f_write_cache_enabled accordingly. 
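* Caching the result in the soft state presumably lets later consumers test the flag under SD_MUTEX(un) instead of querying the device again.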
8253 */ 8254 (void) sd_get_write_cache_enabled(ssc, &wc_enabled); 8255 mutex_enter(SD_MUTEX(un)); 8256 un->un_f_write_cache_enabled = (wc_enabled != 0); 8257 mutex_exit(SD_MUTEX(un)); 8258 8259 if ((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR && 8260 un->un_tgt_blocksize != DEV_BSIZE) || 8261 un->un_f_enable_rmw) { 8262 if (!(un->un_wm_cache)) { 8263 (void) snprintf(name_str, sizeof (name_str), 8264 "%s%d_cache", 8265 ddi_driver_name(SD_DEVINFO(un)), 8266 ddi_get_instance(SD_DEVINFO(un))); 8267 un->un_wm_cache = kmem_cache_create( 8268 name_str, sizeof (struct sd_w_map), 8269 8, sd_wm_cache_constructor, 8270 sd_wm_cache_destructor, NULL, 8271 (void *)un, NULL, 0); 8272 if (!(un->un_wm_cache)) { 8273 goto wm_cache_failed; 8274 } 8275 } 8276 } 8277 8278 /* 8279 * Check the value of the NV_SUP bit and set 8280 * un_f_suppress_cache_flush accordingly. 8281 */ 8282 sd_get_nv_sup(ssc); 8283 8284 /* 8285 * Find out what type of reservation this disk supports. 8286 */ 8287 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL); 8288 8289 switch (status) { 8290 case 0: 8291 /* 8292 * SCSI-3 reservations are supported. 8293 */ 8294 un->un_reservation_type = SD_SCSI3_RESERVATION; 8295 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8296 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 8297 break; 8298 case ENOTSUP: 8299 /* 8300 * The PERSISTENT RESERVE IN command would not be recognized by 8301 * a SCSI-2 device, so assume the reservation type is SCSI-2. 8302 */ 8303 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8304 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 8305 un->un_reservation_type = SD_SCSI2_RESERVATION; 8306 8307 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8308 break; 8309 default: 8310 /* 8311 * default to SCSI-3 reservations 8312 */ 8313 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8314 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 8315 un->un_reservation_type = SD_SCSI3_RESERVATION; 8316 8317 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8318 break; 8319 } 8320 8321 /* 8322 * Set the pstat and error stat values here, so data obtained during the 8323 * previous attach-time routines is available. 8324 * 8325 * Note: This is a critical sequence that needs to be maintained: 8326 * 1) Instantiate the kstats before any routines using the iopath 8327 * (i.e. sd_send_scsi_cmd). 8328 * 2) Initialize the error stats (sd_set_errstats) and partition 8329 * stats (sd_set_pstats)here, following 8330 * cmlb_validate_geometry(), sd_register_devid(), and 8331 * sd_cache_control(). 8332 */ 8333 8334 if (un->un_f_pkstats_enabled && geom_label_valid) { 8335 sd_set_pstats(un); 8336 SD_TRACE(SD_LOG_IO_PARTITION, un, 8337 "sd_unit_attach: un:0x%p pstats created and set\n", un); 8338 } 8339 8340 sd_set_errstats(un); 8341 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8342 "sd_unit_attach: un:0x%p errstats set\n", un); 8343 8344 8345 /* 8346 * After successfully attaching an instance, we record the information 8347 * of how many luns have been attached on the relative target and 8348 * controller for parallel SCSI. This information is used when sd tries 8349 * to set the tagged queuing capability in HBA. 
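* The matching SD_SCSI_LUN_DETACH update is made near the end of sd_unit_detach().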
8350 */ 8351 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8352 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 8353 } 8354 8355 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8356 "sd_unit_attach: un:0x%p exit success\n", un); 8357 8358 /* Uninitialize sd_ssc_t pointer */ 8359 sd_ssc_fini(ssc); 8360 8361 return (DDI_SUCCESS); 8362 8363 /* 8364 * An error occurred during the attach; clean up & return failure. 8365 */ 8366 wm_cache_failed: 8367 devid_failed: 8368 8369 setup_pm_failed: 8370 ddi_remove_minor_node(devi, NULL); 8371 8372 cmlb_attach_failed: 8373 /* 8374 * Cleanup from the scsi_ifsetcap() calls (437868) 8375 */ 8376 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8377 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8378 8379 /* 8380 * Refer to the comments of setting tagged-qing in the beginning of 8381 * sd_unit_attach. We can only disable tagged queuing when there is 8382 * no lun attached on the target. 8383 */ 8384 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 8385 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8386 } 8387 8388 if (un->un_f_is_fibre == FALSE) { 8389 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8390 } 8391 8392 spinup_failed: 8393 8394 /* Uninitialize sd_ssc_t pointer */ 8395 sd_ssc_fini(ssc); 8396 8397 mutex_enter(SD_MUTEX(un)); 8398 8399 /* Deallocate SCSI FMA memory spaces */ 8400 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8401 8402 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */ 8403 if (un->un_direct_priority_timeid != NULL) { 8404 timeout_id_t temp_id = un->un_direct_priority_timeid; 8405 un->un_direct_priority_timeid = NULL; 8406 mutex_exit(SD_MUTEX(un)); 8407 (void) untimeout(temp_id); 8408 mutex_enter(SD_MUTEX(un)); 8409 } 8410 8411 /* Cancel any pending start/stop timeouts */ 8412 if (un->un_startstop_timeid != NULL) { 8413 timeout_id_t temp_id = un->un_startstop_timeid; 8414 un->un_startstop_timeid = NULL; 8415 mutex_exit(SD_MUTEX(un)); 8416 (void) untimeout(temp_id); 8417 mutex_enter(SD_MUTEX(un)); 8418 } 8419 8420 /* Cancel any pending reset-throttle timeouts */ 8421 if (un->un_reset_throttle_timeid != NULL) { 8422 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8423 un->un_reset_throttle_timeid = NULL; 8424 mutex_exit(SD_MUTEX(un)); 8425 (void) untimeout(temp_id); 8426 mutex_enter(SD_MUTEX(un)); 8427 } 8428 8429 /* Cancel rmw warning message timeouts */ 8430 if (un->un_rmw_msg_timeid != NULL) { 8431 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8432 un->un_rmw_msg_timeid = NULL; 8433 mutex_exit(SD_MUTEX(un)); 8434 (void) untimeout(temp_id); 8435 mutex_enter(SD_MUTEX(un)); 8436 } 8437 8438 /* Cancel any pending retry timeouts */ 8439 if (un->un_retry_timeid != NULL) { 8440 timeout_id_t temp_id = un->un_retry_timeid; 8441 un->un_retry_timeid = NULL; 8442 mutex_exit(SD_MUTEX(un)); 8443 (void) untimeout(temp_id); 8444 mutex_enter(SD_MUTEX(un)); 8445 } 8446 8447 /* Cancel any pending delayed cv broadcast timeouts */ 8448 if (un->un_dcvb_timeid != NULL) { 8449 timeout_id_t temp_id = un->un_dcvb_timeid; 8450 un->un_dcvb_timeid = NULL; 8451 mutex_exit(SD_MUTEX(un)); 8452 (void) untimeout(temp_id); 8453 mutex_enter(SD_MUTEX(un)); 8454 } 8455 8456 mutex_exit(SD_MUTEX(un)); 8457 8458 /* There should not be any in-progress I/O so ASSERT this check */ 8459 ASSERT(un->un_ncmds_in_transport == 0); 8460 ASSERT(un->un_ncmds_in_driver == 0); 8461 8462 /* Do not free the softstate if the callback routine is active */ 8463 sd_sync_with_callback(un); 
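/* From here the error path unwinds in reverse order of the allocations above; the labels below fall through so that each failure point releases only what had been set up before it. */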
8464 8465 /* 8466 * Partition stats apparently are not used with removables. These would 8467 * not have been created during attach, so no need to clean them up... 8468 */ 8469 if (un->un_errstats != NULL) { 8470 kstat_delete(un->un_errstats); 8471 un->un_errstats = NULL; 8472 } 8473 8474 create_errstats_failed: 8475 8476 if (un->un_stats != NULL) { 8477 kstat_delete(un->un_stats); 8478 un->un_stats = NULL; 8479 } 8480 8481 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8482 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8483 8484 ddi_prop_remove_all(devi); 8485 sema_destroy(&un->un_semoclose); 8486 cv_destroy(&un->un_state_cv); 8487 8488 getrbuf_failed: 8489 8490 sd_free_rqs(un); 8491 8492 alloc_rqs_failed: 8493 8494 devp->sd_private = NULL; 8495 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8496 8497 get_softstate_failed: 8498 /* 8499 * Note: the man pages are unclear as to whether or not doing a 8500 * ddi_soft_state_free(sd_state, instance) is the right way to 8501 * clean up after the ddi_soft_state_zalloc() if the subsequent 8502 * ddi_get_soft_state() fails. The implication seems to be 8503 * that the get_soft_state cannot fail if the zalloc succeeds. 8504 */ 8505 #ifndef XPV_HVM_DRIVER 8506 ddi_soft_state_free(sd_state, instance); 8507 #endif /* !XPV_HVM_DRIVER */ 8508 8509 probe_failed: 8510 scsi_unprobe(devp); 8511 8512 return (DDI_FAILURE); 8513 } 8514 8515 8516 /* 8517 * Function: sd_unit_detach 8518 * 8519 * Description: Performs DDI_DETACH processing for sddetach(). 8520 * 8521 * Return Code: DDI_SUCCESS 8522 * DDI_FAILURE 8523 * 8524 * Context: Kernel thread context 8525 */ 8526 8527 static int 8528 sd_unit_detach(dev_info_t *devi) 8529 { 8530 struct scsi_device *devp; 8531 struct sd_lun *un; 8532 int i; 8533 int tgt; 8534 dev_t dev; 8535 dev_info_t *pdip = ddi_get_parent(devi); 8536 #ifndef XPV_HVM_DRIVER 8537 int instance = ddi_get_instance(devi); 8538 #endif /* !XPV_HVM_DRIVER */ 8539 8540 mutex_enter(&sd_detach_mutex); 8541 8542 /* 8543 * Fail the detach for any of the following: 8544 * - Unable to get the sd_lun struct for the instance 8545 * - A layered driver has an outstanding open on the instance 8546 * - Another thread is already detaching this instance 8547 * - Another thread is currently performing an open 8548 */ 8549 devp = ddi_get_driver_private(devi); 8550 if ((devp == NULL) || 8551 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8552 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8553 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8554 mutex_exit(&sd_detach_mutex); 8555 return (DDI_FAILURE); 8556 } 8557 8558 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8559 8560 /* 8561 * Mark this instance as currently in a detach, to inhibit any 8562 * opens from a layered driver. 8563 */ 8564 un->un_detach_count++; 8565 mutex_exit(&sd_detach_mutex); 8566 8567 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 8568 SCSI_ADDR_PROP_TARGET, -1); 8569 8570 dev = sd_make_device(SD_DEVINFO(un)); 8571 8572 #ifndef lint 8573 _NOTE(COMPETING_THREADS_NOW); 8574 #endif 8575 8576 mutex_enter(SD_MUTEX(un)); 8577 8578 /* 8579 * Fail the detach if there are any outstanding layered 8580 * opens on this device. 8581 */ 8582 for (i = 0; i < NDKMAP; i++) { 8583 if (un->un_ocmap.lyropen[i] != 0) { 8584 goto err_notclosed; 8585 } 8586 } 8587 8588 /* 8589 * Verify there are NO outstanding commands issued to this device. 8590 * ie, un_ncmds_in_transport == 0. 
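* A pending retry or direct-priority timeout, or a unit sitting in the RWAIT state, also counts as outstanding work here.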
8591 * It's possible to have outstanding commands through the physio 8592 * code path, even though everything's closed. 8593 */ 8594 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8595 (un->un_direct_priority_timeid != NULL) || 8596 (un->un_state == SD_STATE_RWAIT)) { 8597 mutex_exit(SD_MUTEX(un)); 8598 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8599 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8600 goto err_stillbusy; 8601 } 8602 8603 /* 8604 * If we have the device reserved, release the reservation. 8605 */ 8606 if ((un->un_resvd_status & SD_RESERVE) && 8607 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8608 mutex_exit(SD_MUTEX(un)); 8609 /* 8610 * Note: sd_reserve_release sends a command to the device 8611 * via the sd_ioctlcmd() path, and can sleep. 8612 */ 8613 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8614 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8615 "sd_dr_detach: Cannot release reservation \n"); 8616 } 8617 } else { 8618 mutex_exit(SD_MUTEX(un)); 8619 } 8620 8621 /* 8622 * Untimeout any reserve recover, throttle reset, restart unit 8623 * and delayed broadcast timeout threads. Protect the timeout pointer 8624 * from getting nulled by their callback functions. 8625 */ 8626 mutex_enter(SD_MUTEX(un)); 8627 if (un->un_resvd_timeid != NULL) { 8628 timeout_id_t temp_id = un->un_resvd_timeid; 8629 un->un_resvd_timeid = NULL; 8630 mutex_exit(SD_MUTEX(un)); 8631 (void) untimeout(temp_id); 8632 mutex_enter(SD_MUTEX(un)); 8633 } 8634 8635 if (un->un_reset_throttle_timeid != NULL) { 8636 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8637 un->un_reset_throttle_timeid = NULL; 8638 mutex_exit(SD_MUTEX(un)); 8639 (void) untimeout(temp_id); 8640 mutex_enter(SD_MUTEX(un)); 8641 } 8642 8643 if (un->un_startstop_timeid != NULL) { 8644 timeout_id_t temp_id = un->un_startstop_timeid; 8645 un->un_startstop_timeid = NULL; 8646 mutex_exit(SD_MUTEX(un)); 8647 (void) untimeout(temp_id); 8648 mutex_enter(SD_MUTEX(un)); 8649 } 8650 8651 if (un->un_rmw_msg_timeid != NULL) { 8652 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8653 un->un_rmw_msg_timeid = NULL; 8654 mutex_exit(SD_MUTEX(un)); 8655 (void) untimeout(temp_id); 8656 mutex_enter(SD_MUTEX(un)); 8657 } 8658 8659 if (un->un_dcvb_timeid != NULL) { 8660 timeout_id_t temp_id = un->un_dcvb_timeid; 8661 un->un_dcvb_timeid = NULL; 8662 mutex_exit(SD_MUTEX(un)); 8663 (void) untimeout(temp_id); 8664 } else { 8665 mutex_exit(SD_MUTEX(un)); 8666 } 8667 8668 /* Remove any pending reservation reclaim requests for this device */ 8669 sd_rmv_resv_reclaim_req(dev); 8670 8671 mutex_enter(SD_MUTEX(un)); 8672 8673 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8674 if (un->un_direct_priority_timeid != NULL) { 8675 timeout_id_t temp_id = un->un_direct_priority_timeid; 8676 un->un_direct_priority_timeid = NULL; 8677 mutex_exit(SD_MUTEX(un)); 8678 (void) untimeout(temp_id); 8679 mutex_enter(SD_MUTEX(un)); 8680 } 8681 8682 /* Cancel any active multi-host disk watch thread requests */ 8683 if (un->un_mhd_token != NULL) { 8684 mutex_exit(SD_MUTEX(un)); 8685 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8686 if (scsi_watch_request_terminate(un->un_mhd_token, 8687 SCSI_WATCH_TERMINATE_NOWAIT)) { 8688 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8689 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8690 /* 8691 * Note: We are returning here after having removed 8692 * some driver timeouts above. 
This is consistent with 8693 * the legacy implementation but perhaps the watch 8694 * terminate call should be made with the wait flag set. 8695 */ 8696 goto err_stillbusy; 8697 } 8698 mutex_enter(SD_MUTEX(un)); 8699 un->un_mhd_token = NULL; 8700 } 8701 8702 if (un->un_swr_token != NULL) { 8703 mutex_exit(SD_MUTEX(un)); 8704 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8705 if (scsi_watch_request_terminate(un->un_swr_token, 8706 SCSI_WATCH_TERMINATE_NOWAIT)) { 8707 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8708 "sd_dr_detach: Cannot cancel swr watch request\n"); 8709 /* 8710 * Note: We are returning here after having removed 8711 * some driver timeouts above. This is consistent with 8712 * the legacy implementation but perhaps the watch 8713 * terminate call should be made with the wait flag set. 8714 */ 8715 goto err_stillbusy; 8716 } 8717 mutex_enter(SD_MUTEX(un)); 8718 un->un_swr_token = NULL; 8719 } 8720 8721 mutex_exit(SD_MUTEX(un)); 8722 8723 /* 8724 * Clear any scsi_reset_notifies. We clear the reset notifies 8725 * even if we have not registered one. 8726 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 8727 */ 8728 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 8729 sd_mhd_reset_notify_cb, (caddr_t)un); 8730 8731 /* 8732 * Protect the timeout pointers from getting nulled by 8733 * their callback functions during the cancellation process; 8734 * otherwise untimeout() could be invoked with a NULL value. 8735 */ 8736 _NOTE(NO_COMPETING_THREADS_NOW); 8737 8738 mutex_enter(&un->un_pm_mutex); 8739 if (un->un_pm_idle_timeid != NULL) { 8740 timeout_id_t temp_id = un->un_pm_idle_timeid; 8741 un->un_pm_idle_timeid = NULL; 8742 mutex_exit(&un->un_pm_mutex); 8743 8744 /* 8745 * Timeout is active; cancel it. 8746 * Note that it'll never be active on a device 8747 * that does not support PM, therefore we don't 8748 * have to check before calling pm_idle_component. 8749 */ 8750 (void) untimeout(temp_id); 8751 (void) pm_idle_component(SD_DEVINFO(un), 0); 8752 mutex_enter(&un->un_pm_mutex); 8753 } 8754 8755 /* 8756 * Check whether there is already a timeout scheduled for power 8757 * management. If yes, then don't lower the power here; that's 8758 * the timeout handler's job. 8759 */ 8760 if (un->un_pm_timeid != NULL) { 8761 timeout_id_t temp_id = un->un_pm_timeid; 8762 un->un_pm_timeid = NULL; 8763 mutex_exit(&un->un_pm_mutex); 8764 /* 8765 * Timeout is active; cancel it. 8766 * Note that it'll never be active on a device 8767 * that does not support PM, therefore we don't 8768 * have to check before calling pm_idle_component. 8769 */ 8770 (void) untimeout(temp_id); 8771 (void) pm_idle_component(SD_DEVINFO(un), 0); 8772 8773 } else { 8774 mutex_exit(&un->un_pm_mutex); 8775 if ((un->un_f_pm_is_enabled == TRUE) && 8776 (pm_lower_power(SD_DEVINFO(un), 0, SD_PM_STATE_STOPPED(un)) 8777 != DDI_SUCCESS)) { 8778 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8779 "sd_dr_detach: Lower power request failed, ignoring.\n"); 8780 /* 8781 * Fix for bug: 4297749, item # 13 8782 * The above test now includes a check to see if PM is 8783 * supported by this device before calling 8784 * pm_lower_power(). 8785 * Note, the following is not dead code. The call to 8786 * pm_lower_power above will generate a call back into 8787 * our sdpower routine which might result in a timeout 8788 * handler getting activated. Therefore the following 8789 * code is valid and necessary.
8790 */ 8791 mutex_enter(&un->un_pm_mutex); 8792 if (un->un_pm_timeid != NULL) { 8793 timeout_id_t temp_id = un->un_pm_timeid; 8794 un->un_pm_timeid = NULL; 8795 mutex_exit(&un->un_pm_mutex); 8796 (void) untimeout(temp_id); 8797 (void) pm_idle_component(SD_DEVINFO(un), 0); 8798 } else { 8799 mutex_exit(&un->un_pm_mutex); 8800 } 8801 } 8802 } 8803 8804 /* 8805 * Cleanup from the scsi_ifsetcap() calls (437868) 8806 * Relocated here from above to be after the call to 8807 * pm_lower_power, which was getting errors. 8808 */ 8809 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8810 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8811 8812 /* 8813 * Currently, tagged queuing is supported per target by the HBA. 8814 * Setting this per lun instance actually sets the capability of this 8815 * target in the HBA, which affects those luns already attached on the 8816 * same target. So during detach, we can disable this capability 8817 * only when this is the only lun left on this target. By doing 8818 * this, we assume a target has the same tagged queuing capability 8819 * for every lun. The condition can be removed when the HBA is changed 8820 * to support a per-lun tagged queuing capability. 8821 */ 8822 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 8823 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8824 } 8825 8826 if (un->un_f_is_fibre == FALSE) { 8827 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8828 } 8829 8830 /* 8831 * Remove any event callbacks, fibre only 8832 */ 8833 if (un->un_f_is_fibre == TRUE) { 8834 if ((un->un_insert_event != NULL) && 8835 (ddi_remove_event_handler(un->un_insert_cb_id) != 8836 DDI_SUCCESS)) { 8837 /* 8838 * Note: We are returning here after having done 8839 * substantial cleanup above. This is consistent 8840 * with the legacy implementation but this may not 8841 * be the right thing to do. 8842 */ 8843 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8844 "sd_dr_detach: Cannot cancel insert event\n"); 8845 goto err_remove_event; 8846 } 8847 un->un_insert_event = NULL; 8848 8849 if ((un->un_remove_event != NULL) && 8850 (ddi_remove_event_handler(un->un_remove_cb_id) != 8851 DDI_SUCCESS)) { 8852 /* 8853 * Note: We are returning here after having done 8854 * substantial cleanup above. This is consistent 8855 * with the legacy implementation but this may not 8856 * be the right thing to do. 8857 */ 8858 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8859 "sd_dr_detach: Cannot cancel remove event\n"); 8860 goto err_remove_event; 8861 } 8862 un->un_remove_event = NULL; 8863 } 8864 8865 /* Do not free the softstate if the callback routine is active */ 8866 sd_sync_with_callback(un); 8867 8868 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 8869 cmlb_free_handle(&un->un_cmlbhandle); 8870 8871 /* 8872 * Hold the detach mutex here, to make sure that no other threads ever 8873 * can access a (partially) freed soft state structure. 8874 */ 8875 mutex_enter(&sd_detach_mutex); 8876 8877 /* 8878 * Clean up the soft state struct. 8879 * Cleanup is done in reverse order of allocs/inits. 8880 * At this point there should be no competing threads anymore. 8881 */ 8882 8883 scsi_fm_fini(devp); 8884 8885 /* 8886 * Deallocate memory for SCSI FMA. 8887 */ 8888 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8889 8890 /* 8891 * Unregister and free device id if it was not registered 8892 * by the transport.
	 * by the transport.
	 */
	if (un->un_f_devid_transport_defined == FALSE)
		ddi_devid_unregister(devi);

	/*
	 * Free the devid structure if it was allocated earlier (by
	 * ddi_devid_init() or ddi_devid_get()).
	 */
	if (un->un_devid) {
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	/*
	 * Destroy wmap cache if it exists.
	 */
	if (un->un_wm_cache != NULL) {
		kmem_cache_destroy(un->un_wm_cache);
		un->un_wm_cache = NULL;
	}

	/*
	 * kstat cleanup is done in detach for all device types (4363169).
	 * We do not want to fail detach if the device kstats are not deleted
	 * since there is confusion about the devo_refcnt for the device.
	 * We just delete the kstats and let detach complete successfully.
	 */
	if (un->un_stats != NULL) {
		kstat_delete(un->un_stats);
		un->un_stats = NULL;
	}
	if (un->un_errstats != NULL) {
		kstat_delete(un->un_errstats);
		un->un_errstats = NULL;
	}

	/* Remove partition stats */
	if (un->un_f_pkstats_enabled) {
		for (i = 0; i < NSDMAP; i++) {
			if (un->un_pstats[i] != NULL) {
				kstat_delete(un->un_pstats[i]);
				un->un_pstats[i] = NULL;
			}
		}
	}

	/* Remove xbuf registration */
	ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
	ddi_xbuf_attr_destroy(un->un_xbuf_attr);

	/* Remove driver properties */
	ddi_prop_remove_all(devi);

	mutex_destroy(&un->un_pm_mutex);
	cv_destroy(&un->un_pm_busy_cv);

	cv_destroy(&un->un_wcc_cv);

	/* Open/close semaphore */
	sema_destroy(&un->un_semoclose);

	/* Removable media condvar. */
	cv_destroy(&un->un_state_cv);

	/* Suspend/resume condvar. */
	cv_destroy(&un->un_suspend_cv);
	cv_destroy(&un->un_disk_busy_cv);

	sd_free_rqs(un);

	/* Free up soft state */
	devp->sd_private = NULL;

	bzero(un, sizeof (struct sd_lun));
#ifndef XPV_HVM_DRIVER
	ddi_soft_state_free(sd_state, instance);
#endif /* !XPV_HVM_DRIVER */

	mutex_exit(&sd_detach_mutex);

	/* This frees up the INQUIRY data associated with the device. */
	scsi_unprobe(devp);

	/*
	 * After successfully detaching an instance, we update the information
	 * of how many luns have been attached on the relevant target and
	 * controller for parallel SCSI. This information is used when sd tries
	 * to set the tagged queuing capability in the HBA.
	 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to
	 * check if the device is parallel SCSI. However, we don't need to
	 * check here because we've already checked during attach. No device
	 * that is not parallel SCSI is in the chain.
	 */
	if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) {
		sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH);
	}

	return (DDI_SUCCESS);

err_notclosed:
	mutex_exit(SD_MUTEX(un));

err_stillbusy:
	_NOTE(NO_COMPETING_THREADS_NOW);

err_remove_event:
	mutex_enter(&sd_detach_mutex);
	un->un_detach_count--;
	mutex_exit(&sd_detach_mutex);

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n");
	return (DDI_FAILURE);
}


/*
 * Function: sd_create_errstats
 *
 * Description: This routine instantiates the device error stats.
 *
 *		Note: During attach the stats are instantiated first so they
 *		are available for attach-time routines that utilize the driver
 *		iopath to send commands to the device. The stats are
 *		initialized separately so data obtained during some attach-time
 *		routines is available. (4362483)
 *
 * Arguments: un - driver soft state (unit) structure
 *		instance - driver instance
 *
 * Context: Kernel thread context
 */

static void
sd_create_errstats(struct sd_lun *un, int instance)
{
	struct sd_errstats *stp;
	char kstatmodule_err[KSTAT_STRLEN];
	char kstatname[KSTAT_STRLEN];
	int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t));

	ASSERT(un != NULL);

	if (un->un_errstats != NULL) {
		return;
	}

	(void) snprintf(kstatmodule_err, sizeof (kstatmodule_err),
	    "%serr", sd_label);
	(void) snprintf(kstatname, sizeof (kstatname),
	    "%s%d,err", sd_label, instance);

	un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname,
	    "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT);

	if (un->un_errstats == NULL) {
		SD_ERROR(SD_LOG_ATTACH_DETACH, un,
		    "sd_create_errstats: Failed kstat_create\n");
		return;
	}

	stp = (struct sd_errstats *)un->un_errstats->ks_data;
	kstat_named_init(&stp->sd_softerrs, "Soft Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_harderrs, "Hard Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_transerrs, "Transport Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_vid, "Vendor",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_pid, "Product",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_revision, "Revision",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_serial, "Serial No",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_capacity, "Size",
	    KSTAT_DATA_ULONGLONG);
	kstat_named_init(&stp->sd_rq_media_err, "Media Error",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_nodev_err, "No Device",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_recov_err, "Recoverable",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis",
	    KSTAT_DATA_UINT32);

	un->un_errstats->ks_private = un;
	un->un_errstats->ks_update = nulldev;

	kstat_install(un->un_errstats);
}

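
/*
 * Illustrative sketch (not part of the driver): the error kstats created
 * above can be read from userland with libkstat(3LIB). For a hypothetical
 * instance 0, the module is "sderr" and the kstat name is "sd0,err":
 *
 *	kstat_ctl_t *kc = kstat_open();
 *	kstat_t *ksp = kstat_lookup(kc, "sderr", 0, "sd0,err");
 *	if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1) {
 *		kstat_named_t *kn = kstat_data_lookup(ksp, "Hard Errors");
 *		if (kn != NULL)
 *			(void) printf("%u\n", kn->value.ui32);
 *	}
 *	(void) kstat_close(kc);
 *
 * This is the same data surfaced by iostat -E.
 */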

/*
 * Function: sd_set_errstats
 *
 * Description: This routine sets the value of the vendor id, product id,
 *		revision, serial number, and capacity device error stats.
 *
 *		Note: During attach the stats are instantiated first so they
 *		are available for attach-time routines that utilize the driver
 *		iopath to send commands to the device. The stats are
 *		initialized separately so data obtained during some attach-time
 *		routines is available. (4362483)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Kernel thread context
 */

static void
sd_set_errstats(struct sd_lun *un)
{
	struct sd_errstats *stp;
	char *sn;

	ASSERT(un != NULL);
	ASSERT(un->un_errstats != NULL);
	stp = (struct sd_errstats *)un->un_errstats->ks_data;
	ASSERT(stp != NULL);
	(void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
	(void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
	(void) strncpy(stp->sd_revision.value.c,
	    un->un_sd->sd_inq->inq_revision, 4);

	/*
	 * All the errstats are persistent across detach/attach, so reset
	 * them all here in case a disk drive has been hot-replaced. Skip
	 * the reset for Sun qualified drives whose serial number has not
	 * changed.
	 */
	if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
	    (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
	    sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
		stp->sd_softerrs.value.ui32 = 0;
		stp->sd_harderrs.value.ui32 = 0;
		stp->sd_transerrs.value.ui32 = 0;
		stp->sd_rq_media_err.value.ui32 = 0;
		stp->sd_rq_ntrdy_err.value.ui32 = 0;
		stp->sd_rq_nodev_err.value.ui32 = 0;
		stp->sd_rq_recov_err.value.ui32 = 0;
		stp->sd_rq_illrq_err.value.ui32 = 0;
		stp->sd_rq_pfa_err.value.ui32 = 0;
	}

	/*
	 * Set the "Serial No" kstat for Sun qualified drives (indicated by
	 * "SUN" in bytes 25-27 of the inquiry data, i.e. bytes 9-11 of the
	 * pid) (4376302)
	 */
	if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
		bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
		    sizeof (SD_INQUIRY(un)->inq_serial));
	} else {
		/*
		 * Set the "Serial No" kstat for non-Sun qualified drives
		 */
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, SD_DEVINFO(un),
		    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
		    INQUIRY_SERIAL_NO, &sn) == DDI_SUCCESS) {
			(void) strlcpy(stp->sd_serial.value.c, sn,
			    sizeof (stp->sd_serial.value.c));
			ddi_prop_free(sn);
		}
	}

	if (un->un_f_blockcount_is_valid != TRUE) {
		/*
		 * Set capacity error stat to 0 for no media. This ensures
		 * a valid capacity is displayed in response to 'iostat -E'
		 * when no media is present in the device.
		 */
		stp->sd_capacity.value.ui64 = 0;
	} else {
		/*
		 * Multiply un_blockcount by un->un_sys_blocksize to get
		 * capacity.
		 *
		 * Note: for non-512 blocksize devices "un_blockcount" has been
		 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
		 * (un_tgt_blocksize / un->un_sys_blocksize).
		 */
		stp->sd_capacity.value.ui64 = (uint64_t)
		    ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
	}
}

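
/*
 * Worked example (illustrative figures): a drive reporting 7814037168
 * blocks with a 512-byte system blocksize would publish
 * 7814037168 * 512 = 4000787030016 bytes (a nominal 4 TB disk) in the
 * sd_capacity kstat set above.
 */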

/*
 * Function: sd_set_pstats
 *
 * Description: This routine instantiates and initializes the partition
 *		stats for each partition with more than zero blocks.
 *		(4363169)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Kernel thread context
 */

static void
sd_set_pstats(struct sd_lun *un)
{
	char kstatname[KSTAT_STRLEN];
	int instance;
	int i;
	diskaddr_t nblks = 0;
	char *partname = NULL;

	ASSERT(un != NULL);

	instance = ddi_get_instance(SD_DEVINFO(un));

	/* Note:x86: is this a VTOC8/VTOC16 difference? */
	for (i = 0; i < NSDMAP; i++) {

		if (cmlb_partinfo(un->un_cmlbhandle, i,
		    &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
			continue;
		mutex_enter(SD_MUTEX(un));

		if ((un->un_pstats[i] == NULL) &&
		    (nblks != 0)) {

			(void) snprintf(kstatname, sizeof (kstatname),
			    "%s%d,%s", sd_label, instance,
			    partname);

			un->un_pstats[i] = kstat_create(sd_label,
			    instance, kstatname, "partition", KSTAT_TYPE_IO,
			    1, KSTAT_FLAG_PERSISTENT);
			if (un->un_pstats[i] != NULL) {
				un->un_pstats[i]->ks_lock = SD_MUTEX(un);
				kstat_install(un->un_pstats[i]);
			}
		}
		mutex_exit(SD_MUTEX(un));
	}
}


#if (defined(__fibre))
/*
 * Function: sd_init_event_callbacks
 *
 * Description: This routine initializes the insertion and removal event
 *		callbacks. (fibre only)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Kernel thread context
 */

static void
sd_init_event_callbacks(struct sd_lun *un)
{
	ASSERT(un != NULL);

	if ((un->un_insert_event == NULL) &&
	    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
	    &un->un_insert_event) == DDI_SUCCESS)) {
		/*
		 * Add the callback for an insertion event
		 */
		(void) ddi_add_event_handler(SD_DEVINFO(un),
		    un->un_insert_event, sd_event_callback, (void *)un,
		    &(un->un_insert_cb_id));
	}

	if ((un->un_remove_event == NULL) &&
	    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
	    &un->un_remove_event) == DDI_SUCCESS)) {
		/*
		 * Add the callback for a removal event
		 */
		(void) ddi_add_event_handler(SD_DEVINFO(un),
		    un->un_remove_event, sd_event_callback, (void *)un,
		    &(un->un_remove_cb_id));
	}
}


/*
 * Function: sd_event_callback
 *
 * Description: This routine handles insert/remove events (photon). The
 *		state is changed to OFFLINE which can be used to suppress
 *		error msgs. (fibre only)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Callout thread context
 */
/* ARGSUSED */
static void
sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
    void *bus_impldata)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
	if (event == un->un_insert_event) {
		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
		mutex_enter(SD_MUTEX(un));
		if (un->un_state == SD_STATE_OFFLINE) {
			if (un->un_last_state != SD_STATE_SUSPENDED) {
				un->un_state = un->un_last_state;
			} else {
				/*
				 * We have gone through SUSPEND/RESUME while
				 * we were offline. Restore the last state
				 */
				un->un_state = un->un_save_state;
			}
		}
		mutex_exit(SD_MUTEX(un));

		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
	} else if (event == un->un_remove_event) {
		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
		mutex_enter(SD_MUTEX(un));
		/*
		 * We need to handle an event callback that occurs during
		 * the suspend operation, since we don't prevent it.
		 */
		if (un->un_state != SD_STATE_OFFLINE) {
			if (un->un_state != SD_STATE_SUSPENDED) {
				New_state(un, SD_STATE_OFFLINE);
			} else {
				un->un_last_state = SD_STATE_OFFLINE;
			}
		}
		mutex_exit(SD_MUTEX(un));
	} else {
		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
		    "!Unknown event\n");
	}

}
#endif

/*
 * Function: sd_cache_control()
 *
 * Description: This routine is the driver entry point for setting
 *		read and write caching by modifying the WCE (write cache
 *		enable) and RCD (read cache disable) bits of mode
 *		page 8 (MODEPAGE_CACHING).
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		structure for this target.
 *		rcd_flag - flag for controlling the read cache
 *		wce_flag - flag for controlling the write cache
 *
 * Return Code: EIO
 *		code returned by sd_send_scsi_MODE_SENSE and
 *		sd_send_scsi_MODE_SELECT
 *
 * Context: Kernel Thread
 */

static int
sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag)
{
	struct mode_caching *mode_caching_page;
	uchar_t *header;
	size_t buflen;
	int hdrlen;
	int bd_len;
	int rval = 0;
	struct mode_header_grp2 *mhp;
	struct sd_lun *un;
	int status;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/*
	 * Do a test unit ready, otherwise a mode sense may not work if this
	 * is the first command sent to the device after boot.
	 */
	status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
	if (status != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	if (un->un_f_cfg_is_atapi == TRUE) {
		hdrlen = MODE_HEADER_LENGTH_GRP2;
	} else {
		hdrlen = MODE_HEADER_LENGTH;
	}

	/*
	 * Allocate memory for the retrieved mode page and its headers. Set
	 * a pointer to the page itself. Use mode_cache_scsi3 to ensure
	 * we get all of the mode sense data; otherwise, the mode select
	 * will fail. mode_cache_scsi3 is a superset of mode_caching.
	 */
	buflen = hdrlen + MODE_BLK_DESC_LENGTH +
	    sizeof (struct mode_cache_scsi3);

	header = kmem_zalloc(buflen, KM_SLEEP);

	/* Get the information from the device. */
	if (un->un_f_cfg_is_atapi == TRUE) {
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen,
		    MODEPAGE_CACHING, SD_PATH_DIRECT);
	} else {
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
		    MODEPAGE_CACHING, SD_PATH_DIRECT);
	}

	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sd_cache_control: Mode Sense Failed\n");
		goto mode_sense_failed;
	}

	/*
	 * Determine size of Block Descriptors in order to locate
	 * the mode page data. ATAPI devices return 0, SCSI devices
	 * should return MODE_BLK_DESC_LENGTH.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		mhp = (struct mode_header_grp2 *)header;
		bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
	} else {
		bd_len = ((struct mode_header *)header)->bdesc_length;
	}

	if (bd_len > MODE_BLK_DESC_LENGTH) {
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
		    "sd_cache_control: Mode Sense returned invalid block "
		    "descriptor length\n");
		rval = EIO;
		goto mode_sense_failed;
	}

	mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
	if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
		    "sd_cache_control: Mode Sense caching page code mismatch "
		    "%d\n", mode_caching_page->mode_page.code);
		rval = EIO;
		goto mode_sense_failed;
	}

	/* Check the relevant bits on successful mode sense. */
	if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) ||
	    (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) ||
	    (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) ||
	    (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) {

		size_t sbuflen;
		uchar_t save_pg;

		/*
		 * Construct select buffer length based on the
		 * length of the sense data returned.
		 */
		sbuflen = hdrlen + bd_len +
		    sizeof (struct mode_page) +
		    (int)mode_caching_page->mode_page.length;

		/*
		 * Set the caching bits as requested.
		 */
		if (rcd_flag == SD_CACHE_ENABLE)
			mode_caching_page->rcd = 0;
		else if (rcd_flag == SD_CACHE_DISABLE)
			mode_caching_page->rcd = 1;

		if (wce_flag == SD_CACHE_ENABLE)
			mode_caching_page->wce = 1;
		else if (wce_flag == SD_CACHE_DISABLE)
			mode_caching_page->wce = 0;

		/*
		 * Save the page if the mode sense says the
		 * drive supports it.
		 */
		save_pg = mode_caching_page->mode_page.ps ?
		    SD_SAVE_PAGE : SD_DONTSAVE_PAGE;

		/* Clear reserved bits before mode select. */
		mode_caching_page->mode_page.ps = 0;

		/*
		 * Clear out mode header for mode select.
		 * The rest of the retrieved page will be reused.
		 */
		bzero(header, hdrlen);

		if (un->un_f_cfg_is_atapi == TRUE) {
			mhp = (struct mode_header_grp2 *)header;
			mhp->bdesc_length_hi = bd_len >> 8;
			mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff;
		} else {
			((struct mode_header *)header)->bdesc_length = bd_len;
		}

		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

		/* Issue mode select to change the cache settings */
		if (un->un_f_cfg_is_atapi == TRUE) {
			rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1,
			    header, sbuflen, save_pg, SD_PATH_DIRECT);
		} else {
			rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
			    header, sbuflen, save_pg, SD_PATH_DIRECT);
		}

	}


mode_sense_failed:

	kmem_free(header, buflen);

	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
	return (rval);
}

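
/*
 * Illustrative sketch (assumed caller, not actual driver code): enabling
 * the write cache while keeping the read cache enabled would look like:
 *
 *	sd_ssc_t *ssc = sd_ssc_init(un);
 *	int rval = sd_cache_control(ssc, SD_CACHE_ENABLE, SD_CACHE_ENABLE);
 *	sd_ssc_fini(ssc);
 *
 * Note that rcd_flag == SD_CACHE_ENABLE clears RCD (read cache disable),
 * i.e. it enables the read cache.
 */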

/*
 * Function: sd_get_write_cache_enabled()
 *
 * Description: This routine is the driver entry point for determining if
 *		write caching is enabled. It examines the WCE (write cache
 *		enable) bits of mode page 8 (MODEPAGE_CACHING).
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		structure for this target.
 *		is_enabled - pointer to int where write cache enabled state
 *		is returned (non-zero -> write cache enabled)
 *
 * Return Code: EIO
 *		code returned by sd_send_scsi_MODE_SENSE
 *
 * Context: Kernel Thread
 *
 * NOTE: If ioctl is added to disable write cache, this sequence should
 *	be followed so that no locking is required for accesses to
 *	un->un_f_write_cache_enabled:
 *	do mode select to clear wce
 *	do synchronize cache to flush cache
 *	set un->un_f_write_cache_enabled = FALSE
 *
 *	Conversely, an ioctl to enable the write cache should be done
 *	in this order:
 *	set un->un_f_write_cache_enabled = TRUE
 *	do mode select to set wce
 */

static int
sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled)
{
	struct mode_caching *mode_caching_page;
	uchar_t *header;
	size_t buflen;
	int hdrlen;
	int bd_len;
	int rval = 0;
	struct sd_lun *un;
	int status;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(is_enabled != NULL);

	/* in case of error, flag as enabled */
	*is_enabled = TRUE;

	/*
	 * Do a test unit ready, otherwise a mode sense may not work if this
	 * is the first command sent to the device after boot.
	 */
	status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);

	if (status != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	if (un->un_f_cfg_is_atapi == TRUE) {
		hdrlen = MODE_HEADER_LENGTH_GRP2;
	} else {
		hdrlen = MODE_HEADER_LENGTH;
	}

	/*
	 * Allocate memory for the retrieved mode page and its headers. Set
	 * a pointer to the page itself.
	 */
	buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching);
	header = kmem_zalloc(buflen, KM_SLEEP);

	/* Get the information from the device. */
	if (un->un_f_cfg_is_atapi == TRUE) {
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen,
		    MODEPAGE_CACHING, SD_PATH_DIRECT);
	} else {
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
		    MODEPAGE_CACHING, SD_PATH_DIRECT);
	}

	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sd_get_write_cache_enabled: Mode Sense Failed\n");
		goto mode_sense_failed;
	}

	/*
	 * Determine size of Block Descriptors in order to locate
	 * the mode page data. ATAPI devices return 0, SCSI devices
	 * should return MODE_BLK_DESC_LENGTH.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		struct mode_header_grp2 *mhp;
		mhp = (struct mode_header_grp2 *)header;
		bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
	} else {
		bd_len = ((struct mode_header *)header)->bdesc_length;
	}

	if (bd_len > MODE_BLK_DESC_LENGTH) {
		/* FMA should make upset complain here */
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
		    "sd_get_write_cache_enabled: Mode Sense returned invalid "
		    "block descriptor length\n");
		rval = EIO;
		goto mode_sense_failed;
	}

	mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
	if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
		/* FMA could make upset complain here */
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
		    "sd_get_write_cache_enabled: Mode Sense caching page "
		    "code mismatch %d\n", mode_caching_page->mode_page.code);
		rval = EIO;
		goto mode_sense_failed;
	}
	*is_enabled = mode_caching_page->wce;

mode_sense_failed:
	if (rval == 0) {
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
	} else if (rval == EIO) {
		/*
		 * Some disks do not support mode sense(6); we should
		 * ignore this kind of error (sense key is 0x5 -
		 * illegal request).
		 */
		uint8_t *sensep;
		int senlen;

		sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
		senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
		    ssc->ssc_uscsi_cmd->uscsi_rqresid);

		if (senlen > 0 &&
		    scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
		} else {
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		}
	} else {
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
	kmem_free(header, buflen);
	return (rval);
}

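/*
 * Illustrative sketch (assumed caller): querying the current write cache
 * state with the routine above:
 *
 *	int wce = 0;
 *	if (sd_get_write_cache_enabled(ssc, &wce) == 0 && wce != 0) {
 *		... the write cache is enabled, so a SYNCHRONIZE CACHE
 *		... may be required before power-down
 *	}
 */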

/*
 * Function: sd_get_nv_sup()
 *
 * Description: This routine is the driver entry point for
 *		determining whether non-volatile cache is supported. This
 *		determination process works as follows:
 *
 *		1. sd first queries sd.conf on whether
 *		suppress_cache_flush bit is set for this device.
 *
 *		2. if not there, then queries the internal disk table.
 *
 *		3. if either sd.conf or internal disk table specifies
 *		cache flush be suppressed, we don't bother checking
 *		NV_SUP bit.
 *
 *		If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries
 *		the optional INQUIRY VPD page 0x86. If the device
 *		supports VPD page 0x86, sd examines the NV_SUP
 *		(non-volatile cache support) bit in the INQUIRY VPD page
 *		0x86:
 *		o If NV_SUP bit is set, sd assumes the device has a
 *		non-volatile cache and sets un_f_sync_nv_supported to
 *		TRUE.
 *		o Otherwise cache is not non-volatile,
 *		un_f_sync_nv_supported is set to FALSE.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		structure for this target.
 *
 * Context: Kernel Thread
 */

static void
sd_get_nv_sup(sd_ssc_t *ssc)
{
	int rval = 0;
	uchar_t *inq86 = NULL;
	size_t inq86_len = MAX_INQUIRY_SIZE;
	size_t inq86_resid = 0;
	struct dk_callback *dkc;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Be conservative on the device's support of
	 * SYNC_NV bit: un_f_sync_nv_supported is
	 * initialized to be false.
	 */
	un->un_f_sync_nv_supported = FALSE;

	/*
	 * If either sd.conf or internal disk table
	 * specifies cache flush be suppressed, then
	 * we don't bother checking NV_SUP bit.
	 */
	if (un->un_f_suppress_cache_flush == TRUE) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	if (sd_check_vpd_page_support(ssc) == 0 &&
	    un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
		mutex_exit(SD_MUTEX(un));
		/* collect page 86 data if available */
		inq86 = kmem_zalloc(inq86_len, KM_SLEEP);

		rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len,
		    0x01, 0x86, &inq86_resid);

		if (rval == 0 && (inq86_len - inq86_resid > 6)) {
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_get_nv_sup: \
successfully get VPD page: %x \
PAGE LENGTH: %x BYTE 6: %x\n",
			    inq86[1], inq86[3], inq86[6]);

			mutex_enter(SD_MUTEX(un));
			/*
			 * check the value of NV_SUP bit: only if the device
			 * reports NV_SUP bit to be 1, the
			 * un_f_sync_nv_supported bit will be set to true.
			 */
			if (inq86[6] & SD_VPD_NV_SUP) {
				un->un_f_sync_nv_supported = TRUE;
			}
			mutex_exit(SD_MUTEX(un));
		} else if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		kmem_free(inq86, inq86_len);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Send a SYNC CACHE command to check whether
	 * SYNC_NV bit is supported. By this point
	 * un_f_sync_nv_supported must have been set
	 * to its correct value.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_sync_nv_supported) {
		mutex_exit(SD_MUTEX(un));
		dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
		dkc->dkc_flag = FLUSH_VOLATILE;
		(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);

		/*
		 * Send a TEST UNIT READY command to the device. This should
		 * clear any outstanding UNIT ATTENTION that may be present.
		 */
		rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
		if (rval != 0)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);

		kmem_free(dkc, sizeof (struct dk_callback));
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \
un_f_suppress_cache_flush is set to %d\n",
	    un->un_f_suppress_cache_flush);
}

/*
 * Function: sd_make_device
 *
 * Description: Utility routine to return the Solaris device number from
 *		the data in the device's dev_info structure.
 *
 * Return Code: The Solaris device number
 *
 * Context: Any
 */

static dev_t
sd_make_device(dev_info_t *devi)
{
	return (makedevice(ddi_driver_major(devi),
	    ddi_get_instance(devi) << SDUNIT_SHIFT));
}

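
/*
 * Illustrative sketch: sd_make_device() is the inverse of the SDUNIT()
 * and SDPART() macros used throughout this file. For the dev_t built
 * above, the partition bits are left clear:
 *
 *	dev_t dev = sd_make_device(devi);
 *	ASSERT(SDUNIT(dev) == ddi_get_instance(devi));
 *	ASSERT(SDPART(dev) == 0);
 */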

/*
 * Function: sd_pm_entry
 *
 * Description: Called at the start of a new command to manage power
 *		and busy status of a device. This includes determining whether
 *		the current power state of the device is sufficient for
 *		performing the command or whether it must be changed.
 *		The PM framework is notified appropriately.
 *		Only with a return status of DDI_SUCCESS will the
 *		component be marked busy to the framework.
 *
 *		All callers of sd_pm_entry must check the return status
 *		and only call sd_pm_exit if it was DDI_SUCCESS. A status
 *		of DDI_FAILURE indicates the device failed to power up.
 *		In this case un_pm_count has been adjusted so the result
 *		on exit is still powered down, i.e. count is less than 0.
 *		Calling sd_pm_exit with this count value hits an ASSERT.
 *
 * Return Code: DDI_SUCCESS or DDI_FAILURE
 *
 * Context: Kernel thread context.
 */

static int
sd_pm_entry(struct sd_lun *un)
{
	int return_status = DDI_SUCCESS;

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");

	if (un->un_f_pm_is_enabled == FALSE) {
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_entry: exiting, PM not enabled\n");
		return (return_status);
	}

	/*
	 * Just increment a counter if PM is enabled. On the transition from
	 * 0 ==> 1, mark the device as busy. The iodone side will decrement
	 * the count with each IO and mark the device as idle when the count
	 * hits 0.
	 *
	 * If the count is less than 0 the device is powered down. If a powered
	 * down device is successfully powered up then the count must be
	 * incremented to reflect the power up. Note that it'll get incremented
	 * a second time to become busy.
	 *
	 * Because the following has the potential to change the device state
	 * and must release the un_pm_mutex to do so, only one thread can be
	 * allowed through at a time.
	 */

	mutex_enter(&un->un_pm_mutex);
	while (un->un_pm_busy == TRUE) {
		cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
	}
	un->un_pm_busy = TRUE;

	if (un->un_pm_count < 1) {

		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");

		/*
		 * Indicate we are now busy so the framework won't attempt to
		 * power down the device. This call will only fail if either
		 * we passed a bad component number or the device has no
		 * components. Neither of these should ever happen.
		 */
		mutex_exit(&un->un_pm_mutex);
		return_status = pm_busy_component(SD_DEVINFO(un), 0);
		ASSERT(return_status == DDI_SUCCESS);

		mutex_enter(&un->un_pm_mutex);

		if (un->un_pm_count < 0) {
			mutex_exit(&un->un_pm_mutex);

			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_entry: power up component\n");

			/*
			 * pm_raise_power will cause sdpower to be called
			 * which brings the device power level to the
			 * desired state. If successful, un_pm_count and
			 * un_power_level will be updated appropriately.
			 */
			return_status = pm_raise_power(SD_DEVINFO(un), 0,
			    SD_PM_STATE_ACTIVE(un));

			mutex_enter(&un->un_pm_mutex);

			if (return_status != DDI_SUCCESS) {
				/*
				 * Power up failed.
				 * Idle the device and adjust the count
				 * so the result on exit is that we're
				 * still powered down, i.e. count is less
				 * than 0.
				 */
				SD_TRACE(SD_LOG_IO_PM, un,
				    "sd_pm_entry: power up failed,"
				    " idle the component\n");

				(void) pm_idle_component(SD_DEVINFO(un), 0);
				un->un_pm_count--;
			} else {
				/*
				 * Device is powered up, verify the
				 * count is non-negative.
				 * This is debug only.
				 */
				ASSERT(un->un_pm_count == 0);
			}
		}

		if (return_status == DDI_SUCCESS) {
			/*
			 * For performance, now that the device has been tagged
			 * as busy, and it's known to be powered up, update the
			 * chain types to use jump tables that do not include
			 * pm. This significantly lowers the overhead and
			 * therefore improves performance.
			 */

			mutex_exit(&un->un_pm_mutex);
			mutex_enter(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_entry: changing uscsi_chain_type from %d\n",
			    un->un_uscsi_chain_type);

			if (un->un_f_non_devbsize_supported) {
				un->un_buf_chain_type =
				    SD_CHAIN_INFO_RMMEDIA_NO_PM;
			} else {
				un->un_buf_chain_type =
				    SD_CHAIN_INFO_DISK_NO_PM;
			}
			un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;

			SD_TRACE(SD_LOG_IO_PM, un,
			    "    changed uscsi_chain_type to %d\n",
			    un->un_uscsi_chain_type);
			mutex_exit(SD_MUTEX(un));
			mutex_enter(&un->un_pm_mutex);

			if (un->un_pm_idle_timeid == NULL) {
				/* 300 ms. */
				un->un_pm_idle_timeid =
				    timeout(sd_pm_idletimeout_handler, un,
				    (drv_usectohz((clock_t)300000)));
				/*
				 * Include an extra call to busy which keeps
				 * the device busy with respect to the PM
				 * layer until the timer fires, at which time
				 * it'll get the extra idle call.
				 */
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			}
		}
	}
	un->un_pm_busy = FALSE;
	/* Next... */
	cv_signal(&un->un_pm_busy_cv);

	un->un_pm_count++;

	SD_TRACE(SD_LOG_IO_PM, un,
	    "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count);

	mutex_exit(&un->un_pm_mutex);

	return (return_status);
}


/*
 * Function: sd_pm_exit
 *
 * Description: Called at the completion of a command to manage busy
 *		status for the device. If the device becomes idle the
 *		PM framework is notified.
 *
 * Context: Kernel thread context
 */

static void
sd_pm_exit(struct sd_lun *un)
{
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n");

	/*
	 * After attach the following flag is only read, so don't
	 * take the penalty of acquiring a mutex for it.
	 */
	if (un->un_f_pm_is_enabled == TRUE) {

		mutex_enter(&un->un_pm_mutex);
		un->un_pm_count--;

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count);

		ASSERT(un->un_pm_count >= 0);
		if (un->un_pm_count == 0) {
			mutex_exit(&un->un_pm_mutex);

			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_exit: idle component\n");

			(void) pm_idle_component(SD_DEVINFO(un), 0);

		} else {
			mutex_exit(&un->un_pm_mutex);
		}
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n");
}

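
/*
 * Illustrative sketch: sd_pm_entry() and sd_pm_exit() must be paired
 * around any code that requires a powered-up device, as sdopen() and
 * sdclose() below demonstrate:
 *
 *	if (sd_pm_entry(un) == DDI_SUCCESS) {
 *		... issue command(s) to the device ...
 *		sd_pm_exit(un);
 *	}
 *
 * A DDI_FAILURE return must not be followed by sd_pm_exit().
 */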

/*
 * Function: sdopen
 *
 * Description: Driver's open(9e) entry point function.
 *
 * Arguments: dev_p - pointer to device number
 *		flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
 *		otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
 *		cred_p - user credential pointer
 *
 * Return Code: EINVAL
 *		ENXIO
 *		EIO
 *		EROFS
 *		EBUSY
 *
 * Context: Kernel thread context
 */
/* ARGSUSED */
static int
sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
{
	struct sd_lun *un;
	int nodelay;
	int part;
	uint64_t partmask;
	int instance;
	dev_t dev;
	int rval = EIO;
	diskaddr_t nblks = 0;
	diskaddr_t label_cap;

	/* Validate the open type */
	if (otyp >= OTYPCNT) {
		return (EINVAL);
	}

	dev = *dev_p;
	instance = SDUNIT(dev);
	mutex_enter(&sd_detach_mutex);

	/*
	 * Fail the open if there is no softstate for the instance, or
	 * if another thread somewhere is trying to detach the instance.
	 */
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    (un->un_detach_count != 0)) {
		mutex_exit(&sd_detach_mutex);
		/*
		 * The probe cache only needs to be cleared when open (9e)
		 * fails with ENXIO (4238046).
		 *
		 * Clearing the probe cache unconditionally is fine with
		 * separate sd/ssd binaries; on x86, where both parallel
		 * and fibre are handled in one binary, it can be an issue.
		 */
		sd_scsi_clear_probe_cache();
		return (ENXIO);
	}

	/*
	 * The un_layer_count is to prevent another thread in specfs from
	 * trying to detach the instance, which can happen when we are
	 * called from a higher-layer driver instead of thru specfs.
	 * This will not be needed when DDI provides a layered driver
	 * interface that allows specfs to know that an instance is in
	 * use by a layered driver & should not be detached.
	 *
	 * Note: the semantics for layered driver opens are exactly one
	 * close for every open.
	 */
	if (otyp == OTYP_LYR) {
		un->un_layer_count++;
	}

	/*
	 * Keep a count of the current # of opens in progress. This is because
	 * some layered drivers try to call us as a regular open. This can
	 * cause problems that we cannot prevent, however by keeping this
	 * count we can at least keep our open and detach routines from
	 * racing against each other under such conditions.
	 */
	un->un_opens_in_progress++;
	mutex_exit(&sd_detach_mutex);

	nodelay = (flag & (FNDELAY | FNONBLOCK));
	part = SDPART(dev);
	partmask = 1 << part;

	/*
	 * We use a semaphore here in order to serialize
	 * open and close requests on the device.
	 */
	sema_p(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	/*
	 * All device accesses go thru sdstrategy() where we check
	 * on suspend status but there could be a scsi_poll command,
	 * which bypasses sdstrategy(), so we need to check pm
	 * status.
	 */

	if (!nodelay) {
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}

		mutex_exit(SD_MUTEX(un));
		if (sd_pm_entry(un) != DDI_SUCCESS) {
			rval = EIO;
			SD_ERROR(SD_LOG_OPEN_CLOSE, un,
			    "sdopen: sd_pm_entry failed\n");
			goto open_failed_with_pm;
		}
		mutex_enter(SD_MUTEX(un));
	}

	/* check for previous exclusive open */
	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
	    "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
	    un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);

	if (un->un_exclopen & (partmask)) {
		goto excl_open_fail;
	}

	if (flag & FEXCL) {
		int i;
		if (un->un_ocmap.lyropen[part]) {
			goto excl_open_fail;
		}
		for (i = 0; i < (OTYPCNT - 1); i++) {
			if (un->un_ocmap.regopen[i] & (partmask)) {
				goto excl_open_fail;
			}
		}
	}

	/*
	 * Check the write permission if this is a removable media device,
	 * NDELAY has not been set, and writable permission is requested.
	 *
	 * Note: If NDELAY was set and this is write-protected media the WRITE
	 * attempt will fail with EIO as part of the I/O processing. This is a
	 * more permissive implementation that allows the open to succeed and
	 * WRITE attempts to fail when appropriate.
	 */
	if (un->un_f_chk_wp_open) {
		if ((flag & FWRITE) && (!nodelay)) {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Defer the check for write permission on a writable
			 * DVD drive till sdstrategy; do not fail the open
			 * even if FWRITE is set, since the device can be
			 * writable depending upon the media, and the media
			 * can change after the call to open().
			 */
			if (un->un_f_dvdram_writable_device == FALSE) {
				if (ISCD(un) || sr_check_wp(dev)) {
					rval = EROFS;
					mutex_enter(SD_MUTEX(un));
					SD_ERROR(SD_LOG_OPEN_CLOSE, un,
					    "sdopen: write to cd or write "
					    "protected media\n");
					goto open_fail;
				}
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * If opening in NDELAY/NONBLOCK mode, just return.
	 * Check if disk is ready and has a valid geometry later.
	 */
	if (!nodelay) {
		sd_ssc_t *ssc;

		mutex_exit(SD_MUTEX(un));
		ssc = sd_ssc_init(un);
		rval = sd_ready_and_valid(ssc, part);
		sd_ssc_fini(ssc);
		mutex_enter(SD_MUTEX(un));
		/*
		 * Fail if device is not ready or if the number of disk
		 * blocks is zero or negative for non CD devices.
		 */

		nblks = 0;

		if (rval == SD_READY_VALID && (!ISCD(un))) {
			/* if cmlb_partinfo fails, nblks remains 0 */
			mutex_exit(SD_MUTEX(un));
			(void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
			    NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));
		}

		if ((rval != SD_READY_VALID) ||
		    (!ISCD(un) && nblks <= 0)) {
			rval = un->un_f_has_removable_media ? ENXIO : EIO;
			SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
			    "device not ready or invalid disk block value\n");
			goto open_fail;
		}
#if defined(__i386) || defined(__amd64)
	} else {
		uchar_t *cp;
		/*
		 * x86 requires special nodelay handling, so that p0 is
		 * always defined and accessible.
10268 * Invalidate geometry only if device is not already open. 10269 */ 10270 cp = &un->un_ocmap.chkd[0]; 10271 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10272 if (*cp != (uchar_t)0) { 10273 break; 10274 } 10275 cp++; 10276 } 10277 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10278 mutex_exit(SD_MUTEX(un)); 10279 cmlb_invalidate(un->un_cmlbhandle, 10280 (void *)SD_PATH_DIRECT); 10281 mutex_enter(SD_MUTEX(un)); 10282 } 10283 10284 #endif 10285 } 10286 10287 if (otyp == OTYP_LYR) { 10288 un->un_ocmap.lyropen[part]++; 10289 } else { 10290 un->un_ocmap.regopen[otyp] |= partmask; 10291 } 10292 10293 /* Set up open and exclusive open flags */ 10294 if (flag & FEXCL) { 10295 un->un_exclopen |= (partmask); 10296 } 10297 10298 /* 10299 * If the lun is EFI labeled and lun capacity is greater than the 10300 * capacity contained in the label, log a sys-event to notify the 10301 * interested module. 10302 * To avoid an infinite loop of logging sys-event, we only log the 10303 * event when the lun is not opened in NDELAY mode. The event handler 10304 * should open the lun in NDELAY mode. 10305 */ 10306 if (!nodelay) { 10307 mutex_exit(SD_MUTEX(un)); 10308 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 10309 (void*)SD_PATH_DIRECT) == 0) { 10310 mutex_enter(SD_MUTEX(un)); 10311 if (un->un_f_blockcount_is_valid && 10312 un->un_blockcount > label_cap && 10313 un->un_f_expnevent == B_FALSE) { 10314 un->un_f_expnevent = B_TRUE; 10315 mutex_exit(SD_MUTEX(un)); 10316 sd_log_lun_expansion_event(un, 10317 (nodelay ? KM_NOSLEEP : KM_SLEEP)); 10318 mutex_enter(SD_MUTEX(un)); 10319 } 10320 } else { 10321 mutex_enter(SD_MUTEX(un)); 10322 } 10323 } 10324 10325 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10326 "open of part %d type %d\n", part, otyp); 10327 10328 mutex_exit(SD_MUTEX(un)); 10329 if (!nodelay) { 10330 sd_pm_exit(un); 10331 } 10332 10333 sema_v(&un->un_semoclose); 10334 10335 mutex_enter(&sd_detach_mutex); 10336 un->un_opens_in_progress--; 10337 mutex_exit(&sd_detach_mutex); 10338 10339 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 10340 return (DDI_SUCCESS); 10341 10342 excl_open_fail: 10343 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 10344 rval = EBUSY; 10345 10346 open_fail: 10347 mutex_exit(SD_MUTEX(un)); 10348 10349 /* 10350 * On a failed open we must exit the pm management. 10351 */ 10352 if (!nodelay) { 10353 sd_pm_exit(un); 10354 } 10355 open_failed_with_pm: 10356 sema_v(&un->un_semoclose); 10357 10358 mutex_enter(&sd_detach_mutex); 10359 un->un_opens_in_progress--; 10360 if (otyp == OTYP_LYR) { 10361 un->un_layer_count--; 10362 } 10363 mutex_exit(&sd_detach_mutex); 10364 10365 return (rval); 10366 } 10367 10368 10369 /* 10370 * Function: sdclose 10371 * 10372 * Description: Driver's close(9e) entry point function. 

/*
 * Function: sdclose
 *
 * Description: Driver's close(9e) entry point function.
 *
 * Arguments: dev - device number
 *		flag - file status flag, informational only
 *		otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
 *		cred_p - user credential pointer
 *
 * Return Code: ENXIO
 *
 * Context: Kernel thread context
 */
/* ARGSUSED */
static int
sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
{
	struct sd_lun *un;
	uchar_t *cp;
	int part;
	int nodelay;
	int rval = 0;

	/* Validate the open type */
	if (otyp >= OTYPCNT) {
		return (ENXIO);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	part = SDPART(dev);
	nodelay = flag & (FNDELAY | FNONBLOCK);

	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
	    "sdclose: close of part %d type %d\n", part, otyp);

	/*
	 * We use a semaphore here in order to serialize
	 * open and close requests on the device.
	 */
	sema_p(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	/* Don't proceed if power is being changed. */
	while (un->un_state == SD_STATE_PM_CHANGING) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}

	if (un->un_exclopen & (1 << part)) {
		un->un_exclopen &= ~(1 << part);
	}

	/* Update the open partition map */
	if (otyp == OTYP_LYR) {
		un->un_ocmap.lyropen[part] -= 1;
	} else {
		un->un_ocmap.regopen[otyp] &= ~(1 << part);
	}

	cp = &un->un_ocmap.chkd[0];
	while (cp < &un->un_ocmap.chkd[OCSIZE]) {
		if (*cp != NULL) {
			break;
		}
		cp++;
	}

	if (cp == &un->un_ocmap.chkd[OCSIZE]) {
		SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");

		/*
		 * We avoid persistence upon the last close, and set
		 * the throttle back to the maximum.
		 */
		un->un_throttle = un->un_saved_throttle;

		if (un->un_state == SD_STATE_OFFLINE) {
			if (un->un_f_is_fibre == FALSE) {
				scsi_log(SD_DEVINFO(un), sd_label,
				    CE_WARN, "offline\n");
			}
			mutex_exit(SD_MUTEX(un));
			cmlb_invalidate(un->un_cmlbhandle,
			    (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));

		} else {
			/*
			 * Flush any outstanding writes in NVRAM cache.
			 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
			 * cmd, it may not work for non-Pluto devices.
			 * SYNCHRONIZE CACHE is not required for removables,
			 * except DVD-RAM drives.
			 *
			 * Also note: because SYNCHRONIZE CACHE is currently
			 * the only command issued here that requires the
			 * drive be powered up, only do the power up before
			 * sending the Sync Cache command. If additional
			 * commands are added which require a powered up
			 * drive, the following sequence may have to change.
			 *
			 * And finally, note that parallel SCSI on SPARC
			 * only issues a Sync Cache to DVD-RAM, a newly
			 * supported device.
10477 */ 10478 #if defined(__i386) || defined(__amd64) 10479 if ((un->un_f_sync_cache_supported && 10480 un->un_f_sync_cache_required) || 10481 un->un_f_dvdram_writable_device == TRUE) { 10482 #else 10483 if (un->un_f_dvdram_writable_device == TRUE) { 10484 #endif 10485 mutex_exit(SD_MUTEX(un)); 10486 if (sd_pm_entry(un) == DDI_SUCCESS) { 10487 rval = 10488 sd_send_scsi_SYNCHRONIZE_CACHE(un, 10489 NULL); 10490 /* ignore error if not supported */ 10491 if (rval == ENOTSUP) { 10492 rval = 0; 10493 } else if (rval != 0) { 10494 rval = EIO; 10495 } 10496 sd_pm_exit(un); 10497 } else { 10498 rval = EIO; 10499 } 10500 mutex_enter(SD_MUTEX(un)); 10501 } 10502 10503 /* 10504 * For devices which supports DOOR_LOCK, send an ALLOW 10505 * MEDIA REMOVAL command, but don't get upset if it 10506 * fails. We need to raise the power of the drive before 10507 * we can call sd_send_scsi_DOORLOCK() 10508 */ 10509 if (un->un_f_doorlock_supported) { 10510 mutex_exit(SD_MUTEX(un)); 10511 if (sd_pm_entry(un) == DDI_SUCCESS) { 10512 sd_ssc_t *ssc; 10513 10514 ssc = sd_ssc_init(un); 10515 rval = sd_send_scsi_DOORLOCK(ssc, 10516 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 10517 if (rval != 0) 10518 sd_ssc_assessment(ssc, 10519 SD_FMT_IGNORE); 10520 sd_ssc_fini(ssc); 10521 10522 sd_pm_exit(un); 10523 if (ISCD(un) && (rval != 0) && 10524 (nodelay != 0)) { 10525 rval = ENXIO; 10526 } 10527 } else { 10528 rval = EIO; 10529 } 10530 mutex_enter(SD_MUTEX(un)); 10531 } 10532 10533 /* 10534 * If a device has removable media, invalidate all 10535 * parameters related to media, such as geometry, 10536 * blocksize, and blockcount. 10537 */ 10538 if (un->un_f_has_removable_media) { 10539 sr_ejected(un); 10540 } 10541 10542 /* 10543 * Destroy the cache (if it exists) which was 10544 * allocated for the write maps since this is 10545 * the last close for this media. 10546 */ 10547 if (un->un_wm_cache) { 10548 /* 10549 * Check if there are pending commands. 10550 * and if there are give a warning and 10551 * do not destroy the cache. 10552 */ 10553 if (un->un_ncmds_in_driver > 0) { 10554 scsi_log(SD_DEVINFO(un), 10555 sd_label, CE_WARN, 10556 "Unable to clean up memory " 10557 "because of pending I/O\n"); 10558 } else { 10559 kmem_cache_destroy( 10560 un->un_wm_cache); 10561 un->un_wm_cache = NULL; 10562 } 10563 } 10564 } 10565 } 10566 10567 mutex_exit(SD_MUTEX(un)); 10568 sema_v(&un->un_semoclose); 10569 10570 if (otyp == OTYP_LYR) { 10571 mutex_enter(&sd_detach_mutex); 10572 /* 10573 * The detach routine may run when the layer count 10574 * drops to zero. 10575 */ 10576 un->un_layer_count--; 10577 mutex_exit(&sd_detach_mutex); 10578 } 10579 10580 return (rval); 10581 } 10582 10583 10584 /* 10585 * Function: sd_ready_and_valid 10586 * 10587 * Description: Test if device is ready and has a valid geometry. 10588 * 10589 * Arguments: ssc - sd_ssc_t will contain un 10590 * un - driver soft state (unit) structure 10591 * 10592 * Return Code: SD_READY_VALID ready and valid label 10593 * SD_NOT_READY_VALID not ready, no label 10594 * SD_RESERVED_BY_OTHERS reservation conflict 10595 * 10596 * Context: Never called at interrupt context. 
10597 */ 10598 10599 static int 10600 sd_ready_and_valid(sd_ssc_t *ssc, int part) 10601 { 10602 struct sd_errstats *stp; 10603 uint64_t capacity; 10604 uint_t lbasize; 10605 int rval = SD_READY_VALID; 10606 char name_str[48]; 10607 boolean_t is_valid; 10608 struct sd_lun *un; 10609 int status; 10610 10611 ASSERT(ssc != NULL); 10612 un = ssc->ssc_un; 10613 ASSERT(un != NULL); 10614 ASSERT(!mutex_owned(SD_MUTEX(un))); 10615 10616 mutex_enter(SD_MUTEX(un)); 10617 /* 10618 * If a device has removable media, we must check if media is 10619 * ready when checking if this device is ready and valid. 10620 */ 10621 if (un->un_f_has_removable_media) { 10622 mutex_exit(SD_MUTEX(un)); 10623 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10624 10625 if (status != 0) { 10626 rval = SD_NOT_READY_VALID; 10627 mutex_enter(SD_MUTEX(un)); 10628 10629 /* Ignore all failed status for removalbe media */ 10630 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10631 10632 goto done; 10633 } 10634 10635 is_valid = SD_IS_VALID_LABEL(un); 10636 mutex_enter(SD_MUTEX(un)); 10637 if (!is_valid || 10638 (un->un_f_blockcount_is_valid == FALSE) || 10639 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 10640 10641 /* capacity has to be read every open. */ 10642 mutex_exit(SD_MUTEX(un)); 10643 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 10644 &lbasize, SD_PATH_DIRECT); 10645 10646 if (status != 0) { 10647 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10648 10649 cmlb_invalidate(un->un_cmlbhandle, 10650 (void *)SD_PATH_DIRECT); 10651 mutex_enter(SD_MUTEX(un)); 10652 rval = SD_NOT_READY_VALID; 10653 10654 goto done; 10655 } else { 10656 mutex_enter(SD_MUTEX(un)); 10657 sd_update_block_info(un, lbasize, capacity); 10658 } 10659 } 10660 10661 /* 10662 * Check if the media in the device is writable or not. 10663 */ 10664 if (!is_valid && ISCD(un)) { 10665 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 10666 } 10667 10668 } else { 10669 /* 10670 * Do a test unit ready to clear any unit attention from non-cd 10671 * devices. 10672 */ 10673 mutex_exit(SD_MUTEX(un)); 10674 10675 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10676 if (status != 0) { 10677 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10678 } 10679 10680 mutex_enter(SD_MUTEX(un)); 10681 } 10682 10683 10684 /* 10685 * If this is a non 512 block device, allocate space for 10686 * the wmap cache. This is being done here since every time 10687 * a media is changed this routine will be called and the 10688 * block size is a function of media rather than device. 10689 */ 10690 if (((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR || 10691 un->un_f_non_devbsize_supported) && 10692 un->un_tgt_blocksize != DEV_BSIZE) || 10693 un->un_f_enable_rmw) { 10694 if (!(un->un_wm_cache)) { 10695 (void) snprintf(name_str, sizeof (name_str), 10696 "%s%d_cache", 10697 ddi_driver_name(SD_DEVINFO(un)), 10698 ddi_get_instance(SD_DEVINFO(un))); 10699 un->un_wm_cache = kmem_cache_create( 10700 name_str, sizeof (struct sd_w_map), 10701 8, sd_wm_cache_constructor, 10702 sd_wm_cache_destructor, NULL, 10703 (void *)un, NULL, 0); 10704 if (!(un->un_wm_cache)) { 10705 rval = ENOMEM; 10706 goto done; 10707 } 10708 } 10709 } 10710 10711 if (un->un_state == SD_STATE_NORMAL) { 10712 /* 10713 * If the target is not yet ready here (defined by a TUR 10714 * failure), invalidate the geometry and print an 'offline' 10715 * message. This is a legacy message, as the state of the 10716 * target is not actually changed to SD_STATE_OFFLINE. 
10717 * 10718 * If the TUR fails for EACCES (Reservation Conflict), 10719 * SD_RESERVED_BY_OTHERS will be returned to indicate 10720 * reservation conflict. If the TUR fails for other 10721 * reasons, SD_NOT_READY_VALID will be returned. 10722 */ 10723 int err; 10724 10725 mutex_exit(SD_MUTEX(un)); 10726 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10727 mutex_enter(SD_MUTEX(un)); 10728 10729 if (err != 0) { 10730 mutex_exit(SD_MUTEX(un)); 10731 cmlb_invalidate(un->un_cmlbhandle, 10732 (void *)SD_PATH_DIRECT); 10733 mutex_enter(SD_MUTEX(un)); 10734 if (err == EACCES) { 10735 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10736 "reservation conflict\n"); 10737 rval = SD_RESERVED_BY_OTHERS; 10738 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10739 } else { 10740 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10741 "drive offline\n"); 10742 rval = SD_NOT_READY_VALID; 10743 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 10744 } 10745 goto done; 10746 } 10747 } 10748 10749 if (un->un_f_format_in_progress == FALSE) { 10750 mutex_exit(SD_MUTEX(un)); 10751 10752 (void) cmlb_validate(un->un_cmlbhandle, 0, 10753 (void *)SD_PATH_DIRECT); 10754 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL, 10755 NULL, (void *) SD_PATH_DIRECT) != 0) { 10756 rval = SD_NOT_READY_VALID; 10757 mutex_enter(SD_MUTEX(un)); 10758 10759 goto done; 10760 } 10761 if (un->un_f_pkstats_enabled) { 10762 sd_set_pstats(un); 10763 SD_TRACE(SD_LOG_IO_PARTITION, un, 10764 "sd_ready_and_valid: un:0x%p pstats created and " 10765 "set\n", un); 10766 } 10767 mutex_enter(SD_MUTEX(un)); 10768 } 10769 10770 /* 10771 * If this device supports DOOR_LOCK command, try and send 10772 * this command to PREVENT MEDIA REMOVAL, but don't get upset 10773 * if it fails. For a CD, however, it is an error 10774 */ 10775 if (un->un_f_doorlock_supported) { 10776 mutex_exit(SD_MUTEX(un)); 10777 status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 10778 SD_PATH_DIRECT); 10779 10780 if ((status != 0) && ISCD(un)) { 10781 rval = SD_NOT_READY_VALID; 10782 mutex_enter(SD_MUTEX(un)); 10783 10784 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10785 10786 goto done; 10787 } else if (status != 0) 10788 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10789 mutex_enter(SD_MUTEX(un)); 10790 } 10791 10792 /* The state has changed, inform the media watch routines */ 10793 un->un_mediastate = DKIO_INSERTED; 10794 cv_broadcast(&un->un_state_cv); 10795 rval = SD_READY_VALID; 10796 10797 done: 10798 10799 /* 10800 * Initialize the capacity kstat value, if no media previously 10801 * (capacity kstat is 0) and a media has been inserted 10802 * (un_blockcount > 0). 10803 */ 10804 if (un->un_errstats != NULL) { 10805 stp = (struct sd_errstats *)un->un_errstats->ks_data; 10806 if ((stp->sd_capacity.value.ui64 == 0) && 10807 (un->un_f_blockcount_is_valid == TRUE)) { 10808 stp->sd_capacity.value.ui64 = 10809 (uint64_t)((uint64_t)un->un_blockcount * 10810 un->un_sys_blocksize); 10811 } 10812 } 10813 10814 mutex_exit(SD_MUTEX(un)); 10815 return (rval); 10816 } 10817 10818 10819 /* 10820 * Function: sdmin 10821 * 10822 * Description: Routine to limit the size of a data transfer. Used in 10823 * conjunction with physio(9F). 10824 * 10825 * Arguments: bp - pointer to the indicated buf(9S) struct. 10826 * 10827 * Context: Kernel thread context. 
10828 */
10829
10830 static void
10831 sdmin(struct buf *bp)
10832 {
10833 struct sd_lun *un;
10834 int instance;
10835
10836 instance = SDUNIT(bp->b_edev);
10837
10838 un = ddi_get_soft_state(sd_state, instance);
10839 ASSERT(un != NULL);
10840
10841 /*
10842 * We depend on buf breakup to restrict
10843 * IO size if it is enabled.
10844 */
10845 if (un->un_buf_breakup_supported) {
10846 return;
10847 }
10848
10849 if (bp->b_bcount > un->un_max_xfer_size) {
10850 bp->b_bcount = un->un_max_xfer_size;
10851 }
10852 }
10853
10854
10855 /*
10856 * Function: sdread
10857 *
10858 * Description: Driver's read(9e) entry point function.
10859 *
10860 * Arguments: dev - device number
10861 * uio - structure pointer describing where data is to be stored
10862 * in user's space
10863 * cred_p - user credential pointer
10864 *
10865 * Return Code: ENXIO
10866 * EIO
10867 * EINVAL
10868 * value returned by physio
10869 *
10870 * Context: Kernel thread context.
10871 */
10872 /* ARGSUSED */
10873 static int
10874 sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
10875 {
10876 struct sd_lun *un = NULL;
10877 int secmask;
10878 int err = 0;
10879 sd_ssc_t *ssc;
10880
10881 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10882 return (ENXIO);
10883 }
10884
10885 ASSERT(!mutex_owned(SD_MUTEX(un)));
10886
10887
10888 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10889 mutex_enter(SD_MUTEX(un));
10890 /*
10891 * Because the call to sd_ready_and_valid will issue I/O we
10892 * must wait here if either the device is suspended or
10893 * if its power level is changing.
10894 */
10895 while ((un->un_state == SD_STATE_SUSPENDED) ||
10896 (un->un_state == SD_STATE_PM_CHANGING)) {
10897 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10898 }
10899 un->un_ncmds_in_driver++;
10900 mutex_exit(SD_MUTEX(un));
10901
10902 /* Initialize sd_ssc_t for internal uscsi commands */
10903 ssc = sd_ssc_init(un);
10904 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10905 err = EIO;
10906 } else {
10907 err = 0;
10908 }
10909 sd_ssc_fini(ssc);
10910
10911 mutex_enter(SD_MUTEX(un));
10912 un->un_ncmds_in_driver--;
10913 ASSERT(un->un_ncmds_in_driver >= 0);
10914 mutex_exit(SD_MUTEX(un));
10915 if (err != 0)
10916 return (err);
10917 }
10918
10919 /*
10920 * Read requests are restricted to multiples of the system block size.
10921 */
10922 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
10923 !un->un_f_enable_rmw)
10924 secmask = un->un_tgt_blocksize - 1;
10925 else
10926 secmask = DEV_BSIZE - 1;
10927
10928 if (uio->uio_loffset & ((offset_t)(secmask))) {
10929 SD_ERROR(SD_LOG_READ_WRITE, un,
10930 "sdread: file offset not modulo %d\n",
10931 secmask + 1);
10932 err = EINVAL;
10933 } else if (uio->uio_iov->iov_len & (secmask)) {
10934 SD_ERROR(SD_LOG_READ_WRITE, un,
10935 "sdread: transfer length not modulo %d\n",
10936 secmask + 1);
10937 err = EINVAL;
10938 } else {
10939 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
10940 }
10941
10942 return (err);
10943 }
10944
10945
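/*
 * [Editorial aside, not part of the original source.]  The secmask
 * tests in sdread() rely on a standard trick: for a power-of-two
 * block size, (x & (bsize - 1)) equals x % bsize, so a nonzero
 * result means x is not block-aligned.  A minimal userland sketch
 * of that arithmetic:
 */

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int
is_misaligned(uint64_t offset, uint64_t len, uint32_t bsize)
{
	uint32_t secmask = bsize - 1;

	assert((bsize & (bsize - 1)) == 0);	/* must be a power of two */
	return ((offset & secmask) != 0 || (len & secmask) != 0);
}

int
main(void)
{
	(void) printf("%d\n", is_misaligned(512, 1024, 512));	/* 0: aligned */
	(void) printf("%d\n", is_misaligned(513, 1024, 512));	/* 1: bad offset */
	(void) printf("%d\n", is_misaligned(512, 1000, 512));	/* 1: bad length */
	return (0);
}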
10946 /*
10947 * Function: sdwrite
10948 *
10949 * Description: Driver's write(9e) entry point function.
10950 *
10951 * Arguments: dev - device number
10952 * uio - structure pointer describing where data is stored in
10953 * user's space
10954 * cred_p - user credential pointer
10955 *
10956 * Return Code: ENXIO
10957 * EIO
10958 * EINVAL
10959 * value returned by physio
10960 *
10961 * Context: Kernel thread context.
10962 */
10963 /* ARGSUSED */
10964 static int
10965 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
10966 {
10967 struct sd_lun *un = NULL;
10968 int secmask;
10969 int err = 0;
10970 sd_ssc_t *ssc;
10971
10972 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10973 return (ENXIO);
10974 }
10975
10976 ASSERT(!mutex_owned(SD_MUTEX(un)));
10977
10978 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10979 mutex_enter(SD_MUTEX(un));
10980 /*
10981 * Because the call to sd_ready_and_valid will issue I/O we
10982 * must wait here if either the device is suspended or
10983 * if its power level is changing.
10984 */
10985 while ((un->un_state == SD_STATE_SUSPENDED) ||
10986 (un->un_state == SD_STATE_PM_CHANGING)) {
10987 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10988 }
10989 un->un_ncmds_in_driver++;
10990 mutex_exit(SD_MUTEX(un));
10991
10992 /* Initialize sd_ssc_t for internal uscsi commands */
10993 ssc = sd_ssc_init(un);
10994 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10995 err = EIO;
10996 } else {
10997 err = 0;
10998 }
10999 sd_ssc_fini(ssc);
11000
11001 mutex_enter(SD_MUTEX(un));
11002 un->un_ncmds_in_driver--;
11003 ASSERT(un->un_ncmds_in_driver >= 0);
11004 mutex_exit(SD_MUTEX(un));
11005 if (err != 0)
11006 return (err);
11007 }
11008
11009 /*
11010 * Write requests are restricted to multiples of the system block size.
11011 */
11012 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11013 !un->un_f_enable_rmw)
11014 secmask = un->un_tgt_blocksize - 1;
11015 else
11016 secmask = DEV_BSIZE - 1;
11017
11018 if (uio->uio_loffset & ((offset_t)(secmask))) {
11019 SD_ERROR(SD_LOG_READ_WRITE, un,
11020 "sdwrite: file offset not modulo %d\n",
11021 secmask + 1);
11022 err = EINVAL;
11023 } else if (uio->uio_iov->iov_len & (secmask)) {
11024 SD_ERROR(SD_LOG_READ_WRITE, un,
11025 "sdwrite: transfer length not modulo %d\n",
11026 secmask + 1);
11027 err = EINVAL;
11028 } else {
11029 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio);
11030 }
11031
11032 return (err);
11033 }
11034
11035
11036 /*
11037 * Function: sdaread
11038 *
11039 * Description: Driver's aread(9e) entry point function.
11040 *
11041 * Arguments: dev - device number
11042 * aio - structure pointer describing where data is to be stored
11043 * cred_p - user credential pointer
11044 *
11045 * Return Code: ENXIO
11046 * EIO
11047 * EINVAL
11048 * value returned by aphysio
11049 *
11050 * Context: Kernel thread context.
11051 */
11052 /* ARGSUSED */
11053 static int
11054 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
11055 {
11056 struct sd_lun *un = NULL;
11057 struct uio *uio = aio->aio_uio;
11058 int secmask;
11059 int err = 0;
11060 sd_ssc_t *ssc;
11061
11062 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11063 return (ENXIO);
11064 }
11065
11066 ASSERT(!mutex_owned(SD_MUTEX(un)));
11067
11068 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11069 mutex_enter(SD_MUTEX(un));
11070 /*
11071 * Because the call to sd_ready_and_valid will issue I/O we
11072 * must wait here if either the device is suspended or
11073 * if its power level is changing.
11074 */
11075 while ((un->un_state == SD_STATE_SUSPENDED) ||
11076 (un->un_state == SD_STATE_PM_CHANGING)) {
11077 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11078 }
11079 un->un_ncmds_in_driver++;
11080 mutex_exit(SD_MUTEX(un));
11081
11082 /* Initialize sd_ssc_t for internal uscsi commands */
11083 ssc = sd_ssc_init(un);
11084 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11085 err = EIO;
11086 } else {
11087 err = 0;
11088 }
11089 sd_ssc_fini(ssc);
11090
11091 mutex_enter(SD_MUTEX(un));
11092 un->un_ncmds_in_driver--;
11093 ASSERT(un->un_ncmds_in_driver >= 0);
11094 mutex_exit(SD_MUTEX(un));
11095 if (err != 0)
11096 return (err);
11097 }
11098
11099 /*
11100 * Read requests are restricted to multiples of the system block size.
11101 */
11102 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11103 !un->un_f_enable_rmw)
11104 secmask = un->un_tgt_blocksize - 1;
11105 else
11106 secmask = DEV_BSIZE - 1;
11107
11108 if (uio->uio_loffset & ((offset_t)(secmask))) {
11109 SD_ERROR(SD_LOG_READ_WRITE, un,
11110 "sdaread: file offset not modulo %d\n",
11111 secmask + 1);
11112 err = EINVAL;
11113 } else if (uio->uio_iov->iov_len & (secmask)) {
11114 SD_ERROR(SD_LOG_READ_WRITE, un,
11115 "sdaread: transfer length not modulo %d\n",
11116 secmask + 1);
11117 err = EINVAL;
11118 } else {
11119 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio);
11120 }
11121
11122 return (err);
11123 }
11124
11125
11126 /*
11127 * Function: sdawrite
11128 *
11129 * Description: Driver's awrite(9e) entry point function.
11130 *
11131 * Arguments: dev - device number
11132 * aio - structure pointer describing where data is stored
11133 * cred_p - user credential pointer
11134 *
11135 * Return Code: ENXIO
11136 * EIO
11137 * EINVAL
11138 * value returned by aphysio
11139 *
11140 * Context: Kernel thread context.
11141 */
11142 /* ARGSUSED */
11143 static int
11144 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
11145 {
11146 struct sd_lun *un = NULL;
11147 struct uio *uio = aio->aio_uio;
11148 int secmask;
11149 int err = 0;
11150 sd_ssc_t *ssc;
11151
11152 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11153 return (ENXIO);
11154 }
11155
11156 ASSERT(!mutex_owned(SD_MUTEX(un)));
11157
11158 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11159 mutex_enter(SD_MUTEX(un));
11160 /*
11161 * Because the call to sd_ready_and_valid will issue I/O we
11162 * must wait here if either the device is suspended or
11163 * if its power level is changing.
11164 */
11165 while ((un->un_state == SD_STATE_SUSPENDED) ||
11166 (un->un_state == SD_STATE_PM_CHANGING)) {
11167 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11168 }
11169 un->un_ncmds_in_driver++;
11170 mutex_exit(SD_MUTEX(un));
11171
11172 /* Initialize sd_ssc_t for internal uscsi commands */
11173 ssc = sd_ssc_init(un);
11174 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11175 err = EIO;
11176 } else {
11177 err = 0;
11178 }
11179 sd_ssc_fini(ssc);
11180
11181 mutex_enter(SD_MUTEX(un));
11182 un->un_ncmds_in_driver--;
11183 ASSERT(un->un_ncmds_in_driver >= 0);
11184 mutex_exit(SD_MUTEX(un));
11185 if (err != 0)
11186 return (err);
11187 }
11188
11189 /*
11190 * Write requests are restricted to multiples of the system block size.
11191 */ 11192 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR && 11193 !un->un_f_enable_rmw) 11194 secmask = un->un_tgt_blocksize - 1; 11195 else 11196 secmask = DEV_BSIZE - 1; 11197 11198 if (uio->uio_loffset & ((offset_t)(secmask))) { 11199 SD_ERROR(SD_LOG_READ_WRITE, un, 11200 "sdawrite: file offset not modulo %d\n", 11201 secmask + 1); 11202 err = EINVAL; 11203 } else if (uio->uio_iov->iov_len & (secmask)) { 11204 SD_ERROR(SD_LOG_READ_WRITE, un, 11205 "sdawrite: transfer length not modulo %d\n", 11206 secmask + 1); 11207 err = EINVAL; 11208 } else { 11209 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 11210 } 11211 11212 return (err); 11213 } 11214 11215 11216 11217 11218 11219 /* 11220 * Driver IO processing follows the following sequence: 11221 * 11222 * sdioctl(9E) sdstrategy(9E) biodone(9F) 11223 * | | ^ 11224 * v v | 11225 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 11226 * | | | | 11227 * v | | | 11228 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 11229 * | | ^ ^ 11230 * v v | | 11231 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 11232 * | | | | 11233 * +---+ | +------------+ +-------+ 11234 * | | | | 11235 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11236 * | v | | 11237 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 11238 * | | ^ | 11239 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11240 * | v | | 11241 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 11242 * | | ^ | 11243 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11244 * | v | | 11245 * | sd_checksum_iostart() sd_checksum_iodone() | 11246 * | | ^ | 11247 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 11248 * | v | | 11249 * | sd_pm_iostart() sd_pm_iodone() | 11250 * | | ^ | 11251 * | | | | 11252 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 11253 * | ^ 11254 * v | 11255 * sd_core_iostart() | 11256 * | | 11257 * | +------>(*destroypkt)() 11258 * +-> sd_start_cmds() <-+ | | 11259 * | | | v 11260 * | | | scsi_destroy_pkt(9F) 11261 * | | | 11262 * +->(*initpkt)() +- sdintr() 11263 * | | | | 11264 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 11265 * | +-> scsi_setup_cdb(9F) | 11266 * | | 11267 * +--> scsi_transport(9F) | 11268 * | | 11269 * +----> SCSA ---->+ 11270 * 11271 * 11272 * This code is based upon the following presumptions: 11273 * 11274 * - iostart and iodone functions operate on buf(9S) structures. These 11275 * functions perform the necessary operations on the buf(9S) and pass 11276 * them along to the next function in the chain by using the macros 11277 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 11278 * (for iodone side functions). 11279 * 11280 * - The iostart side functions may sleep. The iodone side functions 11281 * are called under interrupt context and may NOT sleep. Therefore 11282 * iodone side functions also may not call iostart side functions. 11283 * (NOTE: iostart side functions should NOT sleep for memory, as 11284 * this could result in deadlock.) 11285 * 11286 * - An iostart side function may call its corresponding iodone side 11287 * function directly (if necessary). 11288 * 11289 * - In the event of an error, an iostart side function can return a buf(9S) 11290 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 11291 * b_error in the usual way of course). 11292 * 11293 * - The taskq mechanism may be used by the iodone side functions to dispatch 11294 * requests to the iostart side functions. 
The iostart side functions in
11295 * this case would be called under the context of a taskq thread, so it's
11296 * OK for them to block/sleep/spin in this case.
11297 *
11298 * - iostart side functions may allocate "shadow" buf(9S) structs and
11299 * pass them along to the next function in the chain. The corresponding
11300 * iodone side functions must coalesce the "shadow" bufs and return
11301 * the "original" buf to the next higher layer.
11302 *
11303 * - The b_private field of the buf(9S) struct holds a pointer to
11304 * an sd_xbuf struct, which contains information needed to
11305 * construct the scsi_pkt for the command.
11306 *
11307 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
11308 * layer must acquire & release the SD_MUTEX(un) as needed.
11309 */
11310
11311
11312 /*
11313 * Create taskq for all targets in the system. This is created at
11314 * _init(9E) and destroyed at _fini(9E).
11315 *
11316 * Note: here we set the minalloc to a reasonably high number to ensure that
11317 * we will have an adequate supply of task entries available at interrupt time.
11318 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
11319 * sd_taskq_create(). Since we do not want to sleep for allocations at
11320 * interrupt time, set maxalloc equal to minalloc. That way we will just fail
11321 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
11322 * requests at any one instant in time.
11323 */
11324 #define SD_TASKQ_NUMTHREADS 8
11325 #define SD_TASKQ_MINALLOC 256
11326 #define SD_TASKQ_MAXALLOC 256
11327
11328 static taskq_t *sd_tq = NULL;
11329 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))
11330
11331 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC;
11332 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
11333
11334 /*
11335 * The following task queue is being created for the write part of
11336 * read-modify-write of non-512 block size devices.
11337 * Limit the number of threads to 1 for now, since this currently
11338 * applies only to DVD-RAM and MO drives, for which performance is
11339 * not the main criterion at this stage.
11340 * Note: it remains to be explored whether a single taskq could be used.
11341 */
11342 #define SD_WMR_TASKQ_NUMTHREADS 1
11343 static taskq_t *sd_wmr_tq = NULL;
11344 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))
11345
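/*
 * [Editorial aside, not part of the original source.]  A toy model of
 * the layered iostart chain sketched in the diagram above: each layer
 * does its work, then hands the request to the next layer by index,
 * the way the SD_NEXT_IOSTART()/SD_NEXT_IODONE() macros step through
 * the driver's chain tables.  The layer names and the transformations
 * they apply are invented for illustration only; they are not the
 * driver's real chain entries.
 */

#include <stdio.h>

struct request { int blkno; };

typedef void (*layer_fn)(int index, struct request *rq);

static void layer_addr(int, struct request *);
static void layer_blksize(int, struct request *);
static void layer_core(int, struct request *);

static layer_fn chain[] = { layer_addr, layer_blksize, layer_core };

#define	NEXT_IOSTART(i, rq)	(*(chain[(i) + 1]))((i) + 1, (rq))

static void
layer_addr(int index, struct request *rq)
{
	rq->blkno += 100;	/* e.g. add a partition offset */
	NEXT_IOSTART(index, rq);
}

static void
layer_blksize(int index, struct request *rq)
{
	rq->blkno *= 8;		/* e.g. a 512B -> 4KB conversion */
	NEXT_IOSTART(index, rq);
}

static void
layer_core(int index, struct request *rq)
{
	(void) printf("transport blkno %d (chain index %d)\n",
	    rq->blkno, index);
}

int
main(void)
{
	struct request rq = { 10 };

	(*(chain[0]))(0, &rq);	/* the SD_BEGIN_IOSTART() analog */
	return (0);
}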
11346 /*
11347 * Function: sd_taskq_create
11348 *
11349 * Description: Create taskq thread(s) and preallocate task entries
11350 *
11351 * Return Code: None; the created taskqs are stored in sd_tq and sd_wmr_tq.
11352 *
11353 * Context: Can sleep. Requires blockable context.
11354 *
11355 * Notes: - The taskq() facility currently is NOT part of the DDI.
11356 * (definitely NOT recommended for 3rd-party drivers!) :-)
11357 * - taskq_create() will block for memory, also it will panic
11358 * if it cannot create the requested number of threads.
11359 * - Currently taskq_create() creates threads that cannot be
11360 * swapped.
11361 * - We use TASKQ_PREPOPULATE to ensure we have an adequate
11362 * supply of taskq entries at interrupt time (i.e., so that we
11363 * do not have to sleep for memory)
11364 */
11365
11366 static void
11367 sd_taskq_create(void)
11368 {
11369 char taskq_name[TASKQ_NAMELEN];
11370
11371 ASSERT(sd_tq == NULL);
11372 ASSERT(sd_wmr_tq == NULL);
11373
11374 (void) snprintf(taskq_name, sizeof (taskq_name),
11375 "%s_drv_taskq", sd_label);
11376 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS,
11377 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11378 TASKQ_PREPOPULATE));
11379
11380 (void) snprintf(taskq_name, sizeof (taskq_name),
11381 "%s_rmw_taskq", sd_label);
11382 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS,
11383 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11384 TASKQ_PREPOPULATE));
11385 }
11386
11387
11388 /*
11389 * Function: sd_taskq_delete
11390 *
11391 * Description: Complementary cleanup routine for sd_taskq_create().
11392 *
11393 * Context: Kernel thread context.
11394 */
11395
11396 static void
11397 sd_taskq_delete(void)
11398 {
11399 ASSERT(sd_tq != NULL);
11400 ASSERT(sd_wmr_tq != NULL);
11401 taskq_destroy(sd_tq);
11402 taskq_destroy(sd_wmr_tq);
11403 sd_tq = NULL;
11404 sd_wmr_tq = NULL;
11405 }
11406
11407
11408 /*
11409 * Function: sdstrategy
11410 *
11411 * Description: Driver's strategy (9E) entry point function.
11412 *
11413 * Arguments: bp - pointer to buf(9S)
11414 *
11415 * Return Code: Always returns zero
11416 *
11417 * Context: Kernel thread context.
11418 */
11419
11420 static int
11421 sdstrategy(struct buf *bp)
11422 {
11423 struct sd_lun *un;
11424
11425 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
11426 if (un == NULL) {
11427 bioerror(bp, EIO);
11428 bp->b_resid = bp->b_bcount;
11429 biodone(bp);
11430 return (0);
11431 }
11432
11433 /* As was done in the past, fail new cmds if the state is dumping. */
11434 if (un->un_state == SD_STATE_DUMPING) {
11435 bioerror(bp, ENXIO);
11436 bp->b_resid = bp->b_bcount;
11437 biodone(bp);
11438 return (0);
11439 }
11440
11441 ASSERT(!mutex_owned(SD_MUTEX(un)));
11442
11443 /*
11444 * Commands may sneak in while we have released the mutex in
11445 * DDI_SUSPEND; we should block new commands. However, old
11446 * commands that are still in the driver at this point should
11447 * still be allowed to drain.
11448 */
11449 mutex_enter(SD_MUTEX(un));
11450 /*
11451 * Must wait here if either the device is suspended or
11452 * if its power level is changing.
11453 */
11454 while ((un->un_state == SD_STATE_SUSPENDED) ||
11455 (un->un_state == SD_STATE_PM_CHANGING)) {
11456 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11457 }
11458
11459 un->un_ncmds_in_driver++;
11460
11461 /*
11462 * atapi: Since we are running the CD for now in PIO mode we need to
11463 * call bp_mapin here to avoid bp_mapin being called in interrupt context
11464 * under the HBA's init_pkt routine.
11465 */
11466 if (un->un_f_cfg_is_atapi == TRUE) {
11467 mutex_exit(SD_MUTEX(un));
11468 bp_mapin(bp);
11469 mutex_enter(SD_MUTEX(un));
11470 }
11471 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
11472 un->un_ncmds_in_driver);
11473
11474 if (bp->b_flags & B_WRITE)
11475 un->un_f_sync_cache_required = TRUE;
11476
11477 mutex_exit(SD_MUTEX(un));
11478
11479 /*
11480 * This will (eventually) allocate the sd_xbuf area and
11481 * call sd_xbuf_strategy().
We just want to return the 11482 * result of ddi_xbuf_qstrategy so that we have an opt- 11483 * imized tail call which saves us a stack frame. 11484 */ 11485 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 11486 } 11487 11488 11489 /* 11490 * Function: sd_xbuf_strategy 11491 * 11492 * Description: Function for initiating IO operations via the 11493 * ddi_xbuf_qstrategy() mechanism. 11494 * 11495 * Context: Kernel thread context. 11496 */ 11497 11498 static void 11499 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 11500 { 11501 struct sd_lun *un = arg; 11502 11503 ASSERT(bp != NULL); 11504 ASSERT(xp != NULL); 11505 ASSERT(un != NULL); 11506 ASSERT(!mutex_owned(SD_MUTEX(un))); 11507 11508 /* 11509 * Initialize the fields in the xbuf and save a pointer to the 11510 * xbuf in bp->b_private. 11511 */ 11512 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 11513 11514 /* Send the buf down the iostart chain */ 11515 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 11516 } 11517 11518 11519 /* 11520 * Function: sd_xbuf_init 11521 * 11522 * Description: Prepare the given sd_xbuf struct for use. 11523 * 11524 * Arguments: un - ptr to softstate 11525 * bp - ptr to associated buf(9S) 11526 * xp - ptr to associated sd_xbuf 11527 * chain_type - IO chain type to use: 11528 * SD_CHAIN_NULL 11529 * SD_CHAIN_BUFIO 11530 * SD_CHAIN_USCSI 11531 * SD_CHAIN_DIRECT 11532 * SD_CHAIN_DIRECT_PRIORITY 11533 * pktinfop - ptr to private data struct for scsi_pkt(9S) 11534 * initialization; may be NULL if none. 11535 * 11536 * Context: Kernel thread context 11537 */ 11538 11539 static void 11540 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 11541 uchar_t chain_type, void *pktinfop) 11542 { 11543 int index; 11544 11545 ASSERT(un != NULL); 11546 ASSERT(bp != NULL); 11547 ASSERT(xp != NULL); 11548 11549 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 11550 bp, chain_type); 11551 11552 xp->xb_un = un; 11553 xp->xb_pktp = NULL; 11554 xp->xb_pktinfo = pktinfop; 11555 xp->xb_private = bp->b_private; 11556 xp->xb_blkno = (daddr_t)bp->b_blkno; 11557 11558 /* 11559 * Set up the iostart and iodone chain indexes in the xbuf, based 11560 * upon the specified chain type to use. 11561 */ 11562 switch (chain_type) { 11563 case SD_CHAIN_NULL: 11564 /* 11565 * Fall thru to just use the values for the buf type, even 11566 * tho for the NULL chain these values will never be used. 
11567 */
11568 /* FALLTHRU */
11569 case SD_CHAIN_BUFIO:
11570 index = un->un_buf_chain_type;
11571 if ((!un->un_f_has_removable_media) &&
11572 (un->un_tgt_blocksize != 0) &&
11573 (un->un_tgt_blocksize != DEV_BSIZE ||
11574 un->un_f_enable_rmw)) {
11575 int secmask = 0, blknomask = 0;
11576 if (un->un_f_enable_rmw) {
11577 blknomask =
11578 (un->un_phy_blocksize / DEV_BSIZE) - 1;
11579 secmask = un->un_phy_blocksize - 1;
11580 } else {
11581 blknomask =
11582 (un->un_tgt_blocksize / DEV_BSIZE) - 1;
11583 secmask = un->un_tgt_blocksize - 1;
11584 }
11585
11586 if ((bp->b_lblkno & (blknomask)) ||
11587 (bp->b_bcount & (secmask))) {
11588 if ((un->un_f_rmw_type !=
11589 SD_RMW_TYPE_RETURN_ERROR) ||
11590 un->un_f_enable_rmw) {
11591 if (un->un_f_pm_is_enabled == FALSE)
11592 index =
11593 SD_CHAIN_INFO_MSS_DSK_NO_PM;
11594 else
11595 index =
11596 SD_CHAIN_INFO_MSS_DISK;
11597 }
11598 }
11599 }
11600 break;
11601 case SD_CHAIN_USCSI:
11602 index = un->un_uscsi_chain_type;
11603 break;
11604 case SD_CHAIN_DIRECT:
11605 index = un->un_direct_chain_type;
11606 break;
11607 case SD_CHAIN_DIRECT_PRIORITY:
11608 index = un->un_priority_chain_type;
11609 break;
11610 default:
11611 /* We're really broken if we ever get here... */
11612 panic("sd_xbuf_init: illegal chain type!");
11613 /*NOTREACHED*/
11614 }
11615
11616 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
11617 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;
11618
11619 /*
11620 * It might be a bit easier to simply bzero the entire xbuf above,
11621 * but it turns out that since we init a fair number of members anyway,
11622 * we save a fair number of cycles by doing explicit assignment of zero.
11623 */
11624 xp->xb_pkt_flags = 0;
11625 xp->xb_dma_resid = 0;
11626 xp->xb_retry_count = 0;
11627 xp->xb_victim_retry_count = 0;
11628 xp->xb_ua_retry_count = 0;
11629 xp->xb_nr_retry_count = 0;
11630 xp->xb_sense_bp = NULL;
11631 xp->xb_sense_status = 0;
11632 xp->xb_sense_state = 0;
11633 xp->xb_sense_resid = 0;
11634 xp->xb_ena = 0;
11635
11636 bp->b_private = xp;
11637 bp->b_flags &= ~(B_DONE | B_ERROR);
11638 bp->b_resid = 0;
11639 bp->av_forw = NULL;
11640 bp->av_back = NULL;
11641 bioerror(bp, 0);
11642
11643 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n");
11644 }
11645
11646
11647 /*
11648 * Function: sd_uscsi_strategy
11649 *
11650 * Description: Wrapper for calling into the USCSI chain via physio(9F)
11651 *
11652 * Arguments: bp - buf struct ptr
11653 *
11654 * Return Code: Always returns 0
11655 *
11656 * Context: Kernel thread context
11657 */
11658
11659 static int
11660 sd_uscsi_strategy(struct buf *bp)
11661 {
11662 struct sd_lun *un;
11663 struct sd_uscsi_info *uip;
11664 struct sd_xbuf *xp;
11665 uchar_t chain_type;
11666 uchar_t cmd;
11667
11668 ASSERT(bp != NULL);
11669
11670 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
11671 if (un == NULL) {
11672 bioerror(bp, EIO);
11673 bp->b_resid = bp->b_bcount;
11674 biodone(bp);
11675 return (0);
11676 }
11677
11678 ASSERT(!mutex_owned(SD_MUTEX(un)));
11679
11680 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp);
11681
11682 /*
11683 * A pointer to a struct sd_uscsi_info is expected in bp->b_private
11684 */
11685 ASSERT(bp->b_private != NULL);
11686 uip = (struct sd_uscsi_info *)bp->b_private;
11687 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0];
11688
11689 mutex_enter(SD_MUTEX(un));
11690 /*
11691 * atapi: Since we are running the CD for now in PIO mode we need to
11692 * call bp_mapin here to
avoid bp_mapin being called in interrupt context under
11693 * the HBA's init_pkt routine.
11694 */
11695 if (un->un_f_cfg_is_atapi == TRUE) {
11696 mutex_exit(SD_MUTEX(un));
11697 bp_mapin(bp);
11698 mutex_enter(SD_MUTEX(un));
11699 }
11700 un->un_ncmds_in_driver++;
11701 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
11702 un->un_ncmds_in_driver);
11703
11704 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) &&
11705 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1))
11706 un->un_f_sync_cache_required = TRUE;
11707
11708 mutex_exit(SD_MUTEX(un));
11709
11710 switch (uip->ui_flags) {
11711 case SD_PATH_DIRECT:
11712 chain_type = SD_CHAIN_DIRECT;
11713 break;
11714 case SD_PATH_DIRECT_PRIORITY:
11715 chain_type = SD_CHAIN_DIRECT_PRIORITY;
11716 break;
11717 default:
11718 chain_type = SD_CHAIN_USCSI;
11719 break;
11720 }
11721
11722 /*
11723 * We may allocate an extra buf for external USCSI commands. If the
11724 * application asks for more than 20 bytes of sense data via USCSI,
11725 * the SCSA layer will allocate a 252-byte sense buf for that command.
11726 */
11727 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen >
11728 SENSE_LENGTH) {
11729 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH +
11730 MAX_SENSE_LENGTH, KM_SLEEP);
11731 } else {
11732 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP);
11733 }
11734
11735 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);
11736
11737 /* Use the index obtained within xbuf_init */
11738 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);
11739
11740 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);
11741
11742 return (0);
11743 }
11744
11745 /*
11746 * Function: sd_send_scsi_cmd
11747 *
11748 * Description: Runs a USCSI command for user (when called through sdioctl),
11749 * or for the driver.
11750 *
11751 * Arguments: dev - the dev_t for the device
11752 * incmd - ptr to a valid uscsi_cmd struct
11753 * flag - bit flag, indicating open settings, 32/64 bit type
11754 * dataspace - UIO_USERSPACE or UIO_SYSSPACE
11755 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11756 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11757 * to use the USCSI "direct" chain and bypass the normal
11758 * command waitq.
11759 *
11760 * Return Code: 0 - successful completion of the given command
11761 * EIO - scsi_uscsi_handle_command() failed
11762 * ENXIO - soft state not found for specified dev
11763 * EINVAL
11764 * EFAULT - copyin/copyout error
11765 * return code of scsi_uscsi_handle_command():
11766 * EIO
11767 * ENXIO
11768 * EACCES
11769 *
11770 * Context: Waits for command to complete. Can sleep.
11771 */
11772
11773 static int
11774 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
11775 enum uio_seg dataspace, int path_flag)
11776 {
11777 struct sd_lun *un;
11778 sd_ssc_t *ssc;
11779 int rval;
11780
11781 un = ddi_get_soft_state(sd_state, SDUNIT(dev));
11782 if (un == NULL) {
11783 return (ENXIO);
11784 }
11785
11786 /*
11787 * Using sd_ssc_send to handle uscsi cmd
11788 */
11789 ssc = sd_ssc_init(un);
11790 rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag);
11791 sd_ssc_fini(ssc);
11792
11793 return (rval);
11794 }
11795
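/*
 * [Editorial aside, not part of the original source.]  The xbuf
 * allocation in sd_uscsi_strategy() above grows a fixed struct in
 * place when the caller wants more sense data than the embedded
 * 20-byte buffer holds.  A simplified userland sketch of that
 * pattern follows; struct xbuf and its field names are invented
 * stand-ins for struct sd_xbuf, which this sketch does not reproduce.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define	SENSE_LENGTH		20
#define	MAX_SENSE_LENGTH	252

struct xbuf {
	int	xb_flags;
	char	xb_sense[SENSE_LENGTH];	/* may grow into the slack below */
};

static struct xbuf *
xbuf_alloc(size_t rqlen)
{
	size_t sz = sizeof (struct xbuf);

	/* Grow the trailing sense buffer in place, as the driver does. */
	if (rqlen > SENSE_LENGTH)
		sz = sz - SENSE_LENGTH + MAX_SENSE_LENGTH;
	return (calloc(1, sz));
}

int
main(void)
{
	struct xbuf *small = xbuf_alloc(18);
	struct xbuf *big = xbuf_alloc(64);

	/*
	 * big's sense area can hold MAX_SENSE_LENGTH bytes; this relies
	 * on the over-allocation above, exactly like the driver's trick.
	 * Note the matching free path must use the same size logic.
	 */
	(void) memset(big->xb_sense, 0xff, MAX_SENSE_LENGTH);
	free(small);
	free(big);
	return (0);
}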
11796 /*
11797 * Function: sd_ssc_init
11798 *
11799 * Description: Uscsi end-users call this function to initialize necessary
11800 * fields, such as the uscsi_cmd and sd_uscsi_info structs.
11801 *
11802 * The return value of sd_send_scsi_cmd will be treated as a
11803 * fault in various conditions. Even if it is not zero, some
11804 * callers may ignore the return value. That is to say, we
11805 * cannot make an accurate assessment in sdintr, since if a
11806 * command fails in sdintr it does not mean the caller of
11807 * sd_send_scsi_cmd will treat it as a real failure.
11808 *
11809 * To avoid printing too many error logs for a failed uscsi
11810 * packet that the caller may not treat as a failure, sd
11811 * keeps silent when handling all uscsi commands.
11812 *
11813 * During detach->attach and attach-open, for some types of
11814 * problems, the driver should be providing information about
11815 * the problem encountered. Devices use USCSI_SILENT, which
11816 * suppresses all driver information. The result is that no
11817 * information about the problem is available. Being
11818 * completely silent during this time is inappropriate. The
11819 * driver needs a more selective filter than USCSI_SILENT, so
11820 * that information related to faults is provided.
11821 *
11822 * To make an accurate assessment, the caller of
11823 * sd_send_scsi_USCSI_CMD should take ownership and
11824 * get the necessary information to print error messages.
11825 *
11826 * If we want to print the necessary info of a uscsi command, we
11827 * need to keep the uscsi_cmd and sd_uscsi_info until we can make
11828 * the assessment. We use sd_ssc_init to alloc the necessary
11829 * structs for sending a uscsi command and we are also
11830 * responsible for freeing the memory by calling
11831 * sd_ssc_fini.
11832 *
11833 * The calling sequence will look like:
11834 * sd_ssc_init->
11835 *
11836 * ...
11837 *
11838 * sd_send_scsi_USCSI_CMD->
11839 * sd_ssc_send-> - - - sdintr
11840 * ...
11841 *
11842 * if we think the return value should be treated as a
11843 * failure, we make the assessment here and print out the
11844 * necessary information by retrieving uscsi_cmd and sd_uscsi_info
11845 *
11846 * ...
11847 *
11848 * sd_ssc_fini
11849 *
11850 *
11851 * Arguments: un - pointer to driver soft state (unit) structure for this
11852 * target.
11853 *
11854 * Return code: sd_ssc_t - pointer to allocated sd_ssc_t struct; it contains
11855 * uscsi_cmd and sd_uscsi_info.
11856 * NULL - if memory cannot be allocated for the sd_ssc_t struct
11857 *
11858 * Context: Kernel Thread.
11859 */
11860 static sd_ssc_t *
11861 sd_ssc_init(struct sd_lun *un)
11862 {
11863 sd_ssc_t *ssc;
11864 struct uscsi_cmd *ucmdp;
11865 struct sd_uscsi_info *uip;
11866
11867 ASSERT(un != NULL);
11868 ASSERT(!mutex_owned(SD_MUTEX(un)));
11869
11870 /*
11871 * Allocate sd_ssc_t structure
11872 */
11873 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP);
11874
11875 /*
11876 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine
11877 */
11878 ucmdp = scsi_uscsi_alloc();
11879
11880 /*
11881 * Allocate sd_uscsi_info structure
11882 */
11883 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
11884
11885 ssc->ssc_uscsi_cmd = ucmdp;
11886 ssc->ssc_uscsi_info = uip;
11887 ssc->ssc_un = un;
11888
11889 return (ssc);
11890 }
11891
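/*
 * [Editorial aside, not part of the original source.]  The calling
 * sequence described above, written out as a sketch.  This assumes
 * kernel thread context with a valid un; the uscsi_cmd setup, the
 * flag choice (FKIOCTL/UIO_SYSSPACE), and the error test are
 * simplified assumptions, not a verbatim driver routine.
 */
static int
example_ssc_sequence(struct sd_lun *un, struct uscsi_cmd *ucmd)
{
	sd_ssc_t *ssc;
	int rval;

	ssc = sd_ssc_init(un);		/* alloc ssc, uscsi_cmd, uscsi_info */
	rval = sd_ssc_send(ssc, ucmd, FKIOCTL, UIO_SYSSPACE, SD_PATH_DIRECT);
	if (rval != 0) {
		/* treat as a real fault: post "fail"/"fatal" telemetry */
		sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
	} else {
		/* benign: dispose of the pending telemetry quietly */
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
	sd_ssc_fini(ssc);		/* free everything sd_ssc_init got */
	return (rval);
}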
11892 /*
11893 * Function: sd_ssc_fini
11894 *
11895 * Description: To free an sd_ssc_t and the structures hanging off it.
11896 *
11897 * Arguments: ssc - struct pointer of sd_ssc_t.
11898 */
11899 static void
11900 sd_ssc_fini(sd_ssc_t *ssc)
11901 {
11902 scsi_uscsi_free(ssc->ssc_uscsi_cmd);
11903
11904 if (ssc->ssc_uscsi_info != NULL) {
11905 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info));
11906 ssc->ssc_uscsi_info = NULL;
11907 }
11908
11909 kmem_free(ssc, sizeof (sd_ssc_t));
11910 ssc = NULL;
11911 }
11912
11913 /*
11914 * Function: sd_ssc_send
11915 *
11916 * Description: Runs a USCSI command for user when called through sdioctl,
11917 * or for the driver.
11918 *
11919 * Arguments: ssc - the sd_ssc_t struct that carries the uscsi_cmd and
11920 * sd_uscsi_info.
11921 * incmd - ptr to a valid uscsi_cmd struct
11922 * flag - bit flag, indicating open settings, 32/64 bit type
11923 * dataspace - UIO_USERSPACE or UIO_SYSSPACE
11924 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11925 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11926 * to use the USCSI "direct" chain and bypass the normal
11927 * command waitq.
11928 *
11929 * Return Code: 0 - successful completion of the given command
11930 * EIO - scsi_uscsi_handle_command() failed
11931 * ENXIO - soft state not found for specified dev
11932 * ECANCELED - command cancelled due to low power
11933 * EINVAL
11934 * EFAULT - copyin/copyout error
11935 * return code of scsi_uscsi_handle_command():
11936 * EIO
11937 * ENXIO
11938 * EACCES
11939 *
11940 * Context: Kernel Thread;
11941 * Waits for command to complete. Can sleep.
11942 */
11943 static int
11944 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag,
11945 enum uio_seg dataspace, int path_flag)
11946 {
11947 struct sd_uscsi_info *uip;
11948 struct uscsi_cmd *uscmd;
11949 struct sd_lun *un;
11950 dev_t dev;
11951
11952 int format = 0;
11953 int rval;
11954
11955 ASSERT(ssc != NULL);
11956 un = ssc->ssc_un;
11957 ASSERT(un != NULL);
11958 uscmd = ssc->ssc_uscsi_cmd;
11959 ASSERT(uscmd != NULL);
11960 ASSERT(!mutex_owned(SD_MUTEX(un)));
11961 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
11962 /*
11963 * If we enter here, it indicates that the previous uscsi
11964 * command has not been processed by sd_ssc_assessment.
11965 * This violates our rules of FMA telemetry processing.
11966 * We should print out this message and the last undisposed
11967 * uscsi command.
11968 */
11969 if (uscmd->uscsi_cdb != NULL) {
11970 SD_INFO(SD_LOG_SDTEST, un,
11971 "sd_ssc_send is missing the alternative "
11972 "sd_ssc_assessment when running command 0x%x.\n",
11973 uscmd->uscsi_cdb[0]);
11974 }
11975 /*
11976 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be
11977 * the initial status.
11978 */
11979 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
11980 }
11981
11982 /*
11983 * We need to make sure every sd_ssc_send is followed by an
11984 * sd_ssc_assessment, to avoid missing FMA telemetry.
11985 */
11986 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT;
11987
11988 /*
11989 * if USCSI_PMFAILFAST is set and un is in low power, fail the
11990 * command immediately.
11991 */ 11992 mutex_enter(SD_MUTEX(un)); 11993 mutex_enter(&un->un_pm_mutex); 11994 if ((uscmd->uscsi_flags & USCSI_PMFAILFAST) && 11995 SD_DEVICE_IS_IN_LOW_POWER(un)) { 11996 SD_TRACE(SD_LOG_IO, un, "sd_ssc_send:" 11997 "un:0x%p is in low power\n", un); 11998 mutex_exit(&un->un_pm_mutex); 11999 mutex_exit(SD_MUTEX(un)); 12000 return (ECANCELED); 12001 } 12002 mutex_exit(&un->un_pm_mutex); 12003 mutex_exit(SD_MUTEX(un)); 12004 12005 #ifdef SDDEBUG 12006 switch (dataspace) { 12007 case UIO_USERSPACE: 12008 SD_TRACE(SD_LOG_IO, un, 12009 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un); 12010 break; 12011 case UIO_SYSSPACE: 12012 SD_TRACE(SD_LOG_IO, un, 12013 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un); 12014 break; 12015 default: 12016 SD_TRACE(SD_LOG_IO, un, 12017 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un); 12018 break; 12019 } 12020 #endif 12021 12022 rval = scsi_uscsi_copyin((intptr_t)incmd, flag, 12023 SD_ADDRESS(un), &uscmd); 12024 if (rval != 0) { 12025 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: " 12026 "scsi_uscsi_alloc_and_copyin failed\n", un); 12027 return (rval); 12028 } 12029 12030 if ((uscmd->uscsi_cdb != NULL) && 12031 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 12032 mutex_enter(SD_MUTEX(un)); 12033 un->un_f_format_in_progress = TRUE; 12034 mutex_exit(SD_MUTEX(un)); 12035 format = 1; 12036 } 12037 12038 /* 12039 * Allocate an sd_uscsi_info struct and fill it with the info 12040 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 12041 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 12042 * since we allocate the buf here in this function, we do not 12043 * need to preserve the prior contents of b_private. 12044 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 12045 */ 12046 uip = ssc->ssc_uscsi_info; 12047 uip->ui_flags = path_flag; 12048 uip->ui_cmdp = uscmd; 12049 12050 /* 12051 * Commands sent with priority are intended for error recovery 12052 * situations, and do not have retries performed. 12053 */ 12054 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 12055 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 12056 } 12057 uscmd->uscsi_flags &= ~USCSI_NOINTR; 12058 12059 dev = SD_GET_DEV(un); 12060 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 12061 sd_uscsi_strategy, NULL, uip); 12062 12063 /* 12064 * mark ssc_flags right after handle_cmd to make sure 12065 * the uscsi has been sent 12066 */ 12067 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED; 12068 12069 #ifdef SDDEBUG 12070 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 12071 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 12072 uscmd->uscsi_status, uscmd->uscsi_resid); 12073 if (uscmd->uscsi_bufaddr != NULL) { 12074 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 12075 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 12076 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 12077 if (dataspace == UIO_SYSSPACE) { 12078 SD_DUMP_MEMORY(un, SD_LOG_IO, 12079 "data", (uchar_t *)uscmd->uscsi_bufaddr, 12080 uscmd->uscsi_buflen, SD_LOG_HEX); 12081 } 12082 } 12083 #endif 12084 12085 if (format == 1) { 12086 mutex_enter(SD_MUTEX(un)); 12087 un->un_f_format_in_progress = FALSE; 12088 mutex_exit(SD_MUTEX(un)); 12089 } 12090 12091 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd); 12092 12093 return (rval); 12094 } 12095 12096 /* 12097 * Function: sd_ssc_print 12098 * 12099 * Description: Print information available to the console. 12100 * 12101 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 12102 * sd_uscsi_info in. 12103 * sd_severity - log level. 12104 * Context: Kernel thread or interrupt context. 
12105 */
12106 static void
12107 sd_ssc_print(sd_ssc_t *ssc, int sd_severity)
12108 {
12109 struct uscsi_cmd *ucmdp;
12110 struct scsi_device *devp;
12111 dev_info_t *devinfo;
12112 uchar_t *sensep;
12113 int senlen;
12114 union scsi_cdb *cdbp;
12115 uchar_t com;
12116 extern struct scsi_key_strings scsi_cmds[];
12117
12118 ASSERT(ssc != NULL);
12119 ASSERT(ssc->ssc_un != NULL);
12120
12121 if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT)
12122 return;
12123 ucmdp = ssc->ssc_uscsi_cmd;
12124 devp = SD_SCSI_DEVP(ssc->ssc_un);
12125 devinfo = SD_DEVINFO(ssc->ssc_un);
12126 ASSERT(ucmdp != NULL);
12127 ASSERT(devp != NULL);
12128 ASSERT(devinfo != NULL);
12129 sensep = (uint8_t *)ucmdp->uscsi_rqbuf;
12130 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid;
12131 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb;
12132
12133 /* In certain cases (like DOORLOCK), the cdb could be NULL. */
12134 if (cdbp == NULL)
12135 return;
12136 /* We don't print a log if no sense data is available. */
12137 if (senlen == 0)
12138 sensep = NULL;
12139 com = cdbp->scc_cmd;
12140 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com,
12141 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL);
12142 }
12143
12144 /*
12145 * Function: sd_ssc_assessment
12146 *
12147 * Description: We use this function to make an assessment at the point
12148 * where the SD driver may encounter a potential error.
12149 *
12150 * Arguments: ssc - the sd_ssc_t struct that carries the uscsi_cmd and
12151 * sd_uscsi_info.
12152 * tp_assess - a hint of strategy for ereport posting.
12153 * Possible values of tp_assess include:
12154 * SD_FMT_IGNORE - we don't post any ereport because we're
12155 * sure that it is ok to ignore the underlying problems.
12156 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now,
12157 * but it might not be correct to ignore the underlying hardware
12158 * error.
12159 * SD_FMT_STATUS_CHECK - we will post an ereport with the
12160 * payload driver-assessment of value "fail" or
12161 * "fatal" (depending on what information we have here). This
12162 * assessment value is usually set when the SD driver thinks a
12163 * potential error has occurred (typically, when the return value
12164 * of the SCSI command is EIO).
12165 * SD_FMT_STANDARD - we will post an ereport with the payload
12166 * driver-assessment of value "info". This assessment value is
12167 * set when the SCSI command returned successfully and with
12168 * sense data sent back.
12169 *
12170 * Context: Kernel thread.
12171 */
12172 static void
12173 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess)
12174 {
12175 int senlen = 0;
12176 struct uscsi_cmd *ucmdp = NULL;
12177 struct sd_lun *un;
12178
12179 ASSERT(ssc != NULL);
12180 un = ssc->ssc_un;
12181 ASSERT(un != NULL);
12182 ucmdp = ssc->ssc_uscsi_cmd;
12183 ASSERT(ucmdp != NULL);
12184
12185 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
12186 ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT;
12187 } else {
12188 /*
12189 * If we enter here, it indicates that we have a wrong
12190 * calling sequence of sd_ssc_send and sd_ssc_assessment,
12191 * both of which should be called in a pair to avoid
12192 * loss of FMA telemetry.
12193 */
12194 if (ucmdp->uscsi_cdb != NULL) {
12195 SD_INFO(SD_LOG_SDTEST, un,
12196 "sd_ssc_assessment is missing the "
12197 "alternative sd_ssc_send when running 0x%x, "
12198 "or there are superfluous sd_ssc_assessment for "
12199 "the same sd_ssc_send.\n",
12200 ucmdp->uscsi_cdb[0]);
12201 }
12202 /*
12203 * Set the ssc_flags to the initial value to avoid passing
12204 * down dirty flags to the following sd_ssc_send function.
12205 */
12206 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12207 return;
12208 }
12209
12210 /*
12211 * Only handle an issued command which is waiting for assessment.
12212 * A command which is not issued will not have
12213 * SSC_FLAGS_INVALID_DATA set, so it's OK to just return here.
12214 */
12215 if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) {
12216 sd_ssc_print(ssc, SCSI_ERR_INFO);
12217 return;
12218 } else {
12219 /*
12220 * For an issued command, we should clear this flag so
12221 * that the sd_ssc_t structure can be reused across
12222 * multiple uscsi commands.
12223 */
12224 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED;
12225 }
12226
12227 /*
12228 * We will not deal with non-retryable (flag USCSI_DIAGNOSE set)
12229 * commands here. We should clear the ssc_flags before return.
12230 */
12231 if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) {
12232 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12233 return;
12234 }
12235
12236 switch (tp_assess) {
12237 case SD_FMT_IGNORE:
12238 case SD_FMT_IGNORE_COMPROMISE:
12239 break;
12240 case SD_FMT_STATUS_CHECK:
12241 /*
12242 * For a failed command (including a succeeded command
12243 * with invalid data sent back).
12244 */
12245 sd_ssc_post(ssc, SD_FM_DRV_FATAL);
12246 break;
12247 case SD_FMT_STANDARD:
12248 /*
12249 * Always for succeeded commands, possibly with sense
12250 * data sent back.
12251 * Limitation:
12252 * We can only handle a succeeded command with sense
12253 * data sent back when auto-request-sense is enabled.
12254 */
12255 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen -
12256 ssc->ssc_uscsi_cmd->uscsi_rqresid;
12257 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) &&
12258 (un->un_f_arq_enabled == TRUE) &&
12259 senlen > 0 &&
12260 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) {
12261 sd_ssc_post(ssc, SD_FM_DRV_NOTICE);
12262 }
12263 break;
12264 default:
12265 /*
12266 * We should not see any other type of assessment.
12267 */
12268 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
12269 "sd_ssc_assessment got wrong "
12270 "sd_type_assessment %d.\n", tp_assess);
12271 break;
12272 }
12273 /*
12274 * Clear up the ssc_flags before return.
12275 */
12276 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12277 }
12278
12279 /*
12280 * Function: sd_ssc_post
12281 *
12282 * Description: 1. Read the driver property to get the fm-scsi-log flag.
12283 * 2. Print a log if fm_log_capable is non-zero.
12284 * 3. Call sd_ssc_ereport_post to post an ereport if possible.
12285 *
12286 * Context: May be called from kernel thread or interrupt context.
12287 */
12288 static void
12289 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess)
12290 {
12291 struct sd_lun *un;
12292 int sd_severity;
12293
12294 ASSERT(ssc != NULL);
12295 un = ssc->ssc_un;
12296 ASSERT(un != NULL);
12297
12298 /*
12299 * We may enter here from sd_ssc_assessment (for a USCSI command)
12300 * or directly from sdintr context.
12301 * We don't handle a non-disk drive (CD-ROM, removable media).
12302 * Clear the ssc_flags before return in case we've set
12303 * SSC_FLAGS_INVALID_XXX which should be skipped for a non-disk
12304 * driver.
12305 */
12306 if (ISCD(un) || un->un_f_has_removable_media) {
12307 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12308 return;
12309 }
12310
12311 switch (sd_assess) {
12312 case SD_FM_DRV_FATAL:
12313 sd_severity = SCSI_ERR_FATAL;
12314 break;
12315 case SD_FM_DRV_RECOVERY:
12316 sd_severity = SCSI_ERR_RECOVERED;
12317 break;
12318 case SD_FM_DRV_RETRY:
12319 sd_severity = SCSI_ERR_RETRYABLE;
12320 break;
12321 case SD_FM_DRV_NOTICE:
12322 sd_severity = SCSI_ERR_INFO;
12323 break;
12324 default:
12325 sd_severity = SCSI_ERR_UNKNOWN;
12326 }
12327 /* print log */
12328 sd_ssc_print(ssc, sd_severity);
12329
12330 /* always post ereport */
12331 sd_ssc_ereport_post(ssc, sd_assess);
12332 }
12333
12334 /*
12335 * Function: sd_ssc_set_info
12336 *
12337 * Description: Mark ssc_flags and set ssc_info, which will be the
12338 * payload of the uderr ereport. This function causes
12339 * sd_ssc_ereport_post to post the uderr ereport only.
12340 * Besides, when ssc_flags == SSC_FLAGS_INVALID_DATA (USCSI),
12341 * the function will also call SD_ERROR or scsi_log for a
12342 * CDROM/removable-media/DDI_FM_NOT_CAPABLE device.
12343 *
12344 * Arguments: ssc - the sd_ssc_t struct that carries the uscsi_cmd and
12345 * sd_uscsi_info.
12346 * ssc_flags - indicates the sub-category of a uderr.
12347 * comp - this argument is meaningful only when
12348 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible
12349 * values include:
12350 * > 0, SD_ERROR is used with comp as the driver logging
12351 * component;
12352 * = 0, scsi-log is used to log error telemetries;
12353 * < 0, no log available for this telemetry.
12354 *
12355 * Context: Kernel thread or interrupt context
12356 */
12357 static void
12358 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...)
12359 {
12360 va_list ap;
12361
12362 ASSERT(ssc != NULL);
12363 ASSERT(ssc->ssc_un != NULL);
12364
12365 ssc->ssc_flags |= ssc_flags;
12366 va_start(ap, fmt);
12367 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap);
12368 va_end(ap);
12369
12370 /*
12371 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command
12372 * with invalid data sent back. For a non-uscsi command, the
12373 * following code will be bypassed.
12374 */
12375 if (ssc_flags & SSC_FLAGS_INVALID_DATA) {
12376 if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) {
12377 /*
12378 * If the error belongs to a certain component and we
12379 * do not want it to show up on the console, we
12380 * will use SD_ERROR; otherwise scsi_log is
12381 * preferred.
12382 */
12383 if (comp > 0) {
12384 SD_ERROR(comp, ssc->ssc_un, ssc->ssc_info);
12385 } else if (comp == 0) {
12386 scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label,
12387 CE_WARN, ssc->ssc_info);
12388 }
12389 }
12390 }
12391 }
12392
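/*
 * [Editorial aside, not part of the original source.]  A minimal
 * userland analog of sd_ssc_set_info(): OR in the category flags and
 * format a variadic message into a fixed-size buffer.  vsnprintf()
 * NUL-terminates and never overruns.  struct ssc_like, INFO_LEN, and
 * the flag value are invented for the sketch.
 */

#include <stdarg.h>
#include <stdio.h>

#define	INFO_LEN	256

struct ssc_like {
	unsigned int	flags;
	char		info[INFO_LEN];
};

static void
set_info(struct ssc_like *s, unsigned int flags, const char *fmt, ...)
{
	va_list ap;

	s->flags |= flags;		/* mark the uderr sub-category */
	va_start(ap, fmt);
	(void) vsnprintf(s->info, sizeof (s->info), fmt, ap);
	va_end(ap);
}

int
main(void)
{
	struct ssc_like s = { 0, "" };

	set_info(&s, 0x1, "undecodable sense, segment %d", 2);
	(void) printf("flags=0x%x info=\"%s\"\n", s.flags, s.info);
	return (0);
}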
12393 /*
12394 * Function: sd_buf_iodone
12395 *
12396 * Description: Frees the sd_xbuf & returns the buf to its originator.
12397 *
12398 * Context: May be called from interrupt context.
12399 */
12400 /* ARGSUSED */
12401 static void
12402 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp)
12403 {
12404 struct sd_xbuf *xp;
12405
12406 ASSERT(un != NULL);
12407 ASSERT(bp != NULL);
12408 ASSERT(!mutex_owned(SD_MUTEX(un)));
12409
12410 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n");
12411
12412 xp = SD_GET_XBUF(bp);
12413 ASSERT(xp != NULL);
12414
12415 /* xbuf is gone after this */
12416 if (ddi_xbuf_done(bp, un->un_xbuf_attr)) {
12417 mutex_enter(SD_MUTEX(un));
12418
12419 /*
12420 * Grab time when the cmd completed.
12421 * This is used for determining if the system has been
12422 * idle long enough to consider it idle to the PM framework.
12423 * This is for lowering the overhead, and therefore improving
12424 * performance per I/O operation.
12425 */
12426 un->un_pm_idle_time = ddi_get_time();
12427
12428 un->un_ncmds_in_driver--;
12429 ASSERT(un->un_ncmds_in_driver >= 0);
12430 SD_INFO(SD_LOG_IO, un,
12431 "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
12432 un->un_ncmds_in_driver);
12433
12434 mutex_exit(SD_MUTEX(un));
12435 }
12436
12437 biodone(bp); /* bp is gone after this */
12438
12439 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n");
12440 }
12441
12442
12443 /*
12444 * Function: sd_uscsi_iodone
12445 *
12446 * Description: Frees the sd_xbuf & returns the buf to its originator.
12447 *
12448 * Context: May be called from interrupt context.
12449 */
12450 /* ARGSUSED */
12451 static void
12452 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
12453 {
12454 struct sd_xbuf *xp;
12455
12456 ASSERT(un != NULL);
12457 ASSERT(bp != NULL);
12458
12459 xp = SD_GET_XBUF(bp);
12460 ASSERT(xp != NULL);
12461 ASSERT(!mutex_owned(SD_MUTEX(un)));
12462
12463 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n");
12464
12465 bp->b_private = xp->xb_private;
12466
12467 mutex_enter(SD_MUTEX(un));
12468
12469 /*
12470 * Grab time when the cmd completed.
12471 * This is used for determining if the system has been
12472 * idle long enough to consider it idle to the PM framework.
12473 * This is for lowering the overhead, and therefore improving
12474 * performance per I/O operation.
12475 */
12476 un->un_pm_idle_time = ddi_get_time();
12477
12478 un->un_ncmds_in_driver--;
12479 ASSERT(un->un_ncmds_in_driver >= 0);
12480 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
12481 un->un_ncmds_in_driver);
12482
12483 mutex_exit(SD_MUTEX(un));
12484
12485 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen >
12486 SENSE_LENGTH) {
12487 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH +
12488 MAX_SENSE_LENGTH);
12489 } else {
12490 kmem_free(xp, sizeof (struct sd_xbuf));
12491 }
12492
12493 biodone(bp);
12494
12495 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
12496 }
12497
12498
12499 /*
12500 * Function: sd_mapblockaddr_iostart
12501 *
12502 * Description: Verify request lies within the partition limits for
12503 * the indicated minor device. Issue "overrun" buf if
12504 * request would exceed partition range. Converts
12505 * partition-relative block address to absolute.
12506 *
12507 * Upon exit of this function:
12508 * 1. I/O is aligned
12509 * xp->xb_blkno represents the absolute sector address
12510 * 2. I/O is misaligned
12511 * xp->xb_blkno represents the absolute logical block address
12512 * based on DEV_BSIZE. The logical block address will be
12513 * converted to physical sector address in sd_mapblocksize_\
12514 * iostart.
12515 * 3. I/O is misaligned but is aligned in "overrun" buf
12516 * xp->xb_blkno represents the absolute logical block address
12517 * based on DEV_BSIZE. The logical block address will be
12518 * converted to physical sector address in sd_mapblocksize_\
12519 * iostart. But no RMW will be issued in this case.
12520 *
12521 * Context: Can sleep
12522 *
12523 * Issues: This follows what the old code did, in terms of accessing
12524 * some of the partition info in the unit struct without holding
12525 * the mutex. This is a general issue if the partition info
12526 * can be altered while IO is in progress...
as soon as we send
12527 * a buf, its partitioning can be invalid before it gets to the
12528 * device. Probably the right fix is to move partitioning out
12529 * of the driver entirely.
12530 */
12531
12532 static void
12533 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
12534 {
12535 diskaddr_t nblocks; /* #blocks in the given partition */
12536 daddr_t blocknum; /* Block number specified by the buf */
12537 size_t requested_nblocks;
12538 size_t available_nblocks;
12539 int partition;
12540 diskaddr_t partition_offset;
12541 struct sd_xbuf *xp;
12542 int secmask = 0, blknomask = 0;
12543 ushort_t is_aligned = TRUE;
12544
12545 ASSERT(un != NULL);
12546 ASSERT(bp != NULL);
12547 ASSERT(!mutex_owned(SD_MUTEX(un)));
12548
12549 SD_TRACE(SD_LOG_IO_PARTITION, un,
12550 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);
12551
12552 xp = SD_GET_XBUF(bp);
12553 ASSERT(xp != NULL);
12554
12555 /*
12556 * If the geometry is not indicated as valid, attempt to access
12557 * the unit & verify the geometry/label. This can be the case for
12558 * removable-media devices, or if the device was opened in
12559 * NDELAY/NONBLOCK mode.
12560 */
12561 partition = SDPART(bp->b_edev);
12562
12563 if (!SD_IS_VALID_LABEL(un)) {
12564 sd_ssc_t *ssc;
12565 /*
12566 * Initialize sd_ssc_t for internal uscsi commands.
12567 * To avoid a potential performance issue, we allocate
12568 * memory only if the label is invalid.
12569 */
12570 ssc = sd_ssc_init(un);
12571
12572 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) {
12573 /*
12574 * For removable devices it is possible to start an
12575 * I/O without media by opening the device in nodelay
12576 * mode. Also for writable CDs there can be many
12577 * scenarios where there is no geometry yet but volume
12578 * manager is trying to issue a read() just because
12579 * it can see TOC on the CD. So do not print a message
12580 * for removables.
12581 */
12582 if (!un->un_f_has_removable_media) {
12583 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
12584 "i/o to invalid geometry\n");
12585 }
12586 bioerror(bp, EIO);
12587 bp->b_resid = bp->b_bcount;
12588 SD_BEGIN_IODONE(index, un, bp);
12589
12590 sd_ssc_fini(ssc);
12591 return;
12592 }
12593 sd_ssc_fini(ssc);
12594 }
12595
12596 nblocks = 0;
12597 (void) cmlb_partinfo(un->un_cmlbhandle, partition,
12598 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT);
12599
12600 if (un->un_f_enable_rmw) {
12601 blknomask = (un->un_phy_blocksize / DEV_BSIZE) - 1;
12602 secmask = un->un_phy_blocksize - 1;
12603 } else {
12604 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
12605 secmask = un->un_tgt_blocksize - 1;
12606 }
12607
12608 if ((bp->b_lblkno & (blknomask)) || (bp->b_bcount & (secmask))) {
12609 is_aligned = FALSE;
12610 }
12611
12612 if (!(NOT_DEVBSIZE(un)) || un->un_f_enable_rmw) {
12613 /*
12614 * If the I/O is aligned, no need to involve RMW (Read-Modify-Write).
12615 * Convert the logical block number to the target's physical sector
12616 * number.
12617 */
12618 if (is_aligned) {
12619 xp->xb_blkno = SD_SYS2TGTBLOCK(un, xp->xb_blkno);
12620 } else {
12621 switch (un->un_f_rmw_type) {
12622 case SD_RMW_TYPE_RETURN_ERROR:
12623 if (un->un_f_enable_rmw)
12624 break;
12625 else {
12626 bp->b_flags |= B_ERROR;
12627 goto error_exit;
12628 }
12629
12630 case SD_RMW_TYPE_DEFAULT:
12631 mutex_enter(SD_MUTEX(un));
12632 if (!un->un_f_enable_rmw &&
12633 un->un_rmw_msg_timeid == NULL) {
12634 scsi_log(SD_DEVINFO(un), sd_label,
12635 CE_WARN, "I/O request is not "
12636 "aligned with %d disk sector size. "
12637 "It is handled through Read Modify "
12638 "Write but the performance is "
12639 "very low.\n",
12640 un->un_tgt_blocksize);
12641 un->un_rmw_msg_timeid =
12642 timeout(sd_rmw_msg_print_handler,
12643 un, SD_RMW_MSG_PRINT_TIMEOUT);
12644 } else {
12645 un->un_rmw_incre_count++;
12646 }
12647 mutex_exit(SD_MUTEX(un));
12648 break;
12649
12650 case SD_RMW_TYPE_NO_WARNING:
12651 default:
12652 break;
12653 }
12654
12655 nblocks = SD_TGT2SYSBLOCK(un, nblocks);
12656 partition_offset = SD_TGT2SYSBLOCK(un,
12657 partition_offset);
12658 }
12659 }
12660
12661 /*
12662 * blocknum is the starting block number of the request. At this
12663 * point it is still relative to the start of the minor device.
12664 */
12665 blocknum = xp->xb_blkno;
12666
12667 /*
12668 * Legacy: If the starting block number is one past the last block
12669 * in the partition, do not set B_ERROR in the buf.
12670 */
12671 if (blocknum == nblocks) {
12672 goto error_exit;
12673 }
12674
12675 /*
12676 * Confirm that the first block of the request lies within the
12677 * partition limits. Also the requested number of bytes must be
12678 * a multiple of the system block size.
12679 */
12680 if ((blocknum < 0) || (blocknum >= nblocks) ||
12681 ((bp->b_bcount & (DEV_BSIZE - 1)) != 0)) {
12682 bp->b_flags |= B_ERROR;
12683 goto error_exit;
12684 }
12685
12686 /*
12687 * If the requested # blocks exceeds the available # blocks, that
12688 * is an overrun of the partition.
12689 */
12690 if ((!NOT_DEVBSIZE(un)) && is_aligned) {
12691 requested_nblocks = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
12692 } else {
12693 requested_nblocks = SD_BYTES2SYSBLOCKS(bp->b_bcount);
12694 }
12695
12696 available_nblocks = (size_t)(nblocks - blocknum);
12697 ASSERT(nblocks >= blocknum);
12698
12699 if (requested_nblocks > available_nblocks) {
12700 size_t resid;
12701
12702 /*
12703 * Allocate an "overrun" buf to allow the request to proceed
12704 * for the amount of space available in the partition. The
12705 * amount not transferred will be added into the b_resid
12706 * when the operation is complete. The overrun buf
12707 * replaces the original buf here, and the original buf
12708 * is saved inside the overrun buf, for later use.
12709 */
12710 if ((!NOT_DEVBSIZE(un)) && is_aligned) {
12711 resid = SD_TGTBLOCKS2BYTES(un,
12712 (offset_t)(requested_nblocks - available_nblocks));
12713 } else {
12714 resid = SD_SYSBLOCKS2BYTES(
12715 (offset_t)(requested_nblocks - available_nblocks));
12716 }
12717
12718 size_t count = bp->b_bcount - resid;
12719 /*
12720 * Note: count is an unsigned entity thus it'll NEVER
12721 * be less than 0 so ASSERT the original values are
12722 * correct.
12723 */
12724 ASSERT(bp->b_bcount >= resid);
12725
12726 bp = sd_bioclone_alloc(bp, count, blocknum,
12727 (int (*)(struct buf *)) sd_mapblockaddr_iodone);
12728 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */
12729 ASSERT(xp != NULL);
12730 }
12731
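/*
 * [Editorial aside, not part of the original source.]  The overrun
 * arithmetic above, reduced to plain numbers: if a request starting
 * at blocknum asks for more blocks than the partition still has,
 * clamp the byte count and remember the shortfall as the residual.
 * Plain multiplication stands in for the SD_*BLOCKS2BYTES macros;
 * the sample values are invented.
 */

#include <assert.h>
#include <stdio.h>

#define	BSIZE	512

int
main(void)
{
	long nblocks = 1000;		/* partition size in blocks */
	long blocknum = 990;		/* first block of the request */
	long requested_nblocks = 16;	/* request length in blocks */

	long available_nblocks = nblocks - blocknum;
	assert(available_nblocks >= 0);

	if (requested_nblocks > available_nblocks) {
		size_t resid = (size_t)(requested_nblocks -
		    available_nblocks) * BSIZE;
		size_t count = (size_t)requested_nblocks * BSIZE - resid;

		/* count bytes go to the device; resid is returned later */
		(void) printf("clamped to %zu bytes, resid %zu\n",
		    count, resid);
	}
	return (0);
}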
12753 /*
12754  * Function: sd_mapblockaddr_iodone
12755  *
12756  * Description: Completion-side processing for partition management.
12757  *
12758  * Context: May be called under interrupt context
12759  */
12760
12761 static void
12762 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp)
12763 {
12764 	/* int	partition; */	/* Not used, see below. */
12765 	ASSERT(un != NULL);
12766 	ASSERT(bp != NULL);
12767 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12768
12769 	SD_TRACE(SD_LOG_IO_PARTITION, un,
12770 	    "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp);
12771
12772 	if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) {
12773 		/*
12774 		 * We have an "overrun" buf to deal with...
12775 		 */
12776 		struct sd_xbuf	*xp;
12777 		struct buf	*obp;	/* ptr to the original buf */
12778
12779 		xp = SD_GET_XBUF(bp);
12780 		ASSERT(xp != NULL);
12781
12782 		/* Retrieve the pointer to the original buf */
12783 		obp = (struct buf *)xp->xb_private;
12784 		ASSERT(obp != NULL);
12785
12786 		obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid);
12787 		bioerror(obp, bp->b_error);
12788
12789 		sd_bioclone_free(bp);
12790
12791 		/*
12792 		 * Get back the original buf.
12793 		 * Note that since the restoration of xb_blkno below
12794 		 * was removed, the sd_xbuf is not needed.
12795 		 */
12796 		bp = obp;
12797 		/*
12798 		 * xp = SD_GET_XBUF(bp);
12799 		 * ASSERT(xp != NULL);
12800 		 */
12801 	}
12802
12803 	/*
12804 	 * Convert xp->xb_blkno back to a minor-device relative value.
12805 	 * Note: this has been commented out, as it is not needed in the
12806 	 * current implementation of the driver (ie, this function is at
12807 	 * the top of the layering chains, so the info will be discarded)
12808 	 * and it is in the "hot" IO path.
12809 	 *
12810 	 * partition = getminor(bp->b_edev) & SDPART_MASK;
12811 	 * xp->xb_blkno -= un->un_offset[partition];
12812 	 */
12813
12814 	SD_NEXT_IODONE(index, un, bp);
12815
12816 	SD_TRACE(SD_LOG_IO_PARTITION, un,
12817 	    "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp);
12818 }
12819
12820
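/*
 * Illustrative sketch (editor's addition): how the overrun residual
 * above composes. If the original buf asked for orig_bcount bytes but
 * the clone was clipped to clone_bcount, and the clone itself came
 * back clone_resid bytes short, the original must report everything
 * beyond the bytes the clone actually moved. E.g. 100 bytes
 * requested, clipped to 60, all 60 transferred: b_resid is 40.
 * Hypothetical helper.
 */
#include <stddef.h>

static size_t
overrun_resid(size_t orig_bcount, size_t clone_bcount, size_t clone_resid)
{
	size_t done = clone_bcount - clone_resid;	/* bytes moved */

	return (orig_bcount - done);	/* becomes obp->b_resid */
}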
12821 /*
12822  * Function: sd_mapblocksize_iostart
12823  *
12824  * Description: Convert between system block size (un->un_sys_blocksize)
12825  *		and target block size (un->un_tgt_blocksize).
12826  *
12827  * Context: Can sleep to allocate resources.
12828  *
12829  * Assumptions: A higher layer has already performed any partition validation,
12830  *		and converted the xp->xb_blkno to an absolute value relative
12831  *		to the start of the device.
12832  *
12833  *		It is also assumed that the higher layer has implemented
12834  *		an "overrun" mechanism for the case where the request would
12835  *		read/write beyond the end of a partition. In this case we
12836  *		assume (and ASSERT) that bp->b_resid == 0.
12837  *
12838  *		Note: The implementation for this routine assumes the target
12839  *		block size remains constant between allocation and transport.
12840  */
12841
12842 static void
12843 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp)
12844 {
12845 	struct sd_mapblocksize_info	*bsp;
12846 	struct sd_xbuf			*xp;
12847 	offset_t first_byte;
12848 	daddr_t	start_block, end_block;
12849 	daddr_t	request_bytes;
12850 	ushort_t is_aligned = FALSE;
12851
12852 	ASSERT(un != NULL);
12853 	ASSERT(bp != NULL);
12854 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12855 	ASSERT(bp->b_resid == 0);
12856
12857 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
12858 	    "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp);
12859
12860 	/*
12861 	 * For a non-writable CD, a write request is an error
12862 	 */
12863 	if (ISCD(un) && ((bp->b_flags & B_READ) == 0) &&
12864 	    (un->un_f_mmc_writable_media == FALSE)) {
12865 		bioerror(bp, EIO);
12866 		bp->b_resid = bp->b_bcount;
12867 		SD_BEGIN_IODONE(index, un, bp);
12868 		return;
12869 	}
12870
12871 	/*
12872 	 * We do not need a shadow buf if the device is using
12873 	 * un->un_sys_blocksize as its block size or if bcount == 0.
12874 	 * In this case there is no layer-private data block allocated.
12875 	 */
12876 	if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
12877 	    (bp->b_bcount == 0)) {
12878 		goto done;
12879 	}
12880
12881 #if defined(__i386) || defined(__amd64)
12882 	/* We do not support non-block-aligned transfers for ROD devices */
12883 	ASSERT(!ISROD(un));
12884 #endif
12885
12886 	xp = SD_GET_XBUF(bp);
12887 	ASSERT(xp != NULL);
12888
12889 	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12890 	    "tgt_blocksize:0x%x sys_blocksize:0x%x\n",
12891 	    un->un_tgt_blocksize, DEV_BSIZE);
12892 	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12893 	    "request start block:0x%x\n", xp->xb_blkno);
12894 	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12895 	    "request len:0x%x\n", bp->b_bcount);
12896
12897 	/*
12898 	 * Allocate the layer-private data area for the mapblocksize layer.
12899 	 * Layers are allowed to use the xb_private member of the sd_xbuf
12900 	 * struct to store the pointer to their layer-private data block, but
12901 	 * each layer also has the responsibility of restoring the prior
12902 	 * contents of xb_private before returning the buf/xbuf to the
12903 	 * higher layer that sent it.
12904 	 *
12905 	 * Here we save the prior contents of xp->xb_private into the
12906 	 * bsp->mbs_oprivate field of our layer-private data area. This value
12907 	 * is restored by sd_mapblocksize_iodone() just prior to freeing up
12908 	 * the layer-private area and returning the buf/xbuf to the layer
12909 	 * that sent it.
12910 	 *
12911 	 * Note that here we use kmem_zalloc for the allocation as there are
12912 	 * parts of the mapblocksize code that expect certain fields to be
12913 	 * zero unless explicitly set to a required value.
12914 	 */
12915 	bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
12916 	bsp->mbs_oprivate = xp->xb_private;
12917 	xp->xb_private = bsp;
12918
12919 	/*
12920 	 * This treats the data on the disk (target) as an array of bytes.
12921 	 * first_byte is the byte offset, from the beginning of the device,
12922 	 * to the location of the request. This is converted from a
12923 	 * un->un_sys_blocksize block address to a byte offset, and then back
12924 	 * to a block address based upon a un->un_tgt_blocksize block size.
12925 	 *
12926 	 * xp->xb_blkno should be absolute upon entry into this function,
12927 	 * but it is based upon partitions that use the "system"
12928 	 * block size. It must be adjusted to reflect the block size of
12929 	 * the target.
12930 	 *
12931 	 * Note that end_block is actually the block that follows the last
12932 	 * block of the request, but that's what is needed for the computation.
12933 	 */
12934 	first_byte = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
12935 	if (un->un_f_enable_rmw) {
12936 		start_block = xp->xb_blkno =
12937 		    (first_byte / un->un_phy_blocksize) *
12938 		    (un->un_phy_blocksize / DEV_BSIZE);
12939 		end_block = ((first_byte + bp->b_bcount +
12940 		    un->un_phy_blocksize - 1) / un->un_phy_blocksize) *
12941 		    (un->un_phy_blocksize / DEV_BSIZE);
12942 	} else {
12943 		start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
12944 		end_block = (first_byte + bp->b_bcount +
12945 		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;
12946 	}
12947
12948 	/* request_bytes is rounded up to a multiple of the target block size */
12949 	request_bytes = (end_block - start_block) * un->un_tgt_blocksize;
12950
12951 	/*
12952 	 * See if the starting address of the request and the request
12953 	 * length are aligned on a un->un_tgt_blocksize boundary. If aligned
12954 	 * then we do not need to allocate a shadow buf to handle the request.
12955 	 */
12956 	if (un->un_f_enable_rmw) {
12957 		if (((first_byte % un->un_phy_blocksize) == 0) &&
12958 		    ((bp->b_bcount % un->un_phy_blocksize) == 0)) {
12959 			is_aligned = TRUE;
12960 		}
12961 	} else {
12962 		if (((first_byte % un->un_tgt_blocksize) == 0) &&
12963 		    ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
12964 			is_aligned = TRUE;
12965 		}
12966 	}
12967
12968 	if ((bp->b_flags & B_READ) == 0) {
12969 		/*
12970 		 * Lock the range for a write operation. An aligned request is
12971 		 * considered a simple write; otherwise the request must be a
12972 		 * read-modify-write.
12973 		 */
12974 		bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
12975 		    (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
12976 	}
12977
12978 	/*
12979 	 * Alloc a shadow buf if the request is not aligned. Also, this is
12980 	 * where the READ command is generated for a read-modify-write. (The
12981 	 * write phase is deferred until after the read completes.)
12982 	 */
12983 	if (is_aligned == FALSE) {
12984
12985 		struct sd_mapblocksize_info	*shadow_bsp;
12986 		struct sd_xbuf	*shadow_xp;
12987 		struct buf	*shadow_bp;
12988
12989 		/*
12990 		 * Allocate the shadow buf and its associated xbuf. Note that
12991 		 * after this call the xb_blkno value in both the original
12992 		 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
12993 		 * same: absolute, relative to the start of the device, and
12994 		 * adjusted for the target block size. The b_blkno in the
12995 		 * shadow buf will also be set to this value. We should never
12996 		 * change b_blkno in the original bp however.
12997 		 *
12998 		 * Note also that the shadow buf will always need to be a
12999 		 * READ command, regardless of whether the incoming command
13000 		 * is a READ or a WRITE.
13001 		 */
13002 		shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
13003 		    xp->xb_blkno,
13004 		    (int (*)(struct buf *)) sd_mapblocksize_iodone);
13005
13006 		shadow_xp = SD_GET_XBUF(shadow_bp);
13007
13008 		/*
13009 		 * Allocate the layer-private data for the shadow buf.
13010 		 * (No need to preserve xb_private in the shadow xbuf.)
13011 		 */
13012 		shadow_xp->xb_private = shadow_bsp =
13013 		    kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
13014
13015 		/*
13016 		 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone
13017 		 * to figure out where the start of the user data is (based upon
13018 		 * the system block size) in the data returned by the READ
13019 		 * command (which will be based upon the target blocksize). Note
13020 		 * that this is only really used if the request is unaligned.
13021 		 */
13022 		if (un->un_f_enable_rmw) {
13023 			bsp->mbs_copy_offset = (ssize_t)(first_byte -
13024 			    ((offset_t)xp->xb_blkno * un->un_sys_blocksize));
13025 			ASSERT((bsp->mbs_copy_offset >= 0) &&
13026 			    (bsp->mbs_copy_offset < un->un_phy_blocksize));
13027 		} else {
13028 			bsp->mbs_copy_offset = (ssize_t)(first_byte -
13029 			    ((offset_t)xp->xb_blkno * un->un_tgt_blocksize));
13030 			ASSERT((bsp->mbs_copy_offset >= 0) &&
13031 			    (bsp->mbs_copy_offset < un->un_tgt_blocksize));
13032 		}
13033
13034 		shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset;
13035
13036 		shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index;
13037
13038 		/* Transfer the wmap (if any) to the shadow buf */
13039 		shadow_bsp->mbs_wmp = bsp->mbs_wmp;
13040 		bsp->mbs_wmp = NULL;
13041
13042 		/*
13043 		 * The shadow buf goes on from here in place of the
13044 		 * original buf.
13045 		 */
13046 		shadow_bsp->mbs_orig_bp = bp;
13047 		bp = shadow_bp;
13048 	}
13049
13050 	SD_INFO(SD_LOG_IO_RMMEDIA, un,
13051 	    "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno);
13052 	SD_INFO(SD_LOG_IO_RMMEDIA, un,
13053 	    "sd_mapblocksize_iostart: tgt request len:0x%x\n",
13054 	    request_bytes);
13055 	SD_INFO(SD_LOG_IO_RMMEDIA, un,
13056 	    "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp);
13057
13058 done:
13059 	SD_NEXT_IOSTART(index, un, bp);
13060
13061 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
13062 	    "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp);
13063 }
13064
13065
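/*
 * Illustrative sketch (editor's addition): the byte-oriented block
 * math above, non-RMW path, assuming 512-byte system blocks
 * (DEV_BSIZE) and a 2048-byte target block. A 1024-byte request at
 * system block 7 becomes first_byte 3584, target blocks [1, 3), a
 * 4096-byte shadow READ, and copy_offset 1536 to the user's data.
 * Hypothetical helper.
 */
#include <sys/types.h>

static void
map_blocksize(offset_t sys_blkno, size_t bcount, uint32_t tgt_bsize,
    daddr_t *start_block, daddr_t *end_block, ssize_t *copy_offset)
{
	offset_t first_byte = sys_blkno * 512;	/* DEV_BSIZE */

	*start_block = (daddr_t)(first_byte / tgt_bsize);
	*end_block = (daddr_t)((first_byte + bcount + tgt_bsize - 1) /
	    tgt_bsize);
	*copy_offset = (ssize_t)(first_byte -
	    (offset_t)*start_block * tgt_bsize);
}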
13066 /*
13067  * Function: sd_mapblocksize_iodone
13068  *
13069  * Description: Completion side processing for block-size mapping.
13070  *
13071  * Context: May be called under interrupt context
13072  */
13073
13074 static void
13075 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp)
13076 {
13077 	struct sd_mapblocksize_info	*bsp;
13078 	struct sd_xbuf	*xp;
13079 	struct sd_xbuf	*orig_xp;	/* sd_xbuf for the original buf */
13080 	struct buf	*orig_bp;	/* ptr to the original buf */
13081 	offset_t	shadow_end;
13082 	offset_t	request_end;
13083 	offset_t	shadow_start;
13084 	ssize_t		copy_offset;
13085 	size_t		copy_length;
13086 	size_t		shortfall;
13087 	uint_t		is_write;	/* TRUE if this bp is a WRITE */
13088 	uint_t		has_wmap;	/* TRUE if this bp has a wmap */
13089
13090 	ASSERT(un != NULL);
13091 	ASSERT(bp != NULL);
13092
13093 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
13094 	    "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp);
13095
13096 	/*
13097 	 * There is no shadow buf or layer-private data if the target is
13098 	 * using un->un_sys_blocksize as its block size or if bcount == 0.
13099 	 */
13100 	if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
13101 	    (bp->b_bcount == 0)) {
13102 		goto exit;
13103 	}
13104
13105 	xp = SD_GET_XBUF(bp);
13106 	ASSERT(xp != NULL);
13107
13108 	/* Retrieve the pointer to the layer-private data area from the xbuf. */
13109 	bsp = xp->xb_private;
13110
13111 	is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE;
13112 	has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE;
13113
13114 	if (is_write) {
13115 		/*
13116 		 * For a WRITE request we must free up the block range that
13117 		 * we have locked up. This holds regardless of whether this is
13118 		 * an aligned write request or a read-modify-write request.
13119 		 */
13120 		sd_range_unlock(un, bsp->mbs_wmp);
13121 		bsp->mbs_wmp = NULL;
13122 	}
13123
13124 	if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) {
13125 		/*
13126 		 * An aligned read or write command will have no shadow buf;
13127 		 * there is not much else to do with it.
13128 		 */
13129 		goto done;
13130 	}
13131
13132 	orig_bp = bsp->mbs_orig_bp;
13133 	ASSERT(orig_bp != NULL);
13134 	orig_xp = SD_GET_XBUF(orig_bp);
13135 	ASSERT(orig_xp != NULL);
13136 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13137
13138 	if (!is_write && has_wmap) {
13139 		/*
13140 		 * A READ with a wmap means this is the READ phase of a
13141 		 * read-modify-write. If an error occurred on the READ then
13142 		 * we do not proceed with the WRITE phase or copy any data.
13143 		 * Just release the write maps and return with an error.
13144 		 */
13145 		if ((bp->b_resid != 0) || (bp->b_error != 0)) {
13146 			orig_bp->b_resid = orig_bp->b_bcount;
13147 			bioerror(orig_bp, bp->b_error);
13148 			sd_range_unlock(un, bsp->mbs_wmp);
13149 			goto freebuf_done;
13150 		}
13151 	}
13152
13153 	/*
13154 	 * Here is where we set up to copy the data from the shadow buf
13155 	 * into the space associated with the original buf.
13156 	 *
13157 	 * To deal with the conversion between block sizes, these
13158 	 * computations treat the data as an array of bytes, with the
13159 	 * first byte (byte 0) corresponding to the first byte in the
13160 	 * first block on the disk.
13161 	 */
13162
13163 	/*
13164 	 * shadow_start and shadow_len indicate the location and size of
13165 	 * the data returned with the shadow IO request.
13166 	 */
13167 	if (un->un_f_enable_rmw) {
13168 		shadow_start = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
13169 	} else {
13170 		shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
13171 	}
13172 	shadow_end = shadow_start + bp->b_bcount - bp->b_resid;
13173
13174 	/*
13175 	 * copy_offset gives the offset (in bytes) from the start of the first
13176 	 * block of the READ request to the beginning of the data. We retrieve
13177 	 * this value from the layer-private area of the xbuf, where it was
13178 	 * saved by sd_mapblocksize_iostart(). copy_length gives the amount of
13179 	 * data to be copied (in bytes).
13180 	 */
13181 	copy_offset = bsp->mbs_copy_offset;
13182 	if (un->un_f_enable_rmw) {
13183 		ASSERT((copy_offset >= 0) &&
13184 		    (copy_offset < un->un_phy_blocksize));
13185 	} else {
13186 		ASSERT((copy_offset >= 0) &&
13187 		    (copy_offset < un->un_tgt_blocksize));
13188 	}
13189
13190 	copy_length = orig_bp->b_bcount;
13191 	request_end = shadow_start + copy_offset + orig_bp->b_bcount;
13192
13193 	/*
13194 	 * Set up the resid and error fields of orig_bp as appropriate.
13195 	 */
13196 	if (shadow_end >= request_end) {
13197 		/* We got all the requested data; set resid to zero */
13198 		orig_bp->b_resid = 0;
13199 	} else {
13200 		/*
13201 		 * We failed to get enough data to fully satisfy the original
13202 		 * request. Just copy back whatever data we got and set
13203 		 * up the residual and error code as required.
13204 		 *
13205 		 * 'shortfall' is the amount by which the data received with the
13206 		 * shadow buf has "fallen short" of the requested amount.
13207 		 */
13208 		shortfall = (size_t)(request_end - shadow_end);
13209
13210 		if (shortfall > orig_bp->b_bcount) {
13211 			/*
13212 			 * We did not get enough data to even partially
13213 			 * fulfill the original request. The residual is
13214 			 * equal to the amount requested.
13215 			 */
13216 			orig_bp->b_resid = orig_bp->b_bcount;
13217 		} else {
13218 			/*
13219 			 * We did not get all the data that we requested
13220 			 * from the device, but we will try to return what
13221 			 * portion we did get.
13222 			 */
13223 			orig_bp->b_resid = shortfall;
13224 		}
13225 		ASSERT(copy_length >= orig_bp->b_resid);
13226 		copy_length -= orig_bp->b_resid;
13227 	}
13228
13229 	/* Propagate the error code from the shadow buf to the original buf */
13230 	bioerror(orig_bp, bp->b_error);
13231
13232 	if (is_write) {
13233 		goto freebuf_done;	/* No data copying for a WRITE */
13234 	}
13235
13236 	if (has_wmap) {
13237 		/*
13238 		 * This is a READ command from the READ phase of a
13239 		 * read-modify-write request. We have to copy the data given
13240 		 * by the user OVER the data returned by the READ command,
13241 		 * then convert the command from a READ to a WRITE and send
13242 		 * it back to the target.
13243 		 */
13244 		bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset,
13245 		    copy_length);
13246
13247 		bp->b_flags &= ~((int)B_READ);	/* Convert to a WRITE */
13248
13249 		/*
13250 		 * Dispatch the WRITE command to the taskq thread, which
13251 		 * will in turn send the command to the target. When the
13252 		 * WRITE command completes, we (sd_mapblocksize_iodone())
13253 		 * will get called again as part of the iodone chain
13254 		 * processing for it. Note that we will still be dealing
13255 		 * with the shadow buf at that point.
13256 		 */
13257 		if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp,
13258 		    KM_NOSLEEP) != 0) {
13259 			/*
13260 			 * Dispatch was successful so we are done. Return
13261 			 * without going any higher up the iodone chain. Do
13262 			 * not free up any layer-private data until after the
13263 			 * WRITE completes.
13264 			 */
13265 			return;
13266 		}
13267
13268 		/*
13269 		 * Dispatch of the WRITE command failed; set up the error
13270 		 * condition and send this IO back up the iodone chain.
13271 		 */
13272 		bioerror(orig_bp, EIO);
13273 		orig_bp->b_resid = orig_bp->b_bcount;
13274
13275 	} else {
13276 		/*
13277 		 * This is a regular READ request (ie, not a RMW). Copy the
13278 		 * data from the shadow buf into the original buf. The
13279 		 * copy_offset compensates for any "misalignment" between the
13280 		 * shadow buf (with its un->un_tgt_blocksize blocks) and the
13281 		 * original buf (with its un->un_sys_blocksize blocks).
13282 		 */
13283 		bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr,
13284 		    copy_length);
13285 	}
13286
13287 freebuf_done:
13288
13289 	/*
13290 	 * At this point we still have both the shadow buf AND the original
13291 	 * buf to deal with, as well as the layer-private data area in each.
13292 	 * Local variables are as follows:
13293 	 *
13294 	 * bp -- points to shadow buf
13295 	 * xp -- points to xbuf of shadow buf
13296 	 * bsp -- points to layer-private data area of shadow buf
13297 	 * orig_bp -- points to original buf
13298 	 *
13299 	 * First free the shadow buf and its associated xbuf, then free the
13300 	 * layer-private data area from the shadow buf. There is no need to
13301 	 * restore xb_private in the shadow xbuf.
13302 	 */
13303 	sd_shadow_buf_free(bp);
13304 	kmem_free(bsp, sizeof (struct sd_mapblocksize_info));
13305
13306 	/*
13307 	 * Now update the local variables to point to the original buf, xbuf,
13308 	 * and layer-private area.
13309 	 */
13310 	bp = orig_bp;
13311 	xp = SD_GET_XBUF(bp);
13312 	ASSERT(xp != NULL);
13313 	ASSERT(xp == orig_xp);
13314 	bsp = xp->xb_private;
13315 	ASSERT(bsp != NULL);
13316
13317 done:
13318 	/*
13319 	 * Restore xb_private to whatever it was set to by the next higher
13320 	 * layer in the chain, then free the layer-private data area.
13321 	 */
13322 	xp->xb_private = bsp->mbs_oprivate;
13323 	kmem_free(bsp, sizeof (struct sd_mapblocksize_info));
13324
13325 exit:
13326 	SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp),
13327 	    "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp);
13328
13329 	SD_NEXT_IODONE(index, un, bp);
13330 }
13331
13332
13333 /*
13334  * Function: sd_checksum_iostart
13335  *
13336  * Description: A stub function for a layer that's currently not used.
13337  *		For now just a placeholder.
13338  *
13339  * Context: Kernel thread context
13340  */
13341
13342 static void
13343 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp)
13344 {
13345 	ASSERT(un != NULL);
13346 	ASSERT(bp != NULL);
13347 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13348 	SD_NEXT_IOSTART(index, un, bp);
13349 }
13350
13351
13352 /*
13353  * Function: sd_checksum_iodone
13354  *
13355  * Description: A stub function for a layer that's currently not used.
13356  *		For now just a placeholder.
13357  *
13358  * Context: May be called under interrupt context
13359  */
13360
13361 static void
13362 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp)
13363 {
13364 	ASSERT(un != NULL);
13365 	ASSERT(bp != NULL);
13366 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13367 	SD_NEXT_IODONE(index, un, bp);
13368 }
13369
13370
13371 /*
13372  * Function: sd_checksum_uscsi_iostart
13373  *
13374  * Description: A stub function for a layer that's currently not used.
13375  *		For now just a placeholder.
13376  *
13377  * Context: Kernel thread context
13378  */
13379
13380 static void
13381 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp)
13382 {
13383 	ASSERT(un != NULL);
13384 	ASSERT(bp != NULL);
13385 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13386 	SD_NEXT_IOSTART(index, un, bp);
13387 }
13388
13389
13390 /*
13391  * Function: sd_checksum_uscsi_iodone
13392  *
13393  * Description: A stub function for a layer that's currently not used.
13394  *		For now just a placeholder.
13395  *
13396  * Context: May be called under interrupt context
13397  */
13398
13399 static void
13400 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
13401 {
13402 	ASSERT(un != NULL);
13403 	ASSERT(bp != NULL);
13404 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13405 	SD_NEXT_IODONE(index, un, bp);
13406 }
13407
13408
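/*
 * Illustrative sketch (editor's addition): the shortfall arithmetic
 * used by sd_mapblocksize_iodone() above. request_end and shadow_end
 * are byte offsets from the start of the device; whatever the shadow
 * READ failed to deliver of the user's data becomes b_resid on the
 * original buf, capped at the original byte count. Hypothetical
 * helper.
 */
#include <sys/types.h>
#include <stddef.h>

static size_t
rmw_resid(offset_t shadow_end, offset_t request_end, size_t orig_bcount)
{
	size_t shortfall;

	if (shadow_end >= request_end)
		return (0);		/* got all the requested data */
	shortfall = (size_t)(request_end - shadow_end);
	return ((shortfall > orig_bcount) ? orig_bcount : shortfall);
}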
13409 /*
13410  * Function: sd_pm_iostart
13411  *
13412  * Description: iostart-side routine for power management.
13413  *
13414  * Context: Kernel thread context
13415  */
13416
13417 static void
13418 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
13419 {
13420 	ASSERT(un != NULL);
13421 	ASSERT(bp != NULL);
13422 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13423 	ASSERT(!mutex_owned(&un->un_pm_mutex));
13424
13425 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");
13426
13427 	if (sd_pm_entry(un) != DDI_SUCCESS) {
13428 		/*
13429 		 * Set up to return the failed buf back up the 'iodone'
13430 		 * side of the calling chain.
13431 		 */
13432 		bioerror(bp, EIO);
13433 		bp->b_resid = bp->b_bcount;
13434
13435 		SD_BEGIN_IODONE(index, un, bp);
13436
13437 		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13438 		return;
13439 	}
13440
13441 	SD_NEXT_IOSTART(index, un, bp);
13442
13443 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13444 }
13445
13446
13447 /*
13448  * Function: sd_pm_iodone
13449  *
13450  * Description: iodone-side routine for power management.
13451  *
13452  * Context: may be called from interrupt context
13453  */
13454
13455 static void
13456 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
13457 {
13458 	ASSERT(un != NULL);
13459 	ASSERT(bp != NULL);
13460 	ASSERT(!mutex_owned(&un->un_pm_mutex));
13461
13462 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");
13463
13464 	/*
13465 	 * After attach the following flag is only read, so don't
13466 	 * take the penalty of acquiring a mutex for it.
13467 	 */
13468 	if (un->un_f_pm_is_enabled == TRUE) {
13469 		sd_pm_exit(un);
13470 	}
13471
13472 	SD_NEXT_IODONE(index, un, bp);
13473
13474 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n");
13475 }
13476
13477
13478 /*
13479  * Function: sd_core_iostart
13480  *
13481  * Description: Primary driver function for enqueuing buf(9S) structs from
13482  *		the system and initiating IO to the target device
13483  *
13484  * Context: Kernel thread context. Can sleep.
13485  *
13486  * Assumptions: - The given xp->xb_blkno is absolute
13487  *		  (ie, relative to the start of the device).
13488  *		- The IO is to be done using the native blocksize of
13489  *		  the device, as specified in un->un_tgt_blocksize.
13490  */
13491 /* ARGSUSED */
13492 static void
13493 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp)
13494 {
13495 	struct sd_xbuf *xp;
13496
13497 	ASSERT(un != NULL);
13498 	ASSERT(bp != NULL);
13499 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13500 	ASSERT(bp->b_resid == 0);
13501
13502 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp);
13503
13504 	xp = SD_GET_XBUF(bp);
13505 	ASSERT(xp != NULL);
13506
13507 	mutex_enter(SD_MUTEX(un));
13508
13509 	/*
13510 	 * If we are currently in the failfast state, fail any new IO
13511 	 * that has B_FAILFAST set, then return.
13512 	 */
13513 	if ((bp->b_flags & B_FAILFAST) &&
13514 	    (un->un_failfast_state == SD_FAILFAST_ACTIVE)) {
13515 		mutex_exit(SD_MUTEX(un));
13516 		bioerror(bp, EIO);
13517 		bp->b_resid = bp->b_bcount;
13518 		SD_BEGIN_IODONE(index, un, bp);
13519 		return;
13520 	}
13521
13522 	if (SD_IS_DIRECT_PRIORITY(xp)) {
13523 		/*
13524 		 * Priority command -- transport it immediately.
13525 		 *
13526 		 * Note: We may want to assert that USCSI_DIAGNOSE is set,
13527 		 * because all direct priority commands should be associated
13528 		 * with error recovery actions which we don't want to retry.
13529 		 */
13530 		sd_start_cmds(un, bp);
13531 	} else {
13532 		/*
13533 		 * Normal command -- add it to the wait queue, then start
13534 		 * transporting commands from the wait queue.
13535 		 */
13536 		sd_add_buf_to_waitq(un, bp);
13537 		SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
13538 		sd_start_cmds(un, NULL);
13539 	}
13540
13541 	mutex_exit(SD_MUTEX(un));
13542
13543 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp);
13544 }
13545
13546
13547 /*
13548  * Function: sd_init_cdb_limits
13549  *
13550  * Description: This is to handle scsi_pkt initialization differences
13551  *		between the driver platforms.
13552  *
13553  *		Legacy behaviors:
13554  *
13555  *		If the block number or the sector count exceeds the
13556  *		capabilities of a Group 0 command, shift over to a
13557  *		Group 1 command. We don't blindly use Group 1
13558  *		commands because a) some drives (CDC Wren IVs) get a
13559  *		bit confused, and b) there is probably a fair amount
13560  *		of speed difference for a target to receive and decode
13561  *		a 10 byte command instead of a 6 byte command.
13562  *
13563  *		The xfer time difference of 6 vs 10 byte CDBs is
13564  *		still significant, so this code is still worthwhile.
13565  *		10 byte CDBs are very inefficient with the fas HBA driver
13566  *		and older disks. Each CDB byte took 1 usec with some
13567  *		popular disks.
13568  *
13569  * Context: Must be called at attach time
13570  */
13571
13572 static void
13573 sd_init_cdb_limits(struct sd_lun *un)
13574 {
13575 	int hba_cdb_limit;
13576
13577 	/*
13578 	 * Use CDB_GROUP1 commands for most devices except for
13579 	 * parallel SCSI fixed drives in which case we get better
13580 	 * performance using CDB_GROUP0 commands (where applicable).
13581 	 */
13582 	un->un_mincdb = SD_CDB_GROUP1;
13583 #if !defined(__fibre)
13584 	if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) &&
13585 	    !un->un_f_has_removable_media) {
13586 		un->un_mincdb = SD_CDB_GROUP0;
13587 	}
13588 #endif
13589
13590 	/*
13591 	 * Try to read the max-cdb-length supported by the HBA.
13592 	 */
13593 	un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1);
13594 	if (0 >= un->un_max_hba_cdb) {
13595 		un->un_max_hba_cdb = CDB_GROUP4;
13596 		hba_cdb_limit = SD_CDB_GROUP4;
13597 	} else if (0 < un->un_max_hba_cdb &&
13598 	    un->un_max_hba_cdb < CDB_GROUP1) {
13599 		hba_cdb_limit = SD_CDB_GROUP0;
13600 	} else if (CDB_GROUP1 <= un->un_max_hba_cdb &&
13601 	    un->un_max_hba_cdb < CDB_GROUP5) {
13602 		hba_cdb_limit = SD_CDB_GROUP1;
13603 	} else if (CDB_GROUP5 <= un->un_max_hba_cdb &&
13604 	    un->un_max_hba_cdb < CDB_GROUP4) {
13605 		hba_cdb_limit = SD_CDB_GROUP5;
13606 	} else {
13607 		hba_cdb_limit = SD_CDB_GROUP4;
13608 	}
13609
13610 	/*
13611 	 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4
13612 	 * commands for fixed disks unless we are building for a 32 bit
13613 	 * kernel.
13614 	 */
13615 #ifdef _LP64
13616 	un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
13617 	    min(hba_cdb_limit, SD_CDB_GROUP4);
13618 #else
13619 	un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
13620 	    min(hba_cdb_limit, SD_CDB_GROUP1);
13621 #endif
13622
13623 	un->un_status_len = (int)((un->un_f_arq_enabled == TRUE)
13624 	    ? sizeof (struct scsi_arq_status) : 1);
13625 	un->un_cmd_timeout = (ushort_t)sd_io_time;
13626 	un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout;
13627 }
13628
13629
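/*
 * Illustrative sketch (editor's addition): the max-cdb-length to
 * CDB-group mapping above, with the standard CDB sizes written out
 * (group 0 = 6 bytes, group 1 = 10, group 5 = 12, group 4 = 16).
 * Returns the largest CDB size, in bytes, the HBA can pass through.
 * Hypothetical helper.
 */
static int
cdb_limit_for(int max_hba_cdb)
{
	if (max_hba_cdb <= 0)
		return (16);	/* capability unknown: assume group 4 */
	if (max_hba_cdb < 10)
		return (6);	/* group 0 only */
	if (max_hba_cdb < 12)
		return (10);	/* group 1 */
	if (max_hba_cdb < 16)
		return (12);	/* group 5 */
	return (16);		/* group 4 */
}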
13630 /*
13631  * Function: sd_initpkt_for_buf
13632  *
13633  * Description: Allocate and initialize for transport a scsi_pkt struct,
13634  *		based upon the info specified in the given buf struct.
13635  *
13636  *		Assumes the xb_blkno in the request is absolute (ie,
13637  *		relative to the start of the device, NOT the partition!).
13638  *		Also assumes that the request is using the native block
13639  *		size of the device (as returned by the READ CAPACITY
13640  *		command).
13641  *
13642  * Return Code: SD_PKT_ALLOC_SUCCESS
13643  *		SD_PKT_ALLOC_FAILURE
13644  *		SD_PKT_ALLOC_FAILURE_NO_DMA
13645  *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13646  *
13647  * Context: Kernel thread and may be called from software interrupt context
13648  *		as part of a sdrunout callback. This function may not block or
13649  *		call routines that block.
13650  */
13651
13652 static int
13653 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
13654 {
13655 	struct sd_xbuf	*xp;
13656 	struct scsi_pkt *pktp = NULL;
13657 	struct sd_lun	*un;
13658 	size_t		blockcount;
13659 	daddr_t		startblock;
13660 	int		rval;
13661 	int		cmd_flags;
13662
13663 	ASSERT(bp != NULL);
13664 	ASSERT(pktpp != NULL);
13665 	xp = SD_GET_XBUF(bp);
13666 	ASSERT(xp != NULL);
13667 	un = SD_GET_UN(bp);
13668 	ASSERT(un != NULL);
13669 	ASSERT(mutex_owned(SD_MUTEX(un)));
13670 	ASSERT(bp->b_resid == 0);
13671
13672 	SD_TRACE(SD_LOG_IO_CORE, un,
13673 	    "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);
13674
13675 	mutex_exit(SD_MUTEX(un));
13676
13677 #if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
13678 	if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
13679 		/*
13680 		 * Already have a scsi_pkt -- just need DMA resources.
13681 		 * We must recompute the CDB in case the mapping returns
13682 		 * a nonzero pkt_resid.
13683 		 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
13684 		 * that is being retried, the unmap/remap of the DMA resources
13685 		 * will result in the entire transfer starting over again
13686 		 * from the very first block.
13687 		 */
13688 		ASSERT(xp->xb_pktp != NULL);
13689 		pktp = xp->xb_pktp;
13690 	} else {
13691 		pktp = NULL;
13692 	}
13693 #endif /* __i386 || __amd64 */
13694
13695 	startblock = xp->xb_blkno;	/* Absolute block num. */
13696 	blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
13697
13698 	cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);
13699
13700 	/*
13701 	 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
13702 	 * call scsi_init_pkt, and build the CDB.
13703 	 */
13704 	rval = sd_setup_rw_pkt(un, &pktp, bp,
13705 	    cmd_flags, sdrunout, (caddr_t)un,
13706 	    startblock, blockcount);
13707
13708 	if (rval == 0) {
13709 		/*
13710 		 * Success.
13711 		 *
13712 		 * If partial DMA is being used and required for this transfer,
13713 		 * set it up here.
13714 		 */
13715 		if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
13716 		    (pktp->pkt_resid != 0)) {
13717
13718 			/*
13719 			 * Save the CDB length and pkt_resid for the
13720 			 * next xfer
13721 			 */
13722 			xp->xb_dma_resid = pktp->pkt_resid;
13723
13724 			/* rezero resid */
13725 			pktp->pkt_resid = 0;
13726
13727 		} else {
13728 			xp->xb_dma_resid = 0;
13729 		}
13730
13731 		pktp->pkt_flags = un->un_tagflags;
13732 		pktp->pkt_time  = un->un_cmd_timeout;
13733 		pktp->pkt_comp  = sdintr;
13734
13735 		pktp->pkt_private = bp;
13736 		*pktpp = pktp;
13737
13738 		SD_TRACE(SD_LOG_IO_CORE, un,
13739 		    "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);
13740
13741 #if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
13742 		xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
13743 #endif
13744
13745 		mutex_enter(SD_MUTEX(un));
13746 		return (SD_PKT_ALLOC_SUCCESS);
13747
13748 	}
13749
13750 	/*
13751 	 * SD_PKT_ALLOC_FAILURE is the only expected failure code
13752 	 * from sd_setup_rw_pkt.
13753 	 */
13754 	ASSERT(rval == SD_PKT_ALLOC_FAILURE);
13755
13756 	if (rval == SD_PKT_ALLOC_FAILURE) {
13757 		*pktpp = NULL;
13758 		/*
13759 		 * Set the driver state to RWAIT to indicate the driver
13760 		 * is waiting on resource allocations. The driver will not
13761 		 * suspend, pm_suspend, or detach while the state is RWAIT.
13762 		 */
13763 		mutex_enter(SD_MUTEX(un));
13764 		New_state(un, SD_STATE_RWAIT);
13765
13766 		SD_ERROR(SD_LOG_IO_CORE, un,
13767 		    "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);
13768
13769 		if ((bp->b_flags & B_ERROR) != 0) {
13770 			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
13771 		}
13772 		return (SD_PKT_ALLOC_FAILURE);
13773 	} else {
13774 		/*
13775 		 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13776 		 *
13777 		 * This should never happen. Maybe someone messed with the
13778 		 * kernel's minphys?
13779 		 */
13780 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13781 		    "Request rejected: too large for CDB: "
13782 		    "lba:0x%08lx  len:0x%08lx\n", startblock, blockcount);
13783 		SD_ERROR(SD_LOG_IO_CORE, un,
13784 		    "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
13785 		mutex_enter(SD_MUTEX(un));
13786 		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13787
13788 	}
13789 }
13790
13791
13792 /*
13793  * Function: sd_destroypkt_for_buf
13794  *
13795  * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
13796  *
13797  * Context: Kernel thread or interrupt context
13798  */
13799
13800 static void
13801 sd_destroypkt_for_buf(struct buf *bp)
13802 {
13803 	ASSERT(bp != NULL);
13804 	ASSERT(SD_GET_UN(bp) != NULL);
13805
13806 	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13807 	    "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);
13808
13809 	ASSERT(SD_GET_PKTP(bp) != NULL);
13810 	scsi_destroy_pkt(SD_GET_PKTP(bp));
13811
13812 	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13813 	    "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
13814 }
13815
13816 /*
13817  * Function: sd_setup_rw_pkt
13818  *
13819  * Description: Determines appropriate CDB group for the requested LBA
13820  *		and transfer length, calls scsi_init_pkt, and builds
13821  *		the CDB. Do not use for partial DMA transfers except
13822  *		for the initial transfer since the CDB size must
13823  *		remain constant.
13824  *
13825  * Context: Kernel thread and may be called from software interrupt
13826  *		context as part of a sdrunout callback. This function may not
13827  *		block or call routines that block.
13828  */
13829
13830
13831 int
13832 sd_setup_rw_pkt(struct sd_lun *un,
13833     struct scsi_pkt **pktpp, struct buf *bp, int flags,
13834     int (*callback)(caddr_t), caddr_t callback_arg,
13835     diskaddr_t lba, uint32_t blockcount)
13836 {
13837 	struct scsi_pkt *return_pktp;
13838 	union scsi_cdb *cdbp;
13839 	struct sd_cdbinfo *cp = NULL;
13840 	int i;
13841
13842 	/*
13843 	 * See which size CDB to use, based upon the request.
13844 	 */
13845 	for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {
13846
13847 		/*
13848 		 * Check lba and block count against sd_cdbtab limits.
13849 		 * In the partial DMA case, we have to use the same size
13850 		 * CDB for all the transfers. Check lba + blockcount
13851 		 * against the max LBA so we know that segment of the
13852 		 * transfer can use the CDB we select.
13853 		 */
13854 		if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
13855 		    (blockcount <= sd_cdbtab[i].sc_maxlen)) {
13856
13857 			/*
13858 			 * The command will fit into the CDB type
13859 			 * specified by sd_cdbtab[i].
13860 			 */
13861 			cp = sd_cdbtab + i;
13862
13863 			/*
13864 			 * Call scsi_init_pkt so we can fill in the
13865 			 * CDB.
13866 			 */
13867 			return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
13868 			    bp, cp->sc_grpcode, un->un_status_len, 0,
13869 			    flags, callback, callback_arg);
13870
13871 			if (return_pktp != NULL) {
13872
13873 				/*
13874 				 * Return new value of pkt
13875 				 */
13876 				*pktpp = return_pktp;
13877
13878 				/*
13879 				 * To be safe, zero the CDB, ensuring there is
13880 				 * no leftover data from a previous command.
13881 				 */
13882 				bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);
13883
13884 				/*
13885 				 * Handle partial DMA mapping
13886 				 */
13887 				if (return_pktp->pkt_resid != 0) {
13888
13889 					/*
13890 					 * Not going to xfer as many blocks as
13891 					 * originally expected
13892 					 */
13893 					blockcount -=
13894 					    SD_BYTES2TGTBLOCKS(un,
13895 					    return_pktp->pkt_resid);
13896 				}
13897
13898 				cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;
13899
13900 				/*
13901 				 * Set command byte based on the CDB
13902 				 * type we matched.
13903 				 */
13904 				cdbp->scc_cmd = cp->sc_grpmask |
13905 				    ((bp->b_flags & B_READ) ?
13906 				    SCMD_READ : SCMD_WRITE);
13907
13908 				SD_FILL_SCSI1_LUN(un, return_pktp);
13909
13910 				/*
13911 				 * Fill in LBA and length
13912 				 */
13913 				ASSERT((cp->sc_grpcode == CDB_GROUP1) ||
13914 				    (cp->sc_grpcode == CDB_GROUP4) ||
13915 				    (cp->sc_grpcode == CDB_GROUP0) ||
13916 				    (cp->sc_grpcode == CDB_GROUP5));
13917
13918 				if (cp->sc_grpcode == CDB_GROUP1) {
13919 					FORMG1ADDR(cdbp, lba);
13920 					FORMG1COUNT(cdbp, blockcount);
13921 					return (0);
13922 				} else if (cp->sc_grpcode == CDB_GROUP4) {
13923 					FORMG4LONGADDR(cdbp, lba);
13924 					FORMG4COUNT(cdbp, blockcount);
13925 					return (0);
13926 				} else if (cp->sc_grpcode == CDB_GROUP0) {
13927 					FORMG0ADDR(cdbp, lba);
13928 					FORMG0COUNT(cdbp, blockcount);
13929 					return (0);
13930 				} else if (cp->sc_grpcode == CDB_GROUP5) {
13931 					FORMG5ADDR(cdbp, lba);
13932 					FORMG5COUNT(cdbp, blockcount);
13933 					return (0);
13934 				}
13935
13936 				/*
13937 				 * It should be impossible to not match one
13938 				 * of the CDB types above, so we should never
13939 				 * reach this point. Set the CDB command byte
13940 				 * to test-unit-ready to avoid writing
13941 				 * to somewhere we don't intend.
13942 				 */
13943 				cdbp->scc_cmd = SCMD_TEST_UNIT_READY;
13944 				return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13945 			} else {
13946 				/*
13947 				 * Couldn't get scsi_pkt
13948 				 */
13949 				return (SD_PKT_ALLOC_FAILURE);
13950 			}
13951 		}
13952 	}
13953
13954 	/*
13955 	 * None of the available CDB types were suitable. This really
13956 	 * should never happen: on a 64-bit system we support
13957 	 * READ16/WRITE16, which will hold an entire 64-bit disk address,
13958 	 * and on a 32-bit system we will refuse to bind to a device
13959 	 * larger than 2TB, so addresses will never be larger than 32 bits.
13960 	 */
13961 	return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13962 }
13963
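/*
 * Illustrative sketch (editor's addition): the per-group limits a
 * table like sd_cdbtab encodes. The loop above picks the smallest
 * CDB whose LBA and transfer-length fields can carry the whole
 * request (lba + blockcount - 1 <= max LBA, blockcount <= max
 * length). The figures below come from the SCSI block-command
 * formats; the table name and exact field values here are
 * hypothetical, not the driver's actual sd_cdbtab contents.
 */
#include <stdint.h>

struct cdb_limits {
	int		grpcode;	/* CDB length in bytes */
	uint64_t	maxlba;		/* largest addressable LBA */
	uint32_t	maxlen;		/* largest block count */
};

static const struct cdb_limits cdb_limits_tab[] = {
	{ 6,  0x1FFFFF,   0xFF },	/* group 0: 21-bit LBA */
	{ 10, 0xFFFFFFFF, 0xFFFF },	/* group 1: 32-bit LBA */
	{ 12, 0xFFFFFFFF, 0xFFFFFFFF },	/* group 5: 32-bit LBA */
	{ 16, UINT64_MAX, 0xFFFFFFFF },	/* group 4: 64-bit LBA */
};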
13964 /*
13965  * Function: sd_setup_next_rw_pkt
13966  *
13967  * Description: Set up a packet for partial DMA transfers, except for the
13968  *		initial transfer. sd_setup_rw_pkt should be used for
13969  *		the initial transfer.
13970  *
13971  * Context: Kernel thread and may be called from interrupt context.
13972  */
13973
13974 int
13975 sd_setup_next_rw_pkt(struct sd_lun *un,
13976     struct scsi_pkt *pktp, struct buf *bp,
13977     diskaddr_t lba, uint32_t blockcount)
13978 {
13979 	uchar_t com;
13980 	union scsi_cdb *cdbp;
13981 	uchar_t cdb_group_id;
13982
13983 	ASSERT(pktp != NULL);
13984 	ASSERT(pktp->pkt_cdbp != NULL);
13985
13986 	cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
13987 	com = cdbp->scc_cmd;
13988 	cdb_group_id = CDB_GROUPID(com);
13989
13990 	ASSERT((cdb_group_id == CDB_GROUPID_0) ||
13991 	    (cdb_group_id == CDB_GROUPID_1) ||
13992 	    (cdb_group_id == CDB_GROUPID_4) ||
13993 	    (cdb_group_id == CDB_GROUPID_5));
13994
13995 	/*
13996 	 * Move pkt to the next portion of the xfer.
13997 	 * func is NULL_FUNC so we do not have to release
13998 	 * the disk mutex here.
13999 	 */
14000 	if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0,
14001 	    NULL_FUNC, NULL) == pktp) {
14002 		/* Success.  Handle partial DMA */
14003 		if (pktp->pkt_resid != 0) {
14004 			blockcount -=
14005 			    SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
14006 		}
14007
14008 		cdbp->scc_cmd = com;
14009 		SD_FILL_SCSI1_LUN(un, pktp);
14010 		if (cdb_group_id == CDB_GROUPID_1) {
14011 			FORMG1ADDR(cdbp, lba);
14012 			FORMG1COUNT(cdbp, blockcount);
14013 			return (0);
14014 		} else if (cdb_group_id == CDB_GROUPID_4) {
14015 			FORMG4LONGADDR(cdbp, lba);
14016 			FORMG4COUNT(cdbp, blockcount);
14017 			return (0);
14018 		} else if (cdb_group_id == CDB_GROUPID_0) {
14019 			FORMG0ADDR(cdbp, lba);
14020 			FORMG0COUNT(cdbp, blockcount);
14021 			return (0);
14022 		} else if (cdb_group_id == CDB_GROUPID_5) {
14023 			FORMG5ADDR(cdbp, lba);
14024 			FORMG5COUNT(cdbp, blockcount);
14025 			return (0);
14026 		}
14027
14028 		/* Unreachable */
14029 		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
14030 	}
14031
14032 	/*
14033 	 * Error setting up next portion of cmd transfer.
14034 	 * Something is definitely very wrong and this
14035 	 * should not happen.
14036 	 */
14037 	return (SD_PKT_ALLOC_FAILURE);
14038 }
14039
14040 /*
14041  * Function: sd_initpkt_for_uscsi
14042  *
14043  * Description: Allocate and initialize for transport a scsi_pkt struct,
14044  *		based upon the info specified in the given uscsi_cmd struct.
14045  *
14046  * Return Code: SD_PKT_ALLOC_SUCCESS
14047  *		SD_PKT_ALLOC_FAILURE
14048  *		SD_PKT_ALLOC_FAILURE_NO_DMA
14049  *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
14050  *
14051  * Context: Kernel thread and may be called from software interrupt context
14052  *		as part of a sdrunout callback. This function may not block or
14053  *		call routines that block.
14054  */
14055
14056 static int
14057 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
14058 {
14059 	struct uscsi_cmd *uscmd;
14060 	struct sd_xbuf	*xp;
14061 	struct scsi_pkt	*pktp;
14062 	struct sd_lun	*un;
14063 	uint32_t	flags = 0;
14064
14065 	ASSERT(bp != NULL);
14066 	ASSERT(pktpp != NULL);
14067 	xp = SD_GET_XBUF(bp);
14068 	ASSERT(xp != NULL);
14069 	un = SD_GET_UN(bp);
14070 	ASSERT(un != NULL);
14071 	ASSERT(mutex_owned(SD_MUTEX(un)));
14072
14073 	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
14074 	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
14075 	ASSERT(uscmd != NULL);
14076
14077 	SD_TRACE(SD_LOG_IO_CORE, un,
14078 	    "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);
14079
14080 	/*
14081 	 * Allocate the scsi_pkt for the command.
14082 	 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
14083 	 *	 during scsi_init_pkt time and will continue to use the
14084 	 *	 same path as long as the same scsi_pkt is used without
14085 	 *	 intervening scsi_dma_free(). Since a uscsi command does
14086 	 *	 not call scsi_dmafree() before retrying a failed command,
14087 	 *	 it is necessary to make sure the PKT_DMA_PARTIAL flag is
14088 	 *	 NOT set, so that scsi_vhci can use another available path
14089 	 *	 for the retry. Besides, a uscsi command does not allow
14090 	 *	 DMA breakup, so there is no need to set PKT_DMA_PARTIAL.
14091 	 */
14092 	if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
14093 		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
14094 		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
14095 		    ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
14096 		    - sizeof (struct scsi_extended_sense)), 0,
14097 		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
14098 		    sdrunout, (caddr_t)un);
14099 	} else {
14100 		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
14101 		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
14102 		    sizeof (struct scsi_arq_status), 0,
14103 		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
14104 		    sdrunout, (caddr_t)un);
14105 	}
14106
14107 	if (pktp == NULL) {
14108 		*pktpp = NULL;
14109 		/*
14110 		 * Set the driver state to RWAIT to indicate the driver
14111 		 * is waiting on resource allocations. The driver will not
14112 		 * suspend, pm_suspend, or detach while the state is RWAIT.
14113 		 */
14114 		New_state(un, SD_STATE_RWAIT);
14115
14116 		SD_ERROR(SD_LOG_IO_CORE, un,
14117 		    "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);
14118
14119 		if ((bp->b_flags & B_ERROR) != 0) {
14120 			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
14121 		}
14122 		return (SD_PKT_ALLOC_FAILURE);
14123 	}
14124
14125 	/*
14126 	 * We do not do DMA breakup for USCSI commands, so return failure
14127 	 * here if all the needed DMA resources were not allocated.
14128 	 */
14129 	if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
14130 	    (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
14131 		scsi_destroy_pkt(pktp);
14132 		SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
14133 		    "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
14134 		return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
14135 	}
14136
14137 	/* Init the cdb from the given uscsi struct */
14138 	(void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
14139 	    uscmd->uscsi_cdb[0], 0, 0, 0);
14140
14141 	SD_FILL_SCSI1_LUN(un, pktp);
14142
14143 	/*
14144 	 * Set up the optional USCSI flags. See the uscsi (7I) man page
14145 	 * for a listing of the supported flags.
14146 	 */
14147
14148 	if (uscmd->uscsi_flags & USCSI_SILENT) {
14149 		flags |= FLAG_SILENT;
14150 	}
14151
14152 	if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
14153 		flags |= FLAG_DIAGNOSE;
14154 	}
14155
14156 	if (uscmd->uscsi_flags & USCSI_ISOLATE) {
14157 		flags |= FLAG_ISOLATE;
14158 	}
14159
14160 	if (un->un_f_is_fibre == FALSE) {
14161 		if (uscmd->uscsi_flags & USCSI_RENEGOT) {
14162 			flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
14163 		}
14164 	}
14165
14166 	/*
14167 	 * Set the pkt flags here so we save time later.
14168 	 * Note: These flags are NOT in the uscsi man page!!!
14169 	 */
14170 	if (uscmd->uscsi_flags & USCSI_HEAD) {
14171 		flags |= FLAG_HEAD;
14172 	}
14173
14174 	if (uscmd->uscsi_flags & USCSI_NOINTR) {
14175 		flags |= FLAG_NOINTR;
14176 	}
14177
14178 	/*
14179 	 * For tagged queueing, things get a bit complicated.
14180 	 * Check first for head of queue and last for ordered queue.
14181 	 * If neither head nor ordered, use the default driver tag flags.
14182 	 */
14183 	if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
14184 		if (uscmd->uscsi_flags & USCSI_HTAG) {
14185 			flags |= FLAG_HTAG;
14186 		} else if (uscmd->uscsi_flags & USCSI_OTAG) {
14187 			flags |= FLAG_OTAG;
14188 		} else {
14189 			flags |= un->un_tagflags & FLAG_TAGMASK;
14190 		}
14191 	}
14192
14193 	if (uscmd->uscsi_flags & USCSI_NODISCON) {
14194 		flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
14195 	}
14196
14197 	pktp->pkt_flags = flags;
14198
14199 	/* Transfer uscsi information to scsi_pkt */
14200 	(void) scsi_uscsi_pktinit(uscmd, pktp);
14201
14202 	/* Copy the caller's CDB into the pkt... */
14203 	bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);
14204
14205 	if (uscmd->uscsi_timeout == 0) {
14206 		pktp->pkt_time = un->un_uscsi_timeout;
14207 	} else {
14208 		pktp->pkt_time = uscmd->uscsi_timeout;
14209 	}
14210
14211 	/* need it later to identify USCSI request in sdintr */
14212 	xp->xb_pkt_flags |= SD_XB_USCSICMD;
14213
14214 	xp->xb_sense_resid = uscmd->uscsi_rqresid;
14215
14216 	pktp->pkt_private = bp;
14217 	pktp->pkt_comp = sdintr;
14218 	*pktpp = pktp;
14219
14220 	SD_TRACE(SD_LOG_IO_CORE, un,
14221 	    "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);
14222
14223 	return (SD_PKT_ALLOC_SUCCESS);
14224 }
14225
14226
14227 /*
14228  * Function: sd_destroypkt_for_uscsi
14229  *
14230  * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
14231  *		IOs. Also saves relevant info into the associated uscsi_cmd
14232  *		struct.
14233  *
14234  * Context: May be called under interrupt context
14235  */
14236
14237 static void
14238 sd_destroypkt_for_uscsi(struct buf *bp)
14239 {
14240 	struct uscsi_cmd *uscmd;
14241 	struct sd_xbuf	*xp;
14242 	struct scsi_pkt	*pktp;
14243 	struct sd_lun	*un;
14244 	struct sd_uscsi_info *suip;
14245
14246 	ASSERT(bp != NULL);
14247 	xp = SD_GET_XBUF(bp);
14248 	ASSERT(xp != NULL);
14249 	un = SD_GET_UN(bp);
14250 	ASSERT(un != NULL);
14251 	ASSERT(!mutex_owned(SD_MUTEX(un)));
14252 	pktp = SD_GET_PKTP(bp);
14253 	ASSERT(pktp != NULL);
14254
14255 	SD_TRACE(SD_LOG_IO_CORE, un,
14256 	    "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);
14257
14258 	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
14259 	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
14260 	ASSERT(uscmd != NULL);
14261
14262 	/* Save the status and the residual into the uscsi_cmd struct */
14263 	uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
14264 	uscmd->uscsi_resid  = bp->b_resid;
14265
14266 	/* Transfer scsi_pkt information to uscsi */
14267 	(void) scsi_uscsi_pktfini(pktp, uscmd);
14268
14269 	/*
14270 	 * If enabled, copy any saved sense data into the area specified
14271 	 * by the uscsi command.
14272 	 */
14273 	if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
14274 	    (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
14275 		/*
14276 		 * Note: uscmd->uscsi_rqbuf should always point to a buffer
14277 		 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
14278 		 */
14279 		uscmd->uscsi_rqstatus = xp->xb_sense_status;
14280 		uscmd->uscsi_rqresid  = xp->xb_sense_resid;
14281 		if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
14282 			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14283 			    MAX_SENSE_LENGTH);
14284 		} else {
14285 			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14286 			    SENSE_LENGTH);
14287 		}
14288 	}
14289 	/*
14290 	 * The following assignments are for SCSI FMA.
14291 	 */
14292 	ASSERT(xp->xb_private != NULL);
14293 	suip = (struct sd_uscsi_info *)xp->xb_private;
14294 	suip->ui_pkt_reason = pktp->pkt_reason;
14295 	suip->ui_pkt_state = pktp->pkt_state;
14296 	suip->ui_pkt_statistics = pktp->pkt_statistics;
14297 	suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp);
14298
14299 	/* We are done with the scsi_pkt; free it now */
14300 	ASSERT(SD_GET_PKTP(bp) != NULL);
14301 	scsi_destroy_pkt(SD_GET_PKTP(bp));
14302
14303 	SD_TRACE(SD_LOG_IO_CORE, un,
14304 	    "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
14305 }
14306
14307
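/*
 * Illustrative sketch (editor's addition): the sense copyback policy
 * above. The caller's request-sense buffer gets the default sense
 * size unless the caller asked for more than the default (the
 * PKT_XARQ case set up in sd_initpkt_for_uscsi()), in which case the
 * driver's maximum gathered sense is copied. The sizes below are
 * stand-ins, not the real SENSE_LENGTH/MAX_SENSE_LENGTH values, and
 * the destination is assumed large enough, as the driver guarantees.
 */
#include <string.h>
#include <stddef.h>

#define	DEF_SENSE_LEN	20	/* stand-in for SENSE_LENGTH */
#define	MAX_SENSE_LEN	255	/* stand-in for MAX_SENSE_LENGTH */

static void
copy_sense(const unsigned char *saved, unsigned char *rqbuf, size_t rqlen)
{
	memcpy(rqbuf, saved,
	    (rqlen > DEF_SENSE_LEN) ? MAX_SENSE_LEN : DEF_SENSE_LEN);
}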
14308 /*
14309  * Function: sd_bioclone_alloc
14310  *
14311  * Description: Allocate a buf(9S) and init it as per the given buf
14312  *		and the various arguments. The associated sd_xbuf
14313  *		struct is (nearly) duplicated. The struct buf *bp
14314  *		argument is saved in new_xp->xb_private.
14315  *
14316  * Arguments: bp - ptr to the buf(9S) to be "shadowed"
14317  *		datalen - size of data area for the shadow bp
14318  *		blkno - starting LBA
14319  *		func - function pointer for b_iodone in the shadow buf. (May
14320  *		be NULL if none.)
14321  *
14322  * Return Code: Pointer to the allocated buf(9S) struct
14323  *
14324  * Context: Can sleep.
14325  */
14326
14327 static struct buf *
14328 sd_bioclone_alloc(struct buf *bp, size_t datalen,
14329     daddr_t blkno, int (*func)(struct buf *))
14330 {
14331 	struct sd_lun	*un;
14332 	struct sd_xbuf	*xp;
14333 	struct sd_xbuf	*new_xp;
14334 	struct buf	*new_bp;
14335
14336 	ASSERT(bp != NULL);
14337 	xp = SD_GET_XBUF(bp);
14338 	ASSERT(xp != NULL);
14339 	un = SD_GET_UN(bp);
14340 	ASSERT(un != NULL);
14341 	ASSERT(!mutex_owned(SD_MUTEX(un)));
14342
14343 	new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
14344 	    NULL, KM_SLEEP);
14345
14346 	new_bp->b_lblkno = blkno;
14347
14348 	/*
14349 	 * Allocate an xbuf for the shadow bp and copy the contents of the
14350 	 * original xbuf into it.
14351 	 */
14352 	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
14353 	bcopy(xp, new_xp, sizeof (struct sd_xbuf));
14354
14355 	/*
14356 	 * The given bp is automatically saved in the xb_private member
14357 	 * of the new xbuf. Callers are allowed to depend on this.
14358 	 */
14359 	new_xp->xb_private = bp;
14360
14361 	new_bp->b_private  = new_xp;
14362
14363 	return (new_bp);
14364 }
14365
14366 /*
14367  * Function: sd_shadow_buf_alloc
14368  *
14369  * Description: Allocate a buf(9S) and init it as per the given buf
14370  *		and the various arguments. The associated sd_xbuf
14371  *		struct is (nearly) duplicated. The struct buf *bp
14372  *		argument is saved in new_xp->xb_private.
14373  *
14374  * Arguments: bp - ptr to the buf(9S) to be "shadowed"
14375  *		datalen - size of data area for the shadow bp
14376  *		bflags - B_READ or B_WRITE (pseudo flag)
14377  *		blkno - starting LBA
14378  *		func - function pointer for b_iodone in the shadow buf. (May
14379  *		be NULL if none.)
14380  *
14381  * Return Code: Pointer to the allocated buf(9S) struct
14382  *
14383  * Context: Can sleep.
14384  */
14385
14386 static struct buf *
14387 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
14388     daddr_t blkno, int (*func)(struct buf *))
14389 {
14390 	struct sd_lun	*un;
14391 	struct sd_xbuf	*xp;
14392 	struct sd_xbuf	*new_xp;
14393 	struct buf	*new_bp;
14394
14395 	ASSERT(bp != NULL);
14396 	xp = SD_GET_XBUF(bp);
14397 	ASSERT(xp != NULL);
14398 	un = SD_GET_UN(bp);
14399 	ASSERT(un != NULL);
14400 	ASSERT(!mutex_owned(SD_MUTEX(un)));
14401
14402 	if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
14403 		bp_mapin(bp);
14404 	}
14405
14406 	bflags &= (B_READ | B_WRITE);
14407 #if defined(__i386) || defined(__amd64)
14408 	new_bp = getrbuf(KM_SLEEP);
14409 	new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
14410 	new_bp->b_bcount = datalen;
14411 	new_bp->b_flags = bflags |
14412 	    (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW));
14413 #else
14414 	new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
14415 	    datalen, bflags, SLEEP_FUNC, NULL);
14416 #endif
14417 	new_bp->av_forw	= NULL;
14418 	new_bp->av_back	= NULL;
14419 	new_bp->b_dev	= bp->b_dev;
14420 	new_bp->b_blkno	= blkno;
14421 	new_bp->b_iodone = func;
14422 	new_bp->b_edev	= bp->b_edev;
14423 	new_bp->b_resid	= 0;
14424
14425 	/* We need to preserve the B_FAILFAST flag */
14426 	if (bp->b_flags & B_FAILFAST) {
14427 		new_bp->b_flags |= B_FAILFAST;
14428 	}
14429
14430 	/*
14431 	 * Allocate an xbuf for the shadow bp and copy the contents of the
14432 	 * original xbuf into it.
14433 	 */
14434 	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
14435 	bcopy(xp, new_xp, sizeof (struct sd_xbuf));
14436
14437 	/* Need later to copy data between the shadow buf & original buf! */
14438 	new_xp->xb_pkt_flags |= PKT_CONSISTENT;
14439
14440 	/*
14441 	 * The given bp is automatically saved in the xb_private member
14442 	 * of the new xbuf. Callers are allowed to depend on this.
14443 	 */
14444 	new_xp->xb_private = bp;
14445
14446 	new_bp->b_private  = new_xp;
14447
14448 	return (new_bp);
14449 }
14450
14451 /*
14452  * Function: sd_bioclone_free
14453  *
14454  * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
14455  *		in the larger-than-partition (overrun) case.
14456  *
14457  * Context: May be called under interrupt context
14458  */
14459
14460 static void
14461 sd_bioclone_free(struct buf *bp)
14462 {
14463 	struct sd_xbuf	*xp;
14464
14465 	ASSERT(bp != NULL);
14466 	xp = SD_GET_XBUF(bp);
14467 	ASSERT(xp != NULL);
14468
14469 	/*
14470 	 * Call bp_mapout() before freeing the buf, in case a lower
14471 	 * layer or HBA had done a bp_mapin(). We must do this here
14472 	 * as we are the "originator" of the shadow buf.
14473 	 */
14474 	bp_mapout(bp);
14475
14476 	/*
14477 	 * Null out b_iodone before freeing the bp, to ensure that the driver
14478 	 * never gets confused by a stale value in this field. (Just a little
14479 	 * extra defensiveness here.)
14480 	 */
14481 	bp->b_iodone = NULL;
14482
14483 	freerbuf(bp);
14484
14485 	kmem_free(xp, sizeof (struct sd_xbuf));
14486 }
14487
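/*
 * Illustrative sketch (editor's addition): the xb_private borrowing
 * convention used by the alloc/free pairs here and documented in
 * sd_mapblocksize_iostart(). A layer stashes the previous owner's
 * pointer inside its own private block on the way down and restores
 * it on the way up. Hypothetical, simplified, user-level allocation.
 */
#include <stdlib.h>

struct layer_priv {
	void	*saved_oprivate;	/* plays the role of mbs_oprivate */
	/* ... per-layer state ... */
};

static void
layer_push(void **xb_private)
{
	struct layer_priv *p = calloc(1, sizeof (*p));

	p->saved_oprivate = *xb_private;	/* borrow the slot */
	*xb_private = p;
}

static void
layer_pop(void **xb_private)
{
	struct layer_priv *p = *xb_private;

	*xb_private = p->saved_oprivate;	/* restore for the owner */
	free(p);
}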
14488 /*
14489  * Function: sd_shadow_buf_free
14490  *
14491  * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations.
14492  *
14493  * Context: May be called under interrupt context
14494  */
14495
14496 static void
14497 sd_shadow_buf_free(struct buf *bp)
14498 {
14499 	struct sd_xbuf	*xp;
14500
14501 	ASSERT(bp != NULL);
14502 	xp = SD_GET_XBUF(bp);
14503 	ASSERT(xp != NULL);
14504
14505 #if defined(__sparc)
14506 	/*
14507 	 * Call bp_mapout() before freeing the buf, in case a lower
14508 	 * layer or HBA had done a bp_mapin(). We must do this here
14509 	 * as we are the "originator" of the shadow buf.
14510 	 */
14511 	bp_mapout(bp);
14512 #endif
14513
14514 	/*
14515 	 * Null out b_iodone before freeing the bp, to ensure that the driver
14516 	 * never gets confused by a stale value in this field. (Just a little
14517 	 * extra defensiveness here.)
14518 	 */
14519 	bp->b_iodone = NULL;
14520
14521 #if defined(__i386) || defined(__amd64)
14522 	kmem_free(bp->b_un.b_addr, bp->b_bcount);
14523 	freerbuf(bp);
14524 #else
14525 	scsi_free_consistent_buf(bp);
14526 #endif
14527
14528 	kmem_free(xp, sizeof (struct sd_xbuf));
14529 }
14530
14531
14532 /*
14533  * Function: sd_print_transport_rejected_message
14534  *
14535  * Description: This implements the ludicrously complex rules for printing
14536  *		a "transport rejected" message. This is to address the
14537  *		specific problem of having a flood of this error message
14538  *		produced when a failover occurs.
14539  *
14540  * Context: Any.
14541  */
14542
14543 static void
14544 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
14545     int code)
14546 {
14547 	ASSERT(un != NULL);
14548 	ASSERT(mutex_owned(SD_MUTEX(un)));
14549 	ASSERT(xp != NULL);
14550
14551 	/*
14552 	 * Print the "transport rejected" message under the following
14553 	 * conditions:
14554 	 *
14555 	 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
14556 	 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
14557 	 * - If the error code IS a TRAN_FATAL_ERROR, then the message is
14558 	 *   printed the FIRST time a TRAN_FATAL_ERROR is returned from
14559 	 *   scsi_transport(9F) (which indicates that the target might have
14560 	 *   gone off-line). This uses the un->un_tran_fatal_count
14561 	 *   count, which is incremented whenever a TRAN_FATAL_ERROR is
14562 	 *   received, and reset to zero whenever a TRAN_ACCEPT is returned
14563 	 *   from scsi_transport().
14564 	 *
14565 	 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
14566 	 * the preceding cases in order for the message to be printed.
14567 	 */
14568 	if (((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) &&
14569 	    (SD_FM_LOG(un) == SD_FM_LOG_NSUP)) {
14570 		if ((sd_level_mask & SD_LOGMASK_DIAG) ||
14571 		    (code != TRAN_FATAL_ERROR) ||
14572 		    (un->un_tran_fatal_count == 1)) {
14573 			switch (code) {
14574 			case TRAN_BADPKT:
14575 				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14576 				    "transport rejected bad packet\n");
14577 				break;
14578 			case TRAN_FATAL_ERROR:
14579 				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14580 				    "transport rejected fatal error\n");
14581 				break;
14582 			default:
14583 				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14584 				    "transport rejected (%d)\n", code);
14585 				break;
14586 			}
14587 		}
14588 	}
14589 }
14590
14591
14592 /*
14593  * Function: sd_add_buf_to_waitq
14594  *
14595  * Description: Add the given buf(9S) struct to the wait queue for the
14596  *		instance. If sorting is enabled, then the buf is added
14597  *		to the queue via an elevator sort algorithm (a la
14598  *		disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key.
14599  *		If sorting is not enabled, then the buf is just added
14600  *		to the end of the wait queue.
14601  *
14602  * Return Code: void
14603  *
14604  * Context: Does not sleep/block, therefore technically can be called
14605  *		from any context. However if sorting is enabled then the
14606  *		execution time is indeterminate, and may take a long time if
14607  *		the wait queue grows large.
14608 */ 14609 14610 static void 14611 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 14612 { 14613 struct buf *ap; 14614 14615 ASSERT(bp != NULL); 14616 ASSERT(un != NULL); 14617 ASSERT(mutex_owned(SD_MUTEX(un))); 14618 14619 /* If the queue is empty, add the buf as the only entry & return. */ 14620 if (un->un_waitq_headp == NULL) { 14621 ASSERT(un->un_waitq_tailp == NULL); 14622 un->un_waitq_headp = un->un_waitq_tailp = bp; 14623 bp->av_forw = NULL; 14624 return; 14625 } 14626 14627 ASSERT(un->un_waitq_tailp != NULL); 14628 14629 /* 14630 * If sorting is disabled, just add the buf to the tail end of 14631 * the wait queue and return. 14632 */ 14633 if (un->un_f_disksort_disabled || un->un_f_enable_rmw) { 14634 un->un_waitq_tailp->av_forw = bp; 14635 un->un_waitq_tailp = bp; 14636 bp->av_forw = NULL; 14637 return; 14638 } 14639 14640 /* 14641 * Sort through the list of requests currently on the wait queue 14642 * and add the new buf request at the appropriate position. 14643 * 14644 * The un->un_waitq_headp is an activity chain pointer on which 14645 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 14646 * first queue holds those requests which are positioned after 14647 * the current SD_GET_BLKNO() (in the first request); the second holds 14648 * requests which came in after their SD_GET_BLKNO() number was passed. 14649 * Thus we implement a one-way scan, retracting after reaching 14650 * the end of the drive to the first request on the second 14651 * queue, at which time it becomes the first queue. 14652 * A one-way scan is natural because of the way UNIX read-ahead 14653 * blocks are allocated. 14654 * 14655 * If we lie after the first request, then we must locate the 14656 * second request list and add ourselves to it. 14657 */ 14658 ap = un->un_waitq_headp; 14659 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 14660 while (ap->av_forw != NULL) { 14661 /* 14662 * Look for an "inversion" in the (normally 14663 * ascending) block numbers. This indicates 14664 * the start of the second request list. 14665 */ 14666 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 14667 /* 14668 * Search the second request list for the 14669 * first request at a larger block number. 14670 * We go before that; however if there is 14671 * no such request, we go at the end. 14672 */ 14673 do { 14674 if (SD_GET_BLKNO(bp) < 14675 SD_GET_BLKNO(ap->av_forw)) { 14676 goto insert; 14677 } 14678 ap = ap->av_forw; 14679 } while (ap->av_forw != NULL); 14680 goto insert; /* after last */ 14681 } 14682 ap = ap->av_forw; 14683 } 14684 14685 /* 14686 * No inversions... we will go after the last, and 14687 * be the first request in the second request list. 14688 */ 14689 goto insert; 14690 } 14691 14692 /* 14693 * Request is at/after the current request... 14694 * sort in the first request list. 14695 */ 14696 while (ap->av_forw != NULL) { 14697 /* 14698 * We want to go after the current request (1) if 14699 * there is an inversion after it (i.e. it is the end 14700 * of the first request list), or (2) if the next 14701 * request is a larger block number than our request. 14702 */ 14703 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 14704 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 14705 goto insert; 14706 } 14707 ap = ap->av_forw; 14708 } 14709 14710 /* 14711 * Neither a second list nor a larger request, therefore 14712 * we go at the end of the first list (which is the same 14713 * as the end of the whole shebang.
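* * A worked example (illustrative only): with the queue holding blknos 10 -> 40 -> 70 -> 5 -> 8 (the inversion 70 -> 5 marks the start of the second list), a new request for blkno 50 goes between 40 and 70, one for blkno 7 goes between 5 and 8, and one for blkno 3 goes at the head of the second list, right after 70.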
14714 */ 14715 insert: 14716 bp->av_forw = ap->av_forw; 14717 ap->av_forw = bp; 14718 14719 /* 14720 * If we inserted onto the tail end of the waitq, make sure the 14721 * tail pointer is updated. 14722 */ 14723 if (ap == un->un_waitq_tailp) { 14724 un->un_waitq_tailp = bp; 14725 } 14726 } 14727 14728 14729 /* 14730 * Function: sd_start_cmds 14731 * 14732 * Description: Remove and transport cmds from the driver queues. 14733 * 14734 * Arguments: un - pointer to the unit (soft state) struct for the target. 14735 * 14736 * immed_bp - ptr to a buf to be transported immediately. Only 14737 * the immed_bp is transported; bufs on the waitq are not 14738 * processed and the un_retry_bp is not checked. If immed_bp is 14739 * NULL, then normal queue processing is performed. 14740 * 14741 * Context: May be called from kernel thread context, interrupt context, 14742 * or runout callback context. This function may not block or 14743 * call routines that block. 14744 */ 14745 14746 static void 14747 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 14748 { 14749 struct sd_xbuf *xp; 14750 struct buf *bp; 14751 void (*statp)(kstat_io_t *); 14752 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14753 void (*saved_statp)(kstat_io_t *); 14754 #endif 14755 int rval; 14756 struct sd_fm_internal *sfip = NULL; 14757 14758 ASSERT(un != NULL); 14759 ASSERT(mutex_owned(SD_MUTEX(un))); 14760 ASSERT(un->un_ncmds_in_transport >= 0); 14761 ASSERT(un->un_throttle >= 0); 14762 14763 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 14764 14765 do { 14766 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14767 saved_statp = NULL; 14768 #endif 14769 14770 /* 14771 * If we are syncing or dumping, fail the command to 14772 * avoid recursively calling back into scsi_transport(). 14773 * The dump I/O itself uses a separate code path so this 14774 * only prevents non-dump I/O from being sent while dumping. 14775 * File system sync takes place before dumping begins. 14776 * During panic, filesystem I/O is allowed provided 14777 * un_in_callback is <= 1. This is to prevent recursion 14778 * such as sd_start_cmds -> scsi_transport -> sdintr -> 14779 * sd_start_cmds and so on. See panic.c for more information 14780 * about the states the system can be in during panic. 14781 */ 14782 if ((un->un_state == SD_STATE_DUMPING) || 14783 (ddi_in_panic() && (un->un_in_callback > 1))) { 14784 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14785 "sd_start_cmds: panicking\n"); 14786 goto exit; 14787 } 14788 14789 if ((bp = immed_bp) != NULL) { 14790 /* 14791 * We have a bp that must be transported immediately. 14792 * It's OK to transport the immed_bp here without doing 14793 * the throttle limit check because the immed_bp is 14794 * always used in a retry/recovery case. This means 14795 * that we know we are not at the throttle limit by 14796 * virtue of the fact that to get here we must have 14797 * already gotten a command back via sdintr(). This also 14798 * relies on (1) the command on un_retry_bp preventing 14799 * further commands from the waitq from being issued; 14800 * and (2) the code in sd_retry_command checking the 14801 * throttle limit before issuing a delayed or immediate 14802 * retry. This holds even if the throttle limit is 14803 * currently ratcheted down from its maximum value. 
14804 */ 14805 statp = kstat_runq_enter; 14806 if (bp == un->un_retry_bp) { 14807 ASSERT((un->un_retry_statp == NULL) || 14808 (un->un_retry_statp == kstat_waitq_enter) || 14809 (un->un_retry_statp == 14810 kstat_runq_back_to_waitq)); 14811 /* 14812 * If the waitq kstat was incremented when 14813 * sd_set_retry_bp() queued this bp for a retry, 14814 * then we must set up statp so that the waitq 14815 * count will get decremented correctly below. 14816 * Also we must clear un->un_retry_statp to 14817 * ensure that we do not act on a stale value 14818 * in this field. 14819 */ 14820 if ((un->un_retry_statp == kstat_waitq_enter) || 14821 (un->un_retry_statp == 14822 kstat_runq_back_to_waitq)) { 14823 statp = kstat_waitq_to_runq; 14824 } 14825 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14826 saved_statp = un->un_retry_statp; 14827 #endif 14828 un->un_retry_statp = NULL; 14829 14830 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14831 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 14832 "un_throttle:%d un_ncmds_in_transport:%d\n", 14833 un, un->un_retry_bp, un->un_throttle, 14834 un->un_ncmds_in_transport); 14835 } else { 14836 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 14837 "processing priority bp:0x%p\n", bp); 14838 } 14839 14840 } else if ((bp = un->un_waitq_headp) != NULL) { 14841 /* 14842 * A command on the waitq is ready to go, but do not 14843 * send it if: 14844 * 14845 * (1) the throttle limit has been reached, or 14846 * (2) a retry is pending, or 14847 * (3) a START_STOP_UNIT callback is pending, or 14848 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 14849 * command is pending. 14850 * 14851 * For all of these conditions, IO processing will 14852 * restart after the condition is cleared. 14853 */ 14854 if (un->un_ncmds_in_transport >= un->un_throttle) { 14855 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14856 "sd_start_cmds: exiting, " 14857 "throttle limit reached!\n"); 14858 goto exit; 14859 } 14860 if (un->un_retry_bp != NULL) { 14861 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14862 "sd_start_cmds: exiting, retry pending!\n"); 14863 goto exit; 14864 } 14865 if (un->un_startstop_timeid != NULL) { 14866 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14867 "sd_start_cmds: exiting, " 14868 "START_STOP pending!\n"); 14869 goto exit; 14870 } 14871 if (un->un_direct_priority_timeid != NULL) { 14872 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14873 "sd_start_cmds: exiting, " 14874 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n"); 14875 goto exit; 14876 } 14877 14878 /* Dequeue the command */ 14879 un->un_waitq_headp = bp->av_forw; 14880 if (un->un_waitq_headp == NULL) { 14881 un->un_waitq_tailp = NULL; 14882 } 14883 bp->av_forw = NULL; 14884 statp = kstat_waitq_to_runq; 14885 SD_TRACE(SD_LOG_IO_CORE, un, 14886 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 14887 14888 } else { 14889 /* No work to do so bail out now */ 14890 SD_TRACE(SD_LOG_IO_CORE, un, 14891 "sd_start_cmds: no more work, exiting!\n"); 14892 goto exit; 14893 } 14894 14895 /* 14896 * Reset the state to normal. This is the mechanism by which 14897 * the state transitions from either SD_STATE_RWAIT or 14898 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 14899 * If state is SD_STATE_PM_CHANGING then this command is 14900 * part of the device power control and the state must 14901 * not be put back to normal. Doing so would 14902 * allow new commands to proceed when they shouldn't, 14903 * as the device may be going off.
14904 */ 14905 if ((un->un_state != SD_STATE_SUSPENDED) && 14906 (un->un_state != SD_STATE_PM_CHANGING)) { 14907 New_state(un, SD_STATE_NORMAL); 14908 } 14909 14910 xp = SD_GET_XBUF(bp); 14911 ASSERT(xp != NULL); 14912 14913 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14914 /* 14915 * Allocate the scsi_pkt if we need one, or attach DMA 14916 * resources if we have a scsi_pkt that needs them. The 14917 * latter should only occur for commands that are being 14918 * retried. 14919 */ 14920 if ((xp->xb_pktp == NULL) || 14921 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 14922 #else 14923 if (xp->xb_pktp == NULL) { 14924 #endif 14925 /* 14926 * There is no scsi_pkt allocated for this buf. Call 14927 * the initpkt function to allocate & init one. 14928 * 14929 * The scsi_init_pkt runout callback functionality is 14930 * implemented as follows: 14931 * 14932 * 1) The initpkt function always calls 14933 * scsi_init_pkt(9F) with sdrunout specified as the 14934 * callback routine. 14935 * 2) A successful packet allocation is initialized and 14936 * the I/O is transported. 14937 * 3) The I/O associated with an allocation resource 14938 * failure is left on its queue to be retried via 14939 * runout or the next I/O. 14940 * 4) The I/O associated with a DMA error is removed 14941 * from the queue and failed with EIO. Processing of 14942 * the transport queues is also halted to be 14943 * restarted via runout or the next I/O. 14944 * 5) The I/O associated with a CDB size or packet 14945 * size error is removed from the queue and failed 14946 * with EIO. Processing of the transport queues is 14947 * continued. 14948 * 14949 * Note: there is no interface for canceling a runout 14950 * callback. To prevent the driver from detaching or 14951 * suspending while a runout is pending, the driver 14952 * state is set to SD_STATE_RWAIT. 14953 * 14954 * Note: using the scsi_init_pkt callback facility can 14955 * result in an I/O request persisting at the head of 14956 * the list which cannot be satisfied even after 14957 * multiple retries. In the future the driver may 14958 * implement some kind of maximum runout count before 14959 * failing an I/O. 14960 * 14961 * Note: the use of funcp below may seem superfluous, 14962 * but it helps warlock figure out the correct 14963 * initpkt function calls (see [s]sd.wlcmd). 14964 */ 14965 struct scsi_pkt *pktp; 14966 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 14967 14968 ASSERT(bp != un->un_rqs_bp); 14969 14970 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 14971 switch ((*funcp)(bp, &pktp)) { 14972 case SD_PKT_ALLOC_SUCCESS: 14973 xp->xb_pktp = pktp; 14974 SD_TRACE(SD_LOG_IO_CORE, un, 14975 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 14976 pktp); 14977 goto got_pkt; 14978 14979 case SD_PKT_ALLOC_FAILURE: 14980 /* 14981 * Temporary (hopefully) resource depletion. 14982 * Since retries and RQS commands always have a 14983 * scsi_pkt allocated, these cases should never 14984 * get here. So the only cases this needs to 14985 * handle are a bp from the waitq (which we put 14986 * back onto the waitq for sdrunout), or a bp 14987 * sent as an immed_bp (which we just fail). 14988 */ 14989 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14990 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14991 14992 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14993 14994 if (bp == immed_bp) { 14995 /* 14996 * If SD_XB_DMA_FREED is clear, then 14997 * this is a failure to allocate a 14998 * scsi_pkt, and we must fail the 14999 * command.
15000 */ 15001 if ((xp->xb_pkt_flags & 15002 SD_XB_DMA_FREED) == 0) { 15003 break; 15004 } 15005 15006 /* 15007 * If this immediate command is NOT our 15008 * un_retry_bp, then we must fail it. 15009 */ 15010 if (bp != un->un_retry_bp) { 15011 break; 15012 } 15013 15014 /* 15015 * We get here if this cmd is our 15016 * un_retry_bp that was DMAFREED, but 15017 * scsi_init_pkt() failed to reallocate 15018 * DMA resources when we attempted to 15019 * retry it. This can happen when an 15020 * mpxio failover is in progress, but 15021 * we don't want to just fail the 15022 * command in this case. 15023 * 15024 * Use timeout(9F) to restart it after 15025 * a 100ms delay. We don't want to 15026 * let sdrunout() restart it, because 15027 * sdrunout() is just supposed to start 15028 * commands that are sitting on the 15029 * wait queue. The un_retry_bp stays 15030 * set until the command completes, but 15031 * sdrunout can be called many times 15032 * before that happens. Since sdrunout 15033 * cannot tell if the un_retry_bp is 15034 * already in the transport, it could 15035 * end up calling scsi_transport() for 15036 * the un_retry_bp multiple times. 15037 * 15038 * Also: don't schedule the callback 15039 * if some other callback is already 15040 * pending. 15041 */ 15042 if (un->un_retry_statp == NULL) { 15043 /* 15044 * restore the kstat pointer to 15045 * keep kstat counts coherent 15046 * when we do retry the command. 15047 */ 15048 un->un_retry_statp = 15049 saved_statp; 15050 } 15051 15052 if ((un->un_startstop_timeid == NULL) && 15053 (un->un_retry_timeid == NULL) && 15054 (un->un_direct_priority_timeid == 15055 NULL)) { 15056 15057 un->un_retry_timeid = 15058 timeout( 15059 sd_start_retry_command, 15060 un, SD_RESTART_TIMEOUT); 15061 } 15062 goto exit; 15063 } 15064 15065 #else 15066 if (bp == immed_bp) { 15067 break; /* Just fail the command */ 15068 } 15069 #endif 15070 15071 /* Add the buf back to the head of the waitq */ 15072 bp->av_forw = un->un_waitq_headp; 15073 un->un_waitq_headp = bp; 15074 if (un->un_waitq_tailp == NULL) { 15075 un->un_waitq_tailp = bp; 15076 } 15077 goto exit; 15078 15079 case SD_PKT_ALLOC_FAILURE_NO_DMA: 15080 /* 15081 * HBA DMA resource failure. Fail the command 15082 * and continue processing of the queues. 15083 */ 15084 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15085 "sd_start_cmds: " 15086 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 15087 break; 15088 15089 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 15090 /* 15091 * Note:x86: Partial DMA mapping not supported 15092 * for USCSI commands, and all the needed DMA 15093 * resources were not allocated. 15094 */ 15095 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15096 "sd_start_cmds: " 15097 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 15098 break; 15099 15100 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 15101 /* 15102 * Note:x86: Request cannot fit into CDB based 15103 * on lba and len. 15104 */ 15105 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15106 "sd_start_cmds: " 15107 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 15108 break; 15109 15110 default: 15111 /* Should NEVER get here! */ 15112 panic("scsi_initpkt error"); 15113 /*NOTREACHED*/ 15114 } 15115 15116 /* 15117 * Fatal error in allocating a scsi_pkt for this buf. 15118 * Update kstats & return the buf with an error code. 15119 * We must use sd_return_failed_command_no_restart() to 15120 * avoid a recursive call back into sd_start_cmds(). 15121 * However this also means that we must keep processing 15122 * the waitq here in order to avoid stalling. 
15123 */ 15124 if (statp == kstat_waitq_to_runq) { 15125 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 15126 } 15127 sd_return_failed_command_no_restart(un, bp, EIO); 15128 if (bp == immed_bp) { 15129 /* immed_bp is gone by now, so clear this */ 15130 immed_bp = NULL; 15131 } 15132 continue; 15133 } 15134 got_pkt: 15135 if (bp == immed_bp) { 15136 /* goto the head of the class.... */ 15137 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15138 } 15139 15140 un->un_ncmds_in_transport++; 15141 SD_UPDATE_KSTATS(un, statp, bp); 15142 15143 /* 15144 * Call scsi_transport() to send the command to the target. 15145 * According to SCSA architecture, we must drop the mutex here 15146 * before calling scsi_transport() in order to avoid deadlock. 15147 * Note that the scsi_pkt's completion routine can be executed 15148 * (from interrupt context) even before the call to 15149 * scsi_transport() returns. 15150 */ 15151 SD_TRACE(SD_LOG_IO_CORE, un, 15152 "sd_start_cmds: calling scsi_transport()\n"); 15153 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 15154 15155 mutex_exit(SD_MUTEX(un)); 15156 rval = scsi_transport(xp->xb_pktp); 15157 mutex_enter(SD_MUTEX(un)); 15158 15159 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15160 "sd_start_cmds: scsi_transport() returned %d\n", rval); 15161 15162 switch (rval) { 15163 case TRAN_ACCEPT: 15164 /* Clear this with every pkt accepted by the HBA */ 15165 un->un_tran_fatal_count = 0; 15166 break; /* Success; try the next cmd (if any) */ 15167 15168 case TRAN_BUSY: 15169 un->un_ncmds_in_transport--; 15170 ASSERT(un->un_ncmds_in_transport >= 0); 15171 15172 /* 15173 * Don't retry request sense; the sense data 15174 * is lost when another request is sent. 15175 * Free up the rqs buf and retry 15176 * the original failed cmd. Update kstat. 15177 */ 15178 if (bp == un->un_rqs_bp) { 15179 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15180 bp = sd_mark_rqs_idle(un, xp); 15181 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15182 NULL, NULL, EIO, un->un_busy_timeout / 500, 15183 kstat_waitq_enter); 15184 goto exit; 15185 } 15186 15187 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 15188 /* 15189 * Free the DMA resources for the scsi_pkt. This will 15190 * allow mpxio to select another path the next time 15191 * we call scsi_transport() with this scsi_pkt. 15192 * See sdintr() for the rationale behind this. 15193 */ 15194 if ((un->un_f_is_fibre == TRUE) && 15195 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15196 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 15197 scsi_dmafree(xp->xb_pktp); 15198 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15199 } 15200 #endif 15201 15202 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 15203 /* 15204 * Commands that are SD_PATH_DIRECT_PRIORITY 15205 * are for error recovery situations. These do 15206 * not use the normal command waitq, so if they 15207 * get a TRAN_BUSY we cannot put them back onto 15208 * the waitq for later retry. One possible 15209 * problem is that there could already be some 15210 * other command on un_retry_bp that is waiting 15211 * for this one to complete, so we would be 15212 * deadlocked if we put this command back onto 15213 * the waitq for later retry (since un_retry_bp 15214 * must complete before the driver gets back to 15215 * commands on the waitq). 15216 * 15217 * To avoid deadlock we must schedule a callback 15218 * that will restart this command after a set 15219 * interval.
This should keep retrying for as 15220 * long as the underlying transport keeps 15221 * returning TRAN_BUSY (just like for other 15222 * commands). Use the same timeout interval as 15223 * for the ordinary TRAN_BUSY retry. 15224 */ 15225 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15226 "sd_start_cmds: scsi_transport() returned " 15227 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 15228 15229 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15230 un->un_direct_priority_timeid = 15231 timeout(sd_start_direct_priority_command, 15232 bp, un->un_busy_timeout / 500); 15233 15234 goto exit; 15235 } 15236 15237 /* 15238 * For TRAN_BUSY, we want to reduce the throttle value, 15239 * unless we are retrying a command. 15240 */ 15241 if (bp != un->un_retry_bp) { 15242 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 15243 } 15244 15245 /* 15246 * Set up the bp to be tried again 10 ms later. 15247 * Note:x86: Is there a timeout value in the sd_lun 15248 * for this condition? 15249 */ 15250 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, 15251 kstat_runq_back_to_waitq); 15252 goto exit; 15253 15254 case TRAN_FATAL_ERROR: 15255 un->un_tran_fatal_count++; 15256 /* FALLTHRU */ 15257 15258 case TRAN_BADPKT: 15259 default: 15260 un->un_ncmds_in_transport--; 15261 ASSERT(un->un_ncmds_in_transport >= 0); 15262 15263 /* 15264 * If this is our REQUEST SENSE command with a 15265 * transport error, we must get back the pointers 15266 * to the original buf, and mark the REQUEST 15267 * SENSE command as "available". 15268 */ 15269 if (bp == un->un_rqs_bp) { 15270 bp = sd_mark_rqs_idle(un, xp); 15271 xp = SD_GET_XBUF(bp); 15272 } else { 15273 /* 15274 * Legacy behavior: do not update transport 15275 * error count for request sense commands. 15276 */ 15277 SD_UPDATE_ERRSTATS(un, sd_transerrs); 15278 } 15279 15280 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15281 sd_print_transport_rejected_message(un, xp, rval); 15282 15283 /* 15284 * This command will be terminated by the SD driver due 15285 * to a fatal transport error. We should post 15286 * ereport.io.scsi.cmd.disk.tran with driver-assessment 15287 * of "fail" for any command to indicate this 15288 * situation. 15289 */ 15290 if (xp->xb_ena > 0) { 15291 ASSERT(un->un_fm_private != NULL); 15292 sfip = un->un_fm_private; 15293 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT; 15294 sd_ssc_extract_info(&sfip->fm_ssc, un, 15295 xp->xb_pktp, bp, xp); 15296 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 15297 } 15298 15299 /* 15300 * We must use sd_return_failed_command_no_restart() to 15301 * avoid a recursive call back into sd_start_cmds(). 15302 * However this also means that we must keep processing 15303 * the waitq here in order to avoid stalling. 15304 */ 15305 sd_return_failed_command_no_restart(un, bp, EIO); 15306 15307 /* 15308 * Notify any threads waiting in sd_ddi_suspend() that 15309 * a command completion has occurred. 15310 */ 15311 if (un->un_state == SD_STATE_SUSPENDED) { 15312 cv_broadcast(&un->un_disk_busy_cv); 15313 } 15314 15315 if (bp == immed_bp) { 15316 /* immed_bp is gone by now, so clear this */ 15317 immed_bp = NULL; 15318 } 15319 break; 15320 } 15321 15322 } while (immed_bp == NULL); 15323 15324 exit: 15325 ASSERT(mutex_owned(SD_MUTEX(un))); 15326 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 15327 } 15328 15329 15330 /* 15331 * Function: sd_return_command 15332 * 15333 * Description: Returns a command to its originator (with or without an 15334 * error). Also starts commands waiting to be transported 15335 * to the target.
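* * Note: callers must hold SD_MUTEX(un); the routine temporarily drops and re-acquires it around the iodone processing (see the mutex_exit()/mutex_enter() pair in the body below).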
15336 * 15337 * Context: May be called from interrupt, kernel, or timeout context 15338 */ 15339 15340 static void 15341 sd_return_command(struct sd_lun *un, struct buf *bp) 15342 { 15343 struct sd_xbuf *xp; 15344 struct scsi_pkt *pktp; 15345 struct sd_fm_internal *sfip; 15346 15347 ASSERT(bp != NULL); 15348 ASSERT(un != NULL); 15349 ASSERT(mutex_owned(SD_MUTEX(un))); 15350 ASSERT(bp != un->un_rqs_bp); 15351 xp = SD_GET_XBUF(bp); 15352 ASSERT(xp != NULL); 15353 15354 pktp = SD_GET_PKTP(bp); 15355 sfip = (struct sd_fm_internal *)un->un_fm_private; 15356 ASSERT(sfip != NULL); 15357 15358 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 15359 15360 /* 15361 * Note: check for the "sdrestart failed" case. 15362 */ 15363 if ((un->un_partial_dma_supported == 1) && 15364 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 15365 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 15366 (xp->xb_pktp->pkt_resid == 0)) { 15367 15368 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 15369 /* 15370 * Successfully set up next portion of cmd 15371 * transfer, try sending it 15372 */ 15373 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 15374 NULL, NULL, 0, (clock_t)0, NULL); 15375 sd_start_cmds(un, NULL); 15376 return; /* Note:x86: need a return here? */ 15377 } 15378 } 15379 15380 /* 15381 * If this is the failfast bp, clear it from un_failfast_bp. This 15382 * can happen if, upon being retried, the failfast bp either 15383 * succeeded or encountered another error (possibly even a different 15384 * error than the one that precipitated the failfast state, but in 15385 * that case it would have had to exhaust retries as well). Regardless, 15386 * this should never occur while the instance is in the active 15387 * failfast state. 15388 */ 15389 if (bp == un->un_failfast_bp) { 15390 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 15391 un->un_failfast_bp = NULL; 15392 } 15393 15394 /* 15395 * Clear the failfast state upon successful completion of ANY cmd. 15396 */ 15397 if (bp->b_error == 0) { 15398 un->un_failfast_state = SD_FAILFAST_INACTIVE; 15399 /* 15400 * If this is a successful command that was previously 15401 * retried, we treat it as a recovered command and post an 15402 * ereport with a driver-assessment of "recovered". 15403 */ 15404 if (xp->xb_ena > 0) { 15405 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15406 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY); 15407 } 15408 } else { 15409 /* 15410 * If this is a failed non-USCSI command we will post an 15411 * ereport with driver-assessment set accordingly ("fail" or 15412 * "fatal"). 15413 */ 15414 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 15415 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15416 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 15417 } 15418 } 15419 15420 /* 15421 * This is used if the command was retried one or more times. Show that 15422 * we are done with it, and allow processing of the waitq to resume. 15423 */ 15424 if (bp == un->un_retry_bp) { 15425 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15426 "sd_return_command: un:0x%p: " 15427 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 15428 un->un_retry_bp = NULL; 15429 un->un_retry_statp = NULL; 15430 } 15431 15432 SD_UPDATE_RDWR_STATS(un, bp); 15433 SD_UPDATE_PARTITION_STATS(un, bp); 15434 15435 switch (un->un_state) { 15436 case SD_STATE_SUSPENDED: 15437 /* 15438 * Notify any threads waiting in sd_ddi_suspend() that 15439 * a command completion has occurred.
15440 */ 15441 cv_broadcast(&un->un_disk_busy_cv); 15442 break; 15443 default: 15444 sd_start_cmds(un, NULL); 15445 break; 15446 } 15447 15448 /* Return this command up the iodone chain to its originator. */ 15449 mutex_exit(SD_MUTEX(un)); 15450 15451 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 15452 xp->xb_pktp = NULL; 15453 15454 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 15455 15456 ASSERT(!mutex_owned(SD_MUTEX(un))); 15457 mutex_enter(SD_MUTEX(un)); 15458 15459 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 15460 } 15461 15462 15463 /* 15464 * Function: sd_return_failed_command 15465 * 15466 * Description: Command completion when an error occurred. 15467 * 15468 * Context: May be called from interrupt context 15469 */ 15470 15471 static void 15472 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 15473 { 15474 ASSERT(bp != NULL); 15475 ASSERT(un != NULL); 15476 ASSERT(mutex_owned(SD_MUTEX(un))); 15477 15478 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15479 "sd_return_failed_command: entry\n"); 15480 15481 /* 15482 * b_resid could already be nonzero due to a partial data 15483 * transfer, so do not change it here. 15484 */ 15485 SD_BIOERROR(bp, errcode); 15486 15487 sd_return_command(un, bp); 15488 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15489 "sd_return_failed_command: exit\n"); 15490 } 15491 15492 15493 /* 15494 * Function: sd_return_failed_command_no_restart 15495 * 15496 * Description: Same as sd_return_failed_command, but ensures that no 15497 * call back into sd_start_cmds will be issued. 15498 * 15499 * Context: May be called from interrupt context 15500 */ 15501 15502 static void 15503 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 15504 int errcode) 15505 { 15506 struct sd_xbuf *xp; 15507 15508 ASSERT(bp != NULL); 15509 ASSERT(un != NULL); 15510 ASSERT(mutex_owned(SD_MUTEX(un))); 15511 xp = SD_GET_XBUF(bp); 15512 ASSERT(xp != NULL); 15513 ASSERT(errcode != 0); 15514 15515 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15516 "sd_return_failed_command_no_restart: entry\n"); 15517 15518 /* 15519 * b_resid could already be nonzero due to a partial data 15520 * transfer, so do not change it here. 15521 */ 15522 SD_BIOERROR(bp, errcode); 15523 15524 /* 15525 * If this is the failfast bp, clear it. This can happen if the 15526 * failfast bp encountered a fatal error when we attempted to 15527 * retry it (such as a scsi_transport(9F) failure). However 15528 * we should NOT be in an active failfast state if the failfast 15529 * bp is not NULL. 15530 */ 15531 if (bp == un->un_failfast_bp) { 15532 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 15533 un->un_failfast_bp = NULL; 15534 } 15535 15536 if (bp == un->un_retry_bp) { 15537 /* 15538 * This command was retried one or more times. Show that we are 15539 * done with it, and allow processing of the waitq to resume.
15540 */ 15541 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15542 "sd_return_failed_command_no_restart: " 15543 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 15544 un->un_retry_bp = NULL; 15545 un->un_retry_statp = NULL; 15546 } 15547 15548 SD_UPDATE_RDWR_STATS(un, bp); 15549 SD_UPDATE_PARTITION_STATS(un, bp); 15550 15551 mutex_exit(SD_MUTEX(un)); 15552 15553 if (xp->xb_pktp != NULL) { 15554 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 15555 xp->xb_pktp = NULL; 15556 } 15557 15558 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 15559 15560 mutex_enter(SD_MUTEX(un)); 15561 15562 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15563 "sd_return_failed_command_no_restart: exit\n"); 15564 } 15565 15566 15567 /* 15568 * Function: sd_retry_command 15569 * 15570 * Description: Queue up a command for retry, or (optionally) fail it 15571 * if retry counts are exhausted. 15572 * 15573 * Arguments: un - Pointer to the sd_lun struct for the target. 15574 * 15575 * bp - Pointer to the buf for the command to be retried. 15576 * 15577 * retry_check_flag - Flag to see which (if any) of the retry 15578 * counts should be decremented/checked. If the indicated 15579 * retry count is exhausted, then the command will not be 15580 * retried; it will be failed instead. This should use a 15581 * value equal to one of the following: 15582 * 15583 * SD_RETRIES_NOCHECK 15584 * SD_RETRIES_STANDARD 15585 * SD_RETRIES_VICTIM 15586 * 15587 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 15588 * if the check should be made to see if FLAG_ISOLATE is set 15589 * in the pkt. If FLAG_ISOLATE is set, then the command is 15590 * not retried; it is simply failed. 15591 * 15592 * user_funcp - Ptr to function to call before dispatching the 15593 * command. May be NULL if no action needs to be performed. 15594 * (Primarily intended for printing messages.) 15595 * 15596 * user_arg - Optional argument to be passed along to 15597 * the user_funcp call. 15598 * 15599 * failure_code - errno return code to set in the bp if the 15600 * command is going to be failed. 15601 * 15602 * retry_delay - Retry delay interval in (clock_t) units. May 15603 * be zero, which indicates that the command should be retried 15604 * immediately (i.e., without an intervening delay). 15605 * 15606 * statp - Ptr to kstat function to be updated if the command 15607 * is queued for a delayed retry. May be NULL if no kstat 15608 * update is desired. 15609 * 15610 * Context: May be called from interrupt context. 15611 */ 15612 15613 static void 15614 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 15615 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int 15616 code), void *user_arg, int failure_code, clock_t retry_delay, 15617 void (*statp)(kstat_io_t *)) 15618 { 15619 struct sd_xbuf *xp; 15620 struct scsi_pkt *pktp; 15621 struct sd_fm_internal *sfip; 15622 15623 ASSERT(un != NULL); 15624 ASSERT(mutex_owned(SD_MUTEX(un))); 15625 ASSERT(bp != NULL); 15626 xp = SD_GET_XBUF(bp); 15627 ASSERT(xp != NULL); 15628 pktp = SD_GET_PKTP(bp); 15629 ASSERT(pktp != NULL); 15630 15631 sfip = (struct sd_fm_internal *)un->un_fm_private; 15632 ASSERT(sfip != NULL); 15633 15634 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15635 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 15636 15637 /* 15638 * If we are syncing or dumping, fail the command to avoid 15639 * recursively calling back into scsi_transport().
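* (When panicking, the command is failed without logging; see the fail_command_no_log label below.)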
15640 */ 15641 if (ddi_in_panic()) { 15642 goto fail_command_no_log; 15643 } 15644 15645 /* 15646 * We should never be retrying a command with FLAG_DIAGNOSE set, so 15647 * log an error and fail the command. 15648 */ 15649 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15650 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 15651 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 15652 sd_dump_memory(un, SD_LOG_IO, "CDB", 15653 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15654 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 15655 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15656 goto fail_command; 15657 } 15658 15659 /* 15660 * If we are suspended, then put the command onto the head of 15661 * the wait queue since we don't want to start more commands, and 15662 * clear the un_retry_bp. The next time we are resumed, the 15663 * command will be handled from the wait queue. 15664 */ 15665 switch (un->un_state) { 15666 case SD_STATE_SUSPENDED: 15667 case SD_STATE_DUMPING: 15668 bp->av_forw = un->un_waitq_headp; 15669 un->un_waitq_headp = bp; 15670 if (un->un_waitq_tailp == NULL) { 15671 un->un_waitq_tailp = bp; 15672 } 15673 if (bp == un->un_retry_bp) { 15674 un->un_retry_bp = NULL; 15675 un->un_retry_statp = NULL; 15676 } 15677 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 15678 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 15679 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 15680 return; 15681 default: 15682 break; 15683 } 15684 15685 /* 15686 * If the caller wants us to check FLAG_ISOLATE, then see if that 15687 * is set; if it is then we do not want to retry the command. 15688 * Normally, FLAG_ISOLATE is only used with USCSI cmds. 15689 */ 15690 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 15691 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 15692 goto fail_command; 15693 } 15694 } 15695 15696 15697 /* 15698 * If SD_RETRIES_FAILFAST is set, it indicates that either a 15699 * command timeout or a selection timeout has occurred. This means 15700 * that we were unable to establish any kind of communication with 15701 * the target, and subsequent retries and/or commands are likely 15702 * to encounter similar results and take a long time to complete. 15703 * 15704 * If this is a failfast error condition, we need to update the 15705 * failfast state, even if this bp does not have B_FAILFAST set. 15706 */ 15707 if (retry_check_flag & SD_RETRIES_FAILFAST) { 15708 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 15709 ASSERT(un->un_failfast_bp == NULL); 15710 /* 15711 * If we are already in the active failfast state, and 15712 * another failfast error condition has been detected, 15713 * then fail this command if it has B_FAILFAST set. 15714 * If B_FAILFAST is clear, then maintain the legacy 15715 * behavior of retrying heroically, even though this will 15716 * take a lot more time to fail the command. 15717 */ 15718 if (bp->b_flags & B_FAILFAST) { 15719 goto fail_command; 15720 } 15721 } else { 15722 /* 15723 * We're not in the active failfast state, but we 15724 * have a failfast error condition, so we must begin 15725 * transition to the next state. We do this regardless 15726 * of whether or not this bp has B_FAILFAST set. 15727 */ 15728 if (un->un_failfast_bp == NULL) { 15729 /* 15730 * This is the first bp to meet a failfast 15731 * condition so save it on un_failfast_bp & 15732 * do normal retry processing. Do not enter 15733 * active failfast state yet. This marks 15734 * entry into the "failfast pending" state.
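* * To summarize the failfast transitions described above (illustrative only): INACTIVE -> pending on the first failfast error (un_failfast_bp is set); pending -> ACTIVE on a second failfast error for the same bp (queues are flushed via sd_failfast_flushq()); and any retry without SD_RETRIES_FAILFAST set returns the state to INACTIVE.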
15735 */ 15736 un->un_failfast_bp = bp; 15737 15738 } else if (un->un_failfast_bp == bp) { 15739 /* 15740 * This is the second time *this* bp has 15741 * encountered a failfast error condition, 15742 * so enter active failfast state & flush 15743 * queues as appropriate. 15744 */ 15745 un->un_failfast_state = SD_FAILFAST_ACTIVE; 15746 un->un_failfast_bp = NULL; 15747 sd_failfast_flushq(un); 15748 15749 /* 15750 * Fail this bp now if B_FAILFAST set; 15751 * otherwise continue with retries. (It would 15752 * be pretty ironic if this bp succeeded on a 15753 * subsequent retry after we just flushed all 15754 * the queues). 15755 */ 15756 if (bp->b_flags & B_FAILFAST) { 15757 goto fail_command; 15758 } 15759 15760 #if !defined(lint) && !defined(__lint) 15761 } else { 15762 /* 15763 * If neither of the preceding conditionals 15764 * was true, it means that there is some 15765 * *other* bp that has met an initial failfast 15766 * condition and is currently either being 15767 * retried or is waiting to be retried. In 15768 * that case we should perform normal retry 15769 * processing on *this* bp, since there is a 15770 * chance that the current failfast condition 15771 * is transient and recoverable. If that does 15772 * not turn out to be the case, then retries 15773 * will be cleared when the wait queue is 15774 * flushed anyway. 15775 */ 15776 #endif 15777 } 15778 } 15779 } else { 15780 /* 15781 * SD_RETRIES_FAILFAST is clear, which indicates that we 15782 * likely were able to at least establish some level of 15783 * communication with the target and subsequent commands 15784 * and/or retries are likely to get through to the target. 15785 * In this case we want to be aggressive about clearing 15786 * the failfast state. Note that this does not affect 15787 * the "failfast pending" condition. 15788 */ 15789 un->un_failfast_state = SD_FAILFAST_INACTIVE; 15790 } 15791 15792 15793 /* 15794 * Check the specified retry count to see if we can still do 15795 * any retries with this pkt before we should fail it. 15796 */ 15797 switch (retry_check_flag & SD_RETRIES_MASK) { 15798 case SD_RETRIES_VICTIM: 15799 /* 15800 * Check the victim retry count. If exhausted, then fall 15801 * through & check against the standard retry count. 15802 */ 15803 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 15804 /* Increment count & proceed with the retry */ 15805 xp->xb_victim_retry_count++; 15806 break; 15807 } 15808 /* Victim retries exhausted, fall back to std. retries... */ 15809 /* FALLTHRU */ 15810 15811 case SD_RETRIES_STANDARD: 15812 if (xp->xb_retry_count >= un->un_retry_count) { 15813 /* Retries exhausted, fail the command */ 15814 SD_TRACE(SD_LOG_IO_CORE, un, 15815 "sd_retry_command: retries exhausted!\n"); 15816 /* 15817 * Update b_resid for failed SCMD_READ & SCMD_WRITE 15818 * commands with nonzero pkt_resid.
15819 */ 15820 if ((pktp->pkt_reason == CMD_CMPLT) && 15821 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 15822 (pktp->pkt_resid != 0)) { 15823 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 15824 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 15825 SD_UPDATE_B_RESID(bp, pktp); 15826 } 15827 } 15828 goto fail_command; 15829 } 15830 xp->xb_retry_count++; 15831 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15832 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15833 break; 15834 15835 case SD_RETRIES_UA: 15836 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 15837 /* Retries exhausted, fail the command */ 15838 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15839 "Unit Attention retries exhausted. " 15840 "Check the target.\n"); 15841 goto fail_command; 15842 } 15843 xp->xb_ua_retry_count++; 15844 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15845 "sd_retry_command: retry count:%d\n", 15846 xp->xb_ua_retry_count); 15847 break; 15848 15849 case SD_RETRIES_BUSY: 15850 if (xp->xb_retry_count >= un->un_busy_retry_count) { 15851 /* Retries exhausted, fail the command */ 15852 SD_TRACE(SD_LOG_IO_CORE, un, 15853 "sd_retry_command: retries exhausted!\n"); 15854 goto fail_command; 15855 } 15856 xp->xb_retry_count++; 15857 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15858 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15859 break; 15860 15861 case SD_RETRIES_NOCHECK: 15862 default: 15863 /* No retry count to check. Just proceed with the retry */ 15864 break; 15865 } 15866 15867 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15868 15869 /* 15870 * If this is a non-USCSI command that failed during its last 15871 * execution and is now being retried, we should post an ereport 15872 * with a driver-assessment of the value "retry". 15873 * For partial DMA, request sense, and STATUS_QFULL there are no 15874 * hardware errors, so we bypass ereport posting. 15875 */ 15876 if (failure_code != 0) { 15877 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 15878 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15879 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY); 15880 } 15881 } 15882 15883 /* 15884 * If we were given a zero timeout, we must attempt to retry the 15885 * command immediately (i.e., without a delay). 15886 */ 15887 if (retry_delay == 0) { 15888 /* 15889 * Check some limiting conditions to see if we can actually 15890 * do the immediate retry. If we cannot, then we must 15891 * fall back to queueing up a delayed retry. 15892 */ 15893 if (un->un_ncmds_in_transport >= un->un_throttle) { 15894 /* 15895 * We are at the throttle limit for the target, 15896 * so fall back to delayed retry. 15897 */ 15898 retry_delay = un->un_busy_timeout; 15899 statp = kstat_waitq_enter; 15900 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15901 "sd_retry_command: immed. retry hit " 15902 "throttle!\n"); 15903 } else { 15904 /* 15905 * We're clear to proceed with the immediate retry. 15906 * First call the user-provided function (if any). 15907 */ 15908 if (user_funcp != NULL) { 15909 (*user_funcp)(un, bp, user_arg, 15910 SD_IMMEDIATE_RETRY_ISSUED); 15911 #ifdef __lock_lint 15912 sd_print_incomplete_msg(un, bp, user_arg, 15913 SD_IMMEDIATE_RETRY_ISSUED); 15914 sd_print_cmd_incomplete_msg(un, bp, user_arg, 15915 SD_IMMEDIATE_RETRY_ISSUED); 15916 sd_print_sense_failed_msg(un, bp, user_arg, 15917 SD_IMMEDIATE_RETRY_ISSUED); 15918 #endif 15919 } 15920 15921 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15922 "sd_retry_command: issuing immediate retry\n"); 15923 15924 /* 15925 * Call sd_start_cmds() to transport the command to 15926 * the target.
15927 */ 15928 sd_start_cmds(un, bp); 15929 15930 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15931 "sd_retry_command exit\n"); 15932 return; 15933 } 15934 } 15935 15936 /* 15937 * Set up to retry the command after a delay. 15938 * First call the user-provided function (if any). 15939 */ 15940 if (user_funcp != NULL) { 15941 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 15942 } 15943 15944 sd_set_retry_bp(un, bp, retry_delay, statp); 15945 15946 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15947 return; 15948 15949 fail_command: 15950 15951 if (user_funcp != NULL) { 15952 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 15953 } 15954 15955 fail_command_no_log: 15956 15957 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15958 "sd_retry_command: returning failed command\n"); 15959 15960 sd_return_failed_command(un, bp, failure_code); 15961 15962 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15963 } 15964 15965 15966 /* 15967 * Function: sd_set_retry_bp 15968 * 15969 * Description: Set up the given bp for retry. 15970 * 15971 * Arguments: un - ptr to associated softstate 15972 * bp - ptr to buf(9S) for the command 15973 * retry_delay - time interval before issuing retry (may be 0) 15974 * statp - optional pointer to kstat function 15975 * 15976 * Context: May be called under interrupt context 15977 */ 15978 15979 static void 15980 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 15981 void (*statp)(kstat_io_t *)) 15982 { 15983 ASSERT(un != NULL); 15984 ASSERT(mutex_owned(SD_MUTEX(un))); 15985 ASSERT(bp != NULL); 15986 15987 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15988 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 15989 15990 /* 15991 * Indicate that the command is being retried. This will not allow any 15992 * other commands on the wait queue to be transported to the target 15993 * until this command has been completed (success or failure). The 15994 * "retry command" is not transported to the target until the given 15995 * time delay expires, unless the user specified a 0 retry_delay. 15996 * 15997 * Note: the timeout(9F) callback routine is what actually calls 15998 * sd_start_cmds() to transport the command, with the exception of a 15999 * zero retry_delay. The only current implementor of a zero retry delay 16000 * is the case where a START_STOP_UNIT is sent to spin up a device. 16001 */ 16002 if (un->un_retry_bp == NULL) { 16003 ASSERT(un->un_retry_statp == NULL); 16004 un->un_retry_bp = bp; 16005 16006 /* 16007 * If the user has not specified a delay, the command should 16008 * be queued and no timeout should be scheduled. 16009 */ 16010 if (retry_delay == 0) { 16011 /* 16012 * Save the kstat pointer that will be used in the 16013 * call to SD_UPDATE_KSTATS() below, so that 16014 * sd_start_cmds() can correctly decrement the waitq 16015 * count when it is time to transport this command. 16016 */ 16017 un->un_retry_statp = statp; 16018 goto done; 16019 } 16020 } 16021 16022 if (un->un_retry_bp == bp) { 16023 /* 16024 * Save the kstat pointer that will be used in the call to 16025 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 16026 * correctly decrement the waitq count when it is time to 16027 * transport this command. 16028 */ 16029 un->un_retry_statp = statp; 16030 16031 /* 16032 * Schedule a timeout if: 16033 * 1) The user has specified a delay. 16034 * 2) There is not a START_STOP_UNIT callback pending.
16035 * 16036 * If no delay has been specified, then it is up to the caller 16037 * to ensure that IO processing continues without stalling. 16038 * Effectively, this means that the caller will issue the 16039 * required call to sd_start_cmds(). The START_STOP_UNIT 16040 * callback does this after the START STOP UNIT command has 16041 * completed. In either of these cases we should not schedule 16042 * a timeout callback here. Also don't schedule the timeout if 16043 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 16044 */ 16045 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 16046 (un->un_direct_priority_timeid == NULL)) { 16047 un->un_retry_timeid = 16048 timeout(sd_start_retry_command, un, retry_delay); 16049 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16050 "sd_set_retry_bp: setting timeout: un: 0x%p" 16051 " bp:0x%p un_retry_timeid:0x%p\n", 16052 un, bp, un->un_retry_timeid); 16053 } 16054 } else { 16055 /* 16056 * We only get in here if there is already another command 16057 * waiting to be retried. In this case, we just put the 16058 * given command onto the wait queue, so it can be transported 16059 * after the current retry command has completed. 16060 * 16061 * Also, if the command at the head of the wait queue is the 16062 * un_failfast_bp, we have to make sure that no other commands 16063 * to be retried are placed ahead of it. 16064 */ 16065 if ((un->un_failfast_bp != NULL) && 16066 (un->un_failfast_bp == un->un_waitq_headp)) { 16067 /* 16068 * Enqueue this command AFTER the first command on 16069 * the wait queue (which is also un_failfast_bp). 16070 */ 16071 bp->av_forw = un->un_waitq_headp->av_forw; 16072 un->un_waitq_headp->av_forw = bp; 16073 if (un->un_waitq_headp == un->un_waitq_tailp) { 16074 un->un_waitq_tailp = bp; 16075 } 16076 } else { 16077 /* Enqueue this command at the head of the waitq. */ 16078 bp->av_forw = un->un_waitq_headp; 16079 un->un_waitq_headp = bp; 16080 if (un->un_waitq_tailp == NULL) { 16081 un->un_waitq_tailp = bp; 16082 } 16083 } 16084 16085 if (statp == NULL) { 16086 statp = kstat_waitq_enter; 16087 } 16088 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16089 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 16090 } 16091 16092 done: 16093 if (statp != NULL) { 16094 SD_UPDATE_KSTATS(un, statp, bp); 16095 } 16096 16097 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16098 "sd_set_retry_bp: exit un:0x%p\n", un); 16099 } 16100 16101 16102 /* 16103 * Function: sd_start_retry_command 16104 * 16105 * Description: Start the command that has been waiting on the target's 16106 * retry queue. Called from timeout(9F) context after the 16107 * retry delay interval has expired. 16108 * 16109 * Arguments: arg - pointer to associated softstate for the device. 16110 * 16111 * Context: timeout(9F) thread context. May not sleep.
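* * Note: this callback is scheduled via timeout(9F), e.g. from sd_set_retry_bp() above; un_retry_timeid holds the timeout ID until the callback fires and clears it.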
16112 */ 16113 16114 static void 16115 sd_start_retry_command(void *arg) 16116 { 16117 struct sd_lun *un = arg; 16118 16119 ASSERT(un != NULL); 16120 ASSERT(!mutex_owned(SD_MUTEX(un))); 16121 16122 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16123 "sd_start_retry_command: entry\n"); 16124 16125 mutex_enter(SD_MUTEX(un)); 16126 16127 un->un_retry_timeid = NULL; 16128 16129 if (un->un_retry_bp != NULL) { 16130 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16131 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 16132 un, un->un_retry_bp); 16133 sd_start_cmds(un, un->un_retry_bp); 16134 } 16135 16136 mutex_exit(SD_MUTEX(un)); 16137 16138 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16139 "sd_start_retry_command: exit\n"); 16140 } 16141 16142 /* 16143 * Function: sd_rmw_msg_print_handler 16144 * 16145 * Description: If RMW mode is enabled and the warning message has been 16146 * triggered, print the I/O count accumulated during a fixed interval. 16147 * 16148 * Arguments: arg - pointer to associated softstate for the device. 16149 * 16150 * Context: timeout(9F) thread context. May not sleep. 16151 */ 16152 static void 16153 sd_rmw_msg_print_handler(void *arg) 16154 { 16155 struct sd_lun *un = arg; 16156 16157 ASSERT(un != NULL); 16158 ASSERT(!mutex_owned(SD_MUTEX(un))); 16159 16160 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16161 "sd_rmw_msg_print_handler: entry\n"); 16162 16163 mutex_enter(SD_MUTEX(un)); 16164 16165 if (un->un_rmw_incre_count > 0) { 16166 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16167 "%"PRIu64" I/O requests are not aligned with %d disk " 16168 "sector size in %ld seconds. They are handled through " 16169 "Read Modify Write but the performance is very low!\n", 16170 un->un_rmw_incre_count, un->un_tgt_blocksize, 16171 drv_hztousec(SD_RMW_MSG_PRINT_TIMEOUT) / 1000000); 16172 un->un_rmw_incre_count = 0; 16173 un->un_rmw_msg_timeid = timeout(sd_rmw_msg_print_handler, 16174 un, SD_RMW_MSG_PRINT_TIMEOUT); 16175 } else { 16176 un->un_rmw_msg_timeid = NULL; 16177 } 16178 16179 mutex_exit(SD_MUTEX(un)); 16180 16181 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16182 "sd_rmw_msg_print_handler: exit\n"); 16183 } 16184 16185 /* 16186 * Function: sd_start_direct_priority_command 16187 * 16188 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 16189 * received TRAN_BUSY when we called scsi_transport() to send it 16190 * to the underlying HBA. This function is called from timeout(9F) 16191 * context after the delay interval has expired. 16192 * 16193 * Arguments: arg - pointer to associated buf(9S) to be restarted. 16194 * 16195 * Context: timeout(9F) thread context. May not sleep. 16196 */ 16197 16198 static void 16199 sd_start_direct_priority_command(void *arg) 16200 { 16201 struct buf *priority_bp = arg; 16202 struct sd_lun *un; 16203 16204 ASSERT(priority_bp != NULL); 16205 un = SD_GET_UN(priority_bp); 16206 ASSERT(un != NULL); 16207 ASSERT(!mutex_owned(SD_MUTEX(un))); 16208 16209 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16210 "sd_start_direct_priority_command: entry\n"); 16211 16212 mutex_enter(SD_MUTEX(un)); 16213 un->un_direct_priority_timeid = NULL; 16214 sd_start_cmds(un, priority_bp); 16215 mutex_exit(SD_MUTEX(un)); 16216 16217 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16218 "sd_start_direct_priority_command: exit\n"); 16219 } 16220 16221 16222 /* 16223 * Function: sd_send_request_sense_command 16224 * 16225 * Description: Sends a REQUEST SENSE command to the target 16226 * 16227 * Context: May be called from interrupt context.
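* * Note: this routine does not build a new packet; it marks the pre-allocated un_rqs_bp (set up in sd_alloc_rqs()) busy via sd_mark_rqs_busy() and transports it through sd_start_cmds().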
16228 */ 16229 16230 static void 16231 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 16232 struct scsi_pkt *pktp) 16233 { 16234 ASSERT(bp != NULL); 16235 ASSERT(un != NULL); 16236 ASSERT(mutex_owned(SD_MUTEX(un))); 16237 16238 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 16239 "entry: buf:0x%p\n", bp); 16240 16241 /* 16242 * If we are syncing or dumping, then fail the command to avoid a 16243 * recursive callback into scsi_transport(). Also fail the command 16244 * if we are suspended (legacy behavior). 16245 */ 16246 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 16247 (un->un_state == SD_STATE_DUMPING)) { 16248 sd_return_failed_command(un, bp, EIO); 16249 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16250 "sd_send_request_sense_command: syncing/dumping, exit\n"); 16251 return; 16252 } 16253 16254 /* 16255 * Retry the failed command and don't issue the request sense if: 16256 * 1) the sense buf is busy 16257 * 2) we have 1 or more outstanding commands on the target 16258 * (the sense data will be cleared or invalidated anyway) 16259 * 16260 * Note: There could be an issue with not checking a retry limit here; 16261 * the problem is determining which retry limit to check. 16262 */ 16263 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 16264 /* Don't retry if the command is flagged as non-retryable */ 16265 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16266 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 16267 NULL, NULL, 0, un->un_busy_timeout, 16268 kstat_waitq_enter); 16269 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16270 "sd_send_request_sense_command: " 16271 "at full throttle, retrying exit\n"); 16272 } else { 16273 sd_return_failed_command(un, bp, EIO); 16274 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16275 "sd_send_request_sense_command: " 16276 "at full throttle, non-retryable exit\n"); 16277 } 16278 return; 16279 } 16280 16281 sd_mark_rqs_busy(un, bp); 16282 sd_start_cmds(un, un->un_rqs_bp); 16283 16284 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16285 "sd_send_request_sense_command: exit\n"); 16286 } 16287 16288 16289 /* 16290 * Function: sd_mark_rqs_busy 16291 * 16292 * Description: Indicate that the request sense bp for this instance is 16293 * in use. 16294 * 16295 * Context: May be called under interrupt context 16296 */ 16297 16298 static void 16299 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 16300 { 16301 struct sd_xbuf *sense_xp; 16302 16303 ASSERT(un != NULL); 16304 ASSERT(bp != NULL); 16305 ASSERT(mutex_owned(SD_MUTEX(un))); 16306 ASSERT(un->un_sense_isbusy == 0); 16307 16308 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 16309 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 16310 16311 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 16312 ASSERT(sense_xp != NULL); 16313 16314 SD_INFO(SD_LOG_IO, un, 16315 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 16316 16317 ASSERT(sense_xp->xb_pktp != NULL); 16318 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 16319 == (FLAG_SENSING | FLAG_HEAD)); 16320 16321 un->un_sense_isbusy = 1; 16322 un->un_rqs_bp->b_resid = 0; 16323 sense_xp->xb_pktp->pkt_resid = 0; 16324 sense_xp->xb_pktp->pkt_reason = 0; 16325 16326 /* So we can get back the bp at interrupt time! */ 16327 sense_xp->xb_sense_bp = bp; 16328 16329 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 16330 16331 /* 16332 * Mark this buf as awaiting sense data. (This is already set in 16333 * the pkt_flags for the RQS packet.)
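* FLAG_SENSING is cleared again by sd_mark_rqs_idle() once the sense data has been retrieved.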
16334 */ 16335 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 16336 16337 /* Request sense down same path */ 16338 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) && 16339 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance) 16340 sense_xp->xb_pktp->pkt_path_instance = 16341 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance; 16342 16343 sense_xp->xb_retry_count = 0; 16344 sense_xp->xb_victim_retry_count = 0; 16345 sense_xp->xb_ua_retry_count = 0; 16346 sense_xp->xb_nr_retry_count = 0; 16347 sense_xp->xb_dma_resid = 0; 16348 16349 /* Clean up the fields for auto-request sense */ 16350 sense_xp->xb_sense_status = 0; 16351 sense_xp->xb_sense_state = 0; 16352 sense_xp->xb_sense_resid = 0; 16353 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 16354 16355 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 16356 } 16357 16358 16359 /* 16360 * Function: sd_mark_rqs_idle 16361 * 16362 * Description: SD_MUTEX must be held continuously through this routine 16363 * to prevent reuse of the rqs struct before the caller can 16364 * complete it's processing. 16365 * 16366 * Return Code: Pointer to the RQS buf 16367 * 16368 * Context: May be called under interrupt context 16369 */ 16370 16371 static struct buf * 16372 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 16373 { 16374 struct buf *bp; 16375 ASSERT(un != NULL); 16376 ASSERT(sense_xp != NULL); 16377 ASSERT(mutex_owned(SD_MUTEX(un))); 16378 ASSERT(un->un_sense_isbusy != 0); 16379 16380 un->un_sense_isbusy = 0; 16381 bp = sense_xp->xb_sense_bp; 16382 sense_xp->xb_sense_bp = NULL; 16383 16384 /* This pkt is no longer interested in getting sense data */ 16385 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 16386 16387 return (bp); 16388 } 16389 16390 16391 16392 /* 16393 * Function: sd_alloc_rqs 16394 * 16395 * Description: Set up the unit to receive auto request sense data 16396 * 16397 * Return Code: DDI_SUCCESS or DDI_FAILURE 16398 * 16399 * Context: Called under attach(9E) context 16400 */ 16401 16402 static int 16403 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 16404 { 16405 struct sd_xbuf *xp; 16406 16407 ASSERT(un != NULL); 16408 ASSERT(!mutex_owned(SD_MUTEX(un))); 16409 ASSERT(un->un_rqs_bp == NULL); 16410 ASSERT(un->un_rqs_pktp == NULL); 16411 16412 /* 16413 * First allocate the required buf and scsi_pkt structs, then set up 16414 * the CDB in the scsi_pkt for a REQUEST SENSE command. 16415 */ 16416 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 16417 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 16418 if (un->un_rqs_bp == NULL) { 16419 return (DDI_FAILURE); 16420 } 16421 16422 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 16423 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 16424 16425 if (un->un_rqs_pktp == NULL) { 16426 sd_free_rqs(un); 16427 return (DDI_FAILURE); 16428 } 16429 16430 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 16431 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 16432 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 16433 16434 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 16435 16436 /* Set up the other needed members in the ARQ scsi_pkt. */ 16437 un->un_rqs_pktp->pkt_comp = sdintr; 16438 un->un_rqs_pktp->pkt_time = sd_io_time; 16439 un->un_rqs_pktp->pkt_flags |= 16440 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 16441 16442 /* 16443 * Allocate & init the sd_xbuf struct for the RQS command. 
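
/*
 * The three routines above and the two that follow manage the single
 * pre-allocated REQUEST SENSE (RQS) command for this instance:
 * sd_alloc_rqs() builds the buf/pkt pair at attach(9E) time,
 * sd_mark_rqs_busy()/sd_mark_rqs_idle() bracket each use of it on behalf
 * of a failed command, and sd_free_rqs() tears it down again. Because
 * there is only one RQS pkt per instance (guarded by un_sense_isbusy),
 * at most one manual request sense can be outstanding at a time.
 */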

/*
 * Function:	sd_alloc_rqs
 *
 * Description: Set up the unit to receive auto request sense data
 *
 * Return Code: DDI_SUCCESS or DDI_FAILURE
 *
 * Context:	Called under attach(9E) context
 */

static int
sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_rqs_bp == NULL);
	ASSERT(un->un_rqs_pktp == NULL);

	/*
	 * First allocate the required buf and scsi_pkt structs, then set up
	 * the CDB in the scsi_pkt for a REQUEST SENSE command.
	 */
	un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
	    MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
	if (un->un_rqs_bp == NULL) {
		return (DDI_FAILURE);
	}

	un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);

	if (un->un_rqs_pktp == NULL) {
		sd_free_rqs(un);
		return (DDI_FAILURE);
	}

	/* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
	(void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
	    SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0);

	SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);

	/* Set up the other needed members in the ARQ scsi_pkt. */
	un->un_rqs_pktp->pkt_comp   = sdintr;
	un->un_rqs_pktp->pkt_time   = sd_io_time;
	un->un_rqs_pktp->pkt_flags |=
	    (FLAG_SENSING | FLAG_HEAD);	/* (1222170) */

	/*
	 * Allocate & init the sd_xbuf struct for the RQS command. Do not
	 * provide any initpkt/destroypkt routines as we take care of
	 * scsi_pkt allocation/freeing here and in sd_free_rqs().
	 */
	xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
	xp->xb_pktp = un->un_rqs_pktp;
	SD_INFO(SD_LOG_ATTACH_DETACH, un,
	    "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
	    un, xp, un->un_rqs_pktp, un->un_rqs_bp);

	/*
	 * Save the pointer to the request sense private bp so it can
	 * be retrieved in sdintr.
	 */
	un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
	ASSERT(un->un_rqs_bp->b_private == xp);

	/*
	 * See if the HBA supports auto-request sense for the specified
	 * target/lun. If it does, then try to enable it (if not already
	 * enabled).
	 *
	 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
	 * failure, while for other HBAs (pln) scsi_ifsetcap will always
	 * return success. However, in both of these cases ARQ is always
	 * enabled and scsi_ifgetcap will always return true. The best
	 * approach is to issue the scsi_ifgetcap() first, then try the
	 * scsi_ifsetcap().
	 *
	 * The third case is an HBA (adp) that always reports ARQ enabled
	 * on scsi_ifgetcap even when it is not; the best approach there is
	 * to issue a scsi_ifsetcap followed by a scsi_ifgetcap.
	 * Note: this case exists only to circumvent the Adaptec bug
	 * (x86 only).
	 */

	if (un->un_f_is_fibre == TRUE) {
		un->un_f_arq_enabled = TRUE;
	} else {
#if defined(__i386) || defined(__amd64)
		/*
		 * Circumvent the Adaptec bug; remove this code when
		 * the bug is fixed.
		 */
		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
#endif
		switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
		case 0:
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: HBA supports ARQ\n");
			/*
			 * ARQ is supported by this HBA but currently is not
			 * enabled. Attempt to enable it and if successful then
			 * mark this instance as ARQ enabled.
			 */
			if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
			    == 1) {
				/* Successfully enabled ARQ in the HBA */
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_alloc_rqs: ARQ enabled\n");
				un->un_f_arq_enabled = TRUE;
			} else {
				/* Could not enable ARQ in the HBA */
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_alloc_rqs: failed ARQ enable\n");
				un->un_f_arq_enabled = FALSE;
			}
			break;
		case 1:
			/*
			 * ARQ is supported by this HBA and is already enabled.
			 * Just mark ARQ as enabled for this instance.
			 */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: ARQ already enabled\n");
			un->un_f_arq_enabled = TRUE;
			break;
		default:
			/*
			 * ARQ is not supported by this HBA; disable it for this
			 * instance.
			 */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: HBA does not support ARQ\n");
			un->un_f_arq_enabled = FALSE;
			break;
		}
	}

	return (DDI_SUCCESS);
}

/*
 * Function:	sd_free_rqs
 *
 * Description: Cleanup for the pre-instance RQS command.
 *
 * Context:	Kernel thread context
 */

static void
sd_free_rqs(struct sd_lun *un)
{
	ASSERT(un != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");

	/*
	 * If consistent memory is bound to a scsi_pkt, the pkt
	 * has to be destroyed *before* freeing the consistent memory.
	 * Don't change the sequence of these operations.
	 * scsi_destroy_pkt() might access memory, which isn't allowed
	 * after it was freed in scsi_free_consistent_buf().
	 */
	if (un->un_rqs_pktp != NULL) {
		scsi_destroy_pkt(un->un_rqs_pktp);
		un->un_rqs_pktp = NULL;
	}

	if (un->un_rqs_bp != NULL) {
		struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp);
		if (xp != NULL) {
			kmem_free(xp, sizeof (struct sd_xbuf));
		}
		scsi_free_consistent_buf(un->un_rqs_bp);
		un->un_rqs_bp = NULL;
	}
	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
}



/*
 * Function:	sd_reduce_throttle
 *
 * Description: Reduces the maximum # of outstanding commands on a
 *		target to the current number of outstanding commands.
 *		Queues a timeout(9F) callback to restore the limit
 *		after a specified interval has elapsed.
 *		Typically used when we get a TRAN_BUSY return code
 *		back from scsi_transport().
 *
 * Arguments:	un - ptr to the sd_lun softstate struct
 *		throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
 *
 * Context:	May be called from interrupt context
 */

static void
sd_reduce_throttle(struct sd_lun *un, int throttle_type)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_ncmds_in_transport >= 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
	    "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
	    un, un->un_throttle, un->un_ncmds_in_transport);

	if (un->un_throttle > 1) {
		if (un->un_f_use_adaptive_throttle == TRUE) {
			switch (throttle_type) {
			case SD_THROTTLE_TRAN_BUSY:
				if (un->un_busy_throttle == 0) {
					un->un_busy_throttle = un->un_throttle;
				}
				break;
			case SD_THROTTLE_QFULL:
				un->un_busy_throttle = 0;
				break;
			default:
				ASSERT(FALSE);
			}

			if (un->un_ncmds_in_transport > 0) {
				un->un_throttle = un->un_ncmds_in_transport;
			}

		} else {
			if (un->un_ncmds_in_transport == 0) {
				un->un_throttle = 1;
			} else {
				un->un_throttle = un->un_ncmds_in_transport;
			}
		}
	}

	/* Reschedule the timeout if none is currently active */
	if (un->un_reset_throttle_timeid == NULL) {
		un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
		    un, SD_THROTTLE_RESET_INTERVAL);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_reduce_throttle: timeout scheduled!\n");
	}

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
	    "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
}
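
/*
 * A worked example of the adaptive throttle above (illustrative values
 * only): suppose un_throttle is 16 and 4 commands are in transport when
 * scsi_transport() returns TRAN_BUSY. sd_reduce_throttle() saves 16 in
 * un_busy_throttle and drops un_throttle to 4; when the reset timeout
 * fires, sd_restore_throttle() below puts un_throttle back to 16 in one
 * step. For QFULL, un_busy_throttle is cleared instead, so the restore
 * path must ramp un_throttle back up gradually (see below).
 */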

/*
 * Function:	sd_restore_throttle
 *
 * Description: Callback function for timeout(9F).  Resets the current
 *		value of un->un_throttle to its default.
 *
 * Arguments:	arg - pointer to associated softstate for the device.
 *
 * Context:	May be called from interrupt context
 */

static void
sd_restore_throttle(void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
	    "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);

	un->un_reset_throttle_timeid = NULL;

	if (un->un_f_use_adaptive_throttle == TRUE) {
		/*
		 * If un_busy_throttle is nonzero, then it contains the
		 * value that un_throttle was when we got a TRAN_BUSY back
		 * from scsi_transport(). We want to revert back to this
		 * value.
		 *
		 * In the QFULL case, the throttle limit will incrementally
		 * increase until it reaches max throttle.
		 */
		if (un->un_busy_throttle > 0) {
			un->un_throttle = un->un_busy_throttle;
			un->un_busy_throttle = 0;
		} else {
			/*
			 * Increase the throttle by 10% to open the gate
			 * slowly; schedule another restore if the saved
			 * throttle has not yet been reached.
			 */
			short throttle;
			if (sd_qfull_throttle_enable) {
				throttle = un->un_throttle +
				    max((un->un_throttle / 10), 1);
				un->un_throttle =
				    (throttle < un->un_saved_throttle) ?
				    throttle : un->un_saved_throttle;
				if (un->un_throttle < un->un_saved_throttle) {
					un->un_reset_throttle_timeid =
					    timeout(sd_restore_throttle,
					    un,
					    SD_QFULL_THROTTLE_RESET_INTERVAL);
				}
			}
		}

		/*
		 * If un_throttle has fallen below the low-water mark, we
		 * restore the maximum value here (and allow it to ratchet
		 * down again if necessary).
		 */
		if (un->un_throttle < un->un_min_throttle) {
			un->un_throttle = un->un_saved_throttle;
		}
	} else {
		SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
		    "restoring limit from 0x%x to 0x%x\n",
		    un->un_throttle, un->un_saved_throttle);
		un->un_throttle = un->un_saved_throttle;
	}

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_restore_throttle: calling sd_start_cmds!\n");

	sd_start_cmds(un, NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
	    un, un->un_throttle);

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
}
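
/*
 * To make the QFULL ramp above concrete (illustrative numbers only):
 * with un_throttle reduced to 40 and un_saved_throttle at 256, the
 * successive restore callbacks raise the throttle to 44, 48, 52, 57,
 * 62, ... one step per SD_QFULL_THROTTLE_RESET_INTERVAL, until the
 * saved value is reached and no further callback is scheduled.
 */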

/*
 * Function:	sdrunout
 *
 * Description: Callback routine for scsi_init_pkt when a resource allocation
 *		fails.
 *
 * Arguments:	arg - a pointer to the sd_lun unit struct for the particular
 *		soft state instance.
 *
 * Return Code: The scsi_init_pkt routine allows for the callback function to
 *		return a 0 indicating the callback should be rescheduled or a 1
 *		indicating not to reschedule. This routine always returns 1
 *		because the driver always provides a callback function to
 *		scsi_init_pkt. This results in a callback always being scheduled
 *		(via the scsi_init_pkt callback implementation) if a resource
 *		failure occurs.
 *
 * Context:	This callback function may not block or call routines that block
 *
 * Note:	Using the scsi_init_pkt callback facility can result in an I/O
 *		request persisting at the head of the list which cannot be
 *		satisfied even after multiple retries. In the future the driver
 *		may implement some type of maximum runout count before failing
 *		an I/O.
 */

static int
sdrunout(caddr_t arg)
{
	struct sd_lun	*un = (struct sd_lun *)arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");

	mutex_enter(SD_MUTEX(un));
	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));
	/*
	 * This callback routine always returns 1 (i.e. do not reschedule)
	 * because we always specify sdrunout as the callback handler for
	 * scsi_init_pkt inside the call to sd_start_cmds.
	 */
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
	return (1);
}
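
/*
 * sdintr() below is the single completion entry point for every pkt this
 * driver transports, including its own REQUEST SENSE pkt. Its dispatch
 * order matters: CMD_DEV_GONE is handled first, then auto-request-sense
 * completions (STATE_ARQ_DONE), then manual request-sense completions
 * (FLAG_SENSING), then the fast path for clean completions, and finally
 * the pkt_reason/pkt_status error dispatch.
 */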

/*
 * Function:	sdintr
 *
 * Description: Completion callback routine for scsi_pkt(9S) structs
 *		sent to the HBA driver via scsi_transport(9F).
 *
 * Context:	Interrupt context
 */

static void
sdintr(struct scsi_pkt *pktp)
{
	struct buf	*bp;
	struct sd_xbuf	*xp;
	struct sd_lun	*un;
	size_t		actual_len;
	sd_ssc_t	*sscp;

	ASSERT(pktp != NULL);
	bp = (struct buf *)pktp->pkt_private;
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(xp->xb_pktp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

#ifdef SD_FAULT_INJECTION

	SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n");
	/* SD FaultInjection */
	sd_faultinjection(pktp);

#endif /* SD_FAULT_INJECTION */

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p,"
	    " xp:0x%p, un:0x%p\n", bp, xp, un);

	mutex_enter(SD_MUTEX(un));

	ASSERT(un->un_fm_private != NULL);
	sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
	ASSERT(sscp != NULL);

	/* Reduce the count of the #commands currently in transport */
	un->un_ncmds_in_transport--;
	ASSERT(un->un_ncmds_in_transport >= 0);

	/* Increment counter to indicate that the callback routine is active */
	un->un_in_callback++;

	SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);

#ifdef	SDDEBUG
	if (bp == un->un_retry_bp) {
		SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: "
		    "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n",
		    un, un->un_retry_bp, un->un_ncmds_in_transport);
	}
#endif

	/*
	 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the
	 * media state if needed.
	 */
	if (pktp->pkt_reason == CMD_DEV_GONE) {
		/* Prevent multiple console messages for the same failure. */
		if (un->un_last_pkt_reason != CMD_DEV_GONE) {
			un->un_last_pkt_reason = CMD_DEV_GONE;
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Command failed to complete...Device is gone\n");
		}
		if (un->un_mediastate != DKIO_DEV_GONE) {
			un->un_mediastate = DKIO_DEV_GONE;
			cv_broadcast(&un->un_state_cv);
		}
		/*
		 * If the command happens to be the REQUEST SENSE command,
		 * free up the rqs buf and fail the original command.
		 */
		if (bp == un->un_rqs_bp) {
			bp = sd_mark_rqs_idle(un, xp);
		}
		sd_return_failed_command(un, bp, EIO);
		goto exit;
	}

	if (pktp->pkt_state & STATE_XARQ_DONE) {
		SD_TRACE(SD_LOG_COMMON, un,
		    "sdintr: extra sense data received. pkt=%p\n", pktp);
	}

	/*
	 * First see if the pkt has auto-request sense data with it....
	 * Look at the packet state first so we don't take a performance
	 * hit looking at the arq enabled flag unless absolutely necessary.
	 */
	if ((pktp->pkt_state & STATE_ARQ_DONE) &&
	    (un->un_f_arq_enabled == TRUE)) {
		/*
		 * The HBA did an auto request sense for this command so check
		 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
		 * driver command that should not be retried.
		 */
		if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
			/*
			 * Save the relevant sense info into the xp for the
			 * original cmd.
			 */
			struct scsi_arq_status *asp;
			asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
			xp->xb_sense_status =
			    *((uchar_t *)(&(asp->sts_rqpkt_status)));
			xp->xb_sense_state  = asp->sts_rqpkt_state;
			xp->xb_sense_resid  = asp->sts_rqpkt_resid;
			if (pktp->pkt_state & STATE_XARQ_DONE) {
				actual_len = MAX_SENSE_LENGTH -
				    xp->xb_sense_resid;
				bcopy(&asp->sts_sensedata, xp->xb_sense_data,
				    MAX_SENSE_LENGTH);
			} else {
				if (xp->xb_sense_resid > SENSE_LENGTH) {
					actual_len = MAX_SENSE_LENGTH -
					    xp->xb_sense_resid;
				} else {
					actual_len = SENSE_LENGTH -
					    xp->xb_sense_resid;
				}
				if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
					if ((((struct uscsi_cmd *)
					    (xp->xb_pktinfo))->uscsi_rqlen) >
					    actual_len) {
						xp->xb_sense_resid =
						    (((struct uscsi_cmd *)
						    (xp->xb_pktinfo))->
						    uscsi_rqlen) - actual_len;
					} else {
						xp->xb_sense_resid = 0;
					}
				}
				bcopy(&asp->sts_sensedata, xp->xb_sense_data,
				    SENSE_LENGTH);
			}

			/* fail the command */
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: arq done and FLAG_DIAGNOSE set\n");
			sd_return_failed_command(un, bp, EIO);
			goto exit;
		}

#if (defined(__i386) || defined(__amd64))	/* DMAFREE for x86 only */
		/*
		 * We want to either retry or fail this command, so free
		 * the DMA resources here. If we retry the command then
		 * the DMA resources will be reallocated in sd_start_cmds().
		 * Note that when PKT_DMA_PARTIAL is used, this reallocation
		 * causes the *entire* transfer to start over again from the
		 * beginning of the request, even for PARTIAL chunks that
		 * have already transferred successfully.
		 */
		if ((un->un_f_is_fibre == TRUE) &&
		    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
		    ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
			scsi_dmafree(pktp);
			xp->xb_pkt_flags |= SD_XB_DMA_FREED;
		}
#endif

		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: arq done, sd_handle_auto_request_sense\n");

		sd_handle_auto_request_sense(un, bp, xp, pktp);
		goto exit;
	}

	/* Next see if this is the REQUEST SENSE pkt for the instance */
	if (pktp->pkt_flags & FLAG_SENSING) {
		/* This pktp is from the unit's REQUEST_SENSE command */
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: sd_handle_request_sense\n");
		sd_handle_request_sense(un, bp, xp, pktp);
		goto exit;
	}

	/*
	 * Check to see if the command successfully completed as requested;
	 * this is the most common case (and also the hot performance path).
	 *
	 * Requirements for successful completion are:
	 * pkt_reason is CMD_CMPLT and packet status is status good.
	 * In addition:
	 * - A residual of zero indicates successful completion no matter what
	 *   the command is.
	 * - If the residual is not zero and the command is not a read or
	 *   write, then it's still defined as successful completion. In other
	 *   words, if the command is a read or write the residual must be
	 *   zero for successful completion.
	 * - If the residual is not zero and the command is a read or
	 *   write, and it's a USCSICMD, then it's still defined as
	 *   successful completion.
	 */
	if ((pktp->pkt_reason == CMD_CMPLT) &&
	    (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) {

		/*
		 * Since this command is returned with a good status, we
		 * can reset the count for Sonoma failover.
		 */
		un->un_sonoma_failure_count = 0;

		/*
		 * Return all USCSI commands on good status
		 */
		if (pktp->pkt_resid == 0) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: returning command for resid == 0\n");
		} else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) &&
		    ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) {
			SD_UPDATE_B_RESID(bp, pktp);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: returning command for resid != 0\n");
		} else if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
			SD_UPDATE_B_RESID(bp, pktp);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: returning uscsi command\n");
		} else {
			goto not_successful;
		}
		sd_return_command(un, bp);

		/*
		 * Decrement counter to indicate that the callback routine
		 * is done.
		 */
		un->un_in_callback--;
		ASSERT(un->un_in_callback >= 0);
		mutex_exit(SD_MUTEX(un));

		return;
	}

not_successful:

#if (defined(__i386) || defined(__amd64))	/* DMAFREE for x86 only */
	/*
	 * The following is based upon knowledge of the underlying transport
	 * and its use of DMA resources.  This code should be removed when
	 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor
	 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf()
	 * and sd_start_cmds().
	 *
	 * Free any DMA resources associated with this command if there
	 * is a chance it could be retried or enqueued for later retry.
	 * If we keep the DMA binding then mpxio cannot reissue the
	 * command on another path whenever a path failure occurs.
	 *
	 * Note that when PKT_DMA_PARTIAL is used, free/reallocation
	 * causes the *entire* transfer to start over again from the
	 * beginning of the request, even for PARTIAL chunks that
	 * have already transferred successfully.
	 *
	 * This is only done for non-uscsi commands (and also skipped for the
	 * driver's internal RQS command). Also just do this for Fibre Channel
	 * devices as these are the only ones that support mpxio.
	 */
	if ((un->un_f_is_fibre == TRUE) &&
	    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
	    ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
		scsi_dmafree(pktp);
		xp->xb_pkt_flags |= SD_XB_DMA_FREED;
	}
#endif

	/*
	 * The command did not successfully complete as requested so check
	 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
	 * driver command that should not be retried so just return. If
	 * FLAG_DIAGNOSE is not set the error will be processed below.
	 */
	if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n");
		/*
		 * Issue a request sense if a check condition caused the error
		 * (we handle the auto request sense case above), otherwise
		 * just fail the command.
		 */
		if ((pktp->pkt_reason == CMD_CMPLT) &&
		    (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) {
			sd_send_request_sense_command(un, bp, pktp);
		} else {
			sd_return_failed_command(un, bp, EIO);
		}
		goto exit;
	}

	/*
	 * The command did not successfully complete as requested so process
	 * the error, retry, and/or attempt recovery.
	 */
	switch (pktp->pkt_reason) {
	case CMD_CMPLT:
		switch (SD_GET_PKT_STATUS(pktp)) {
		case STATUS_GOOD:
			/*
			 * The command completed successfully with a non-zero
			 * residual
			 */
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_GOOD \n");
			sd_pkt_status_good(un, bp, xp, pktp);
			break;

		case STATUS_CHECK:
		case STATUS_TERMINATED:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_TERMINATED | STATUS_CHECK\n");
			sd_pkt_status_check_condition(un, bp, xp, pktp);
			break;

		case STATUS_BUSY:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_BUSY\n");
			sd_pkt_status_busy(un, bp, xp, pktp);
			break;

		case STATUS_RESERVATION_CONFLICT:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_RESERVATION_CONFLICT\n");
			sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
			break;

		case STATUS_QFULL:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_QFULL\n");
			sd_pkt_status_qfull(un, bp, xp, pktp);
			break;

		case STATUS_MET:
		case STATUS_INTERMEDIATE:
		case STATUS_SCSI2:
		case STATUS_INTERMEDIATE_MET:
		case STATUS_ACA_ACTIVE:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Unexpected SCSI status received: 0x%x\n",
			    SD_GET_PKT_STATUS(pktp));
			/*
			 * Mark the ssc_flags when an invalid status code is
			 * detected for a non-USCSI command.
			 */
			if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
				sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
				    0, "stat-code");
			}
			sd_return_failed_command(un, bp, EIO);
			break;

		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Invalid SCSI status received: 0x%x\n",
			    SD_GET_PKT_STATUS(pktp));
			if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
				sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
				    0, "stat-code");
			}
			sd_return_failed_command(un, bp, EIO);
			break;

		}
		break;

	case CMD_INCOMPLETE:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_INCOMPLETE\n");
		sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp);
		break;
	case CMD_TRAN_ERR:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_TRAN_ERR\n");
		sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp);
		break;
	case CMD_RESET:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_RESET \n");
		sd_pkt_reason_cmd_reset(un, bp, xp, pktp);
		break;
	case CMD_ABORTED:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_ABORTED \n");
		sd_pkt_reason_cmd_aborted(un, bp, xp, pktp);
		break;
	case CMD_TIMEOUT:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_TIMEOUT\n");
		sd_pkt_reason_cmd_timeout(un, bp, xp, pktp);
		break;
	case CMD_UNX_BUS_FREE:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_UNX_BUS_FREE \n");
		sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp);
		break;
	case CMD_TAG_REJECT:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_TAG_REJECT\n");
		sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp);
		break;
	default:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: default\n");
		/*
		 * Mark the ssc_flags for detecting an invalid pkt_reason.
		 */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON,
			    0, "pkt-reason");
		}
		sd_pkt_reason_default(un, bp, xp, pktp);
		break;
	}

exit:
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n");

	/* Decrement counter to indicate that the callback routine is done. */
	un->un_in_callback--;
	ASSERT(un->un_in_callback >= 0);

	/*
	 * At this point, the pkt has been dispatched, ie, it is either
	 * being re-tried or has been returned to its caller and should
	 * not be referenced.
	 */

	mutex_exit(SD_MUTEX(un));
}
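
/*
 * The sd_print_*_msg() routines that follow are not called directly;
 * they are passed to sd_retry_command() as user-supplied message
 * callbacks. The retry framework invokes them with a code argument of
 * SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, or
 * SD_NO_RETRY_ISSUED so the message text can reflect the disposition
 * of the command.
 */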

/*
 * Function:	sd_print_incomplete_msg
 *
 * Description: Prints the error message for a CMD_INCOMPLETE error.
 *
 * Arguments:	un - ptr to associated softstate for the device.
 *		bp - ptr to the buf(9S) for the command.
 *		arg - message string ptr
 *		code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
 *		or SD_NO_RETRY_ISSUED.
 *
 * Context:	May be called under interrupt context
 */

static void
sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	struct scsi_pkt	*pktp;
	char	*msgp;
	char	*cmdp = arg;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(arg != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	switch (code) {
	case SD_DELAYED_RETRY_ISSUED:
	case SD_IMMEDIATE_RETRY_ISSUED:
		msgp = "retrying";
		break;
	case SD_NO_RETRY_ISSUED:
	default:
		msgp = "giving up";
		break;
	}

	if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "incomplete %s- %s\n", cmdp, msgp);
	}
}



/*
 * Function:	sd_pkt_status_good
 *
 * Description: Processing for a STATUS_GOOD code in pkt_status.
 *
 * Context:	May be called under interrupt context
 */

static void
sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	char	*cmdp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);
	ASSERT(pktp->pkt_reason == CMD_CMPLT);
	ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
	ASSERT(pktp->pkt_resid != 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
	case SCMD_READ:
		cmdp = "read";
		break;
	case SCMD_WRITE:
		cmdp = "write";
		break;
	default:
		SD_UPDATE_B_RESID(bp, pktp);
		sd_return_command(un, bp);
		SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
		return;
	}

	/*
	 * See if we can retry the read/write, preferably immediately.
	 * If retries are exhausted, then sd_retry_command() will update
	 * the b_resid count.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
	    cmdp, EIO, (clock_t)0, NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
}
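
/*
 * The manual request-sense path handled below works as a round trip:
 * sd_send_request_sense_command() marks the RQS pkt busy and sets
 * FLAG_SENSING on the failed command's pkt; when the RQS pkt completes,
 * sdintr() recognizes FLAG_SENSING and hands it to
 * sd_handle_request_sense(), which copies the sense bytes into the
 * original command's xbuf and calls sd_mark_rqs_idle() to release the
 * RQS pkt for the next user.
 */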

/*
 * Function:	sd_handle_request_sense
 *
 * Description: Processing for non-auto Request Sense command.
 *
 * Arguments:	un - ptr to associated softstate
 *		sense_bp - ptr to buf(9S) for the RQS command
 *		sense_xp - ptr to the sd_xbuf for the RQS command
 *		sense_pktp - ptr to the scsi_pkt(9S) for the RQS command
 *
 * Context:	May be called under interrupt context
 */

static void
sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp,
    struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp)
{
	struct buf	*cmd_bp;	/* buf for the original command */
	struct sd_xbuf	*cmd_xp;	/* sd_xbuf for the original command */
	struct scsi_pkt *cmd_pktp;	/* pkt for the original command */
	size_t		actual_len;	/* actual sense data length */

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(sense_bp != NULL);
	ASSERT(sense_xp != NULL);
	ASSERT(sense_pktp != NULL);

	/*
	 * Note the sense_bp, sense_xp, and sense_pktp here are for the
	 * RQS command and not the original command.
	 */
	ASSERT(sense_pktp == un->un_rqs_pktp);
	ASSERT(sense_bp   == un->un_rqs_bp);
	ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) ==
	    (FLAG_SENSING | FLAG_HEAD));
	ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) &
	    FLAG_SENSING) == FLAG_SENSING);

	/* These are the bp, xp, and pktp for the original command */
	cmd_bp = sense_xp->xb_sense_bp;
	cmd_xp = SD_GET_XBUF(cmd_bp);
	cmd_pktp = SD_GET_PKTP(cmd_bp);

	if (sense_pktp->pkt_reason != CMD_CMPLT) {
		/*
		 * The REQUEST SENSE command failed.  Release the REQUEST
		 * SENSE command for re-use, get back the bp for the original
		 * command, and attempt to re-try the original command if
		 * FLAG_DIAGNOSE is not set in the original packet.
		 */
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
			cmd_bp = sd_mark_rqs_idle(un, sense_xp);
			sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD,
			    NULL, NULL, EIO, (clock_t)0, NULL);
			return;
		}
	}

	/*
	 * Save the relevant sense info into the xp for the original cmd.
	 *
	 * Note: if the request sense failed the state info will be zero
	 * as set in sd_mark_rqs_busy()
	 */
	cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp);
	cmd_xp->xb_sense_state  = sense_pktp->pkt_state;
	actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid;
	if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) &&
	    (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen >
	    SENSE_LENGTH)) {
		bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
		    MAX_SENSE_LENGTH);
		cmd_xp->xb_sense_resid = sense_pktp->pkt_resid;
	} else {
		bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
		    SENSE_LENGTH);
		if (actual_len < SENSE_LENGTH) {
			cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len;
		} else {
			cmd_xp->xb_sense_resid = 0;
		}
	}

	/*
	 *  Free up the RQS command....
	 *  NOTE:
	 *	Must do this BEFORE calling sd_validate_sense_data!
	 *	sd_validate_sense_data may return the original command in
	 *	which case the pkt will be freed and the flags can no
	 *	longer be touched.
	 *	SD_MUTEX is held through this process until the command
	 *	is dispatched based upon the sense data, so there are
	 *	no race conditions.
	 */
	(void) sd_mark_rqs_idle(un, sense_xp);

	/*
	 * For a retryable command see if we have valid sense data; if so,
	 * turn it over to sd_decode_sense() to figure out the right course
	 * of action. Just fail a non-retryable command.
	 */
	if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
		if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) ==
		    SD_SENSE_DATA_IS_VALID) {
			sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp);
		}
	} else {
		SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB",
		    (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
		SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data",
		    (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
		sd_return_failed_command(un, cmd_bp, EIO);
	}
}
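
/*
 * By contrast with the manual path above, the auto-request-sense path
 * below needs no shared RQS pkt: the HBA returns the sense bytes along
 * with the failed command itself, embedded in the scsi_arq_status that
 * pkt_scbp points to. The two paths therefore converge only at
 * sd_validate_sense_data()/sd_decode_sense().
 */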

/*
 * Function:	sd_handle_auto_request_sense
 *
 * Description: Processing for auto-request sense information.
 *
 * Arguments:	un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		xp - ptr to the sd_xbuf for the command
 *		pktp - ptr to the scsi_pkt(9S) for the command
 *
 * Context:	May be called under interrupt context
 */

static void
sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct scsi_arq_status *asp;
	size_t actual_len;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);
	ASSERT(pktp != un->un_rqs_pktp);
	ASSERT(bp   != un->un_rqs_bp);

	/*
	 * For auto-request sense, we get a scsi_arq_status back from
	 * the HBA, with the sense data in the sts_sensedata member.
	 * The pkt_scbp of the packet points to this scsi_arq_status.
	 */
	asp = (struct scsi_arq_status *)(pktp->pkt_scbp);

	if (asp->sts_rqpkt_reason != CMD_CMPLT) {
		/*
		 * The auto REQUEST SENSE failed; see if we can re-try
		 * the original command.
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "auto request sense failed (reason=%s)\n",
		    scsi_rname(asp->sts_rqpkt_reason));

		sd_reset_target(un, pktp);

		sd_retry_command(un, bp, SD_RETRIES_STANDARD,
		    NULL, NULL, EIO, (clock_t)0, NULL);
		return;
	}

	/* Save the relevant sense info into the xp for the original cmd. */
	xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status)));
	xp->xb_sense_state  = asp->sts_rqpkt_state;
	xp->xb_sense_resid  = asp->sts_rqpkt_resid;
	if (xp->xb_sense_state & STATE_XARQ_DONE) {
		actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
		bcopy(&asp->sts_sensedata, xp->xb_sense_data,
		    MAX_SENSE_LENGTH);
	} else {
		if (xp->xb_sense_resid > SENSE_LENGTH) {
			actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
		} else {
			actual_len = SENSE_LENGTH - xp->xb_sense_resid;
		}
		if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
			if ((((struct uscsi_cmd *)
			    (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) {
				xp->xb_sense_resid = (((struct uscsi_cmd *)
				    (xp->xb_pktinfo))->uscsi_rqlen) -
				    actual_len;
			} else {
				xp->xb_sense_resid = 0;
			}
		}
		bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH);
	}

	/*
	 * See if we have valid sense data; if so, turn it over to
	 * sd_decode_sense() to figure out the right course of action.
	 */
	if (sd_validate_sense_data(un, bp, xp, actual_len) ==
	    SD_SENSE_DATA_IS_VALID) {
		sd_decode_sense(un, bp, xp, pktp);
	}
}


/*
 * Function:	sd_print_sense_failed_msg
 *
 * Description: Print log message when RQS has failed.
 *
 * Arguments:	un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		arg - generic message string ptr
 *		code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *		or SD_NO_RETRY_ISSUED
 *
 * Context:	May be called from interrupt context
 */

static void
sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg,
    int code)
{
	char	*msgp = arg;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);

	if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp);
	}
}


/*
 * Function:	sd_validate_sense_data
 *
 * Description: Check the given sense data for validity.
 *		If the sense data is not valid, the command will
 *		be either failed or retried!
 *
 * Return Code: SD_SENSE_DATA_IS_INVALID
 *		SD_SENSE_DATA_IS_VALID
 *
 * Context:	May be called from interrupt context
 */

static int
sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    size_t actual_len)
{
	struct scsi_extended_sense *esp;
	struct	scsi_pkt *pktp;
	char	*msgp = NULL;
	sd_ssc_t *sscp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(bp != un->un_rqs_bp);
	ASSERT(xp != NULL);
	ASSERT(un->un_fm_private != NULL);

	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
	ASSERT(sscp != NULL);

	/*
	 * Check the status of the RQS command (auto or manual).
	 */
	switch (xp->xb_sense_status & STATUS_MASK) {
	case STATUS_GOOD:
		break;

	case STATUS_RESERVATION_CONFLICT:
		sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
		return (SD_SENSE_DATA_IS_INVALID);

	case STATUS_BUSY:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Busy Status on REQUEST SENSE\n");
		sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL,
		    NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
		return (SD_SENSE_DATA_IS_INVALID);

	case STATUS_QFULL:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "QFULL Status on REQUEST SENSE\n");
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL,
		    NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
		return (SD_SENSE_DATA_IS_INVALID);

	case STATUS_CHECK:
	case STATUS_TERMINATED:
		msgp = "Check Condition on REQUEST SENSE\n";
		goto sense_failed;

	default:
		msgp = "Not STATUS_GOOD on REQUEST_SENSE\n";
		goto sense_failed;
	}

	/*
	 * See if we got the minimum required amount of sense data.
	 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes
	 * or less.
	 */
	if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
	    (actual_len == 0)) {
		msgp = "Request Sense couldn't get sense data\n";
		goto sense_failed;
	}

	if (actual_len < SUN_MIN_SENSE_LENGTH) {
		msgp = "Not enough sense information\n";
		/* Mark the ssc_flags for detecting invalid sense data */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
			    "sense-data");
		}
		goto sense_failed;
	}

	/*
	 * We require the extended sense data
	 */
	esp = (struct scsi_extended_sense *)xp->xb_sense_data;
	if (esp->es_class != CLASS_EXTENDED_SENSE) {
		if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
			static char tmp[8];
			static char buf[148];
			char *p = (char *)(xp->xb_sense_data);
			int i;

			mutex_enter(&sd_sense_mutex);
			(void) strcpy(buf, "undecodable sense information:");
			for (i = 0; i < actual_len; i++) {
				(void) sprintf(tmp, " 0x%x", *(p++)&0xff);
				(void) strcpy(&buf[strlen(buf)], tmp);
			}
			i = strlen(buf);
			(void) strcpy(&buf[i], "-(assumed fatal)\n");

			if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
				scsi_log(SD_DEVINFO(un), sd_label,
				    CE_WARN, buf);
			}
			mutex_exit(&sd_sense_mutex);
		}

		/* Mark the ssc_flags for detecting invalid sense data */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
			    "sense-data");
		}

		/* Note: Legacy behavior, fail the command with no retry */
		sd_return_failed_command(un, bp, EIO);
		return (SD_SENSE_DATA_IS_INVALID);
	}

	/*
	 * Check that es_code is valid (es_class concatenated with es_code
	 * makes up the "response code" field; es_class will always be 7, so
	 * make sure es_code is 0, 1, 2, 3 or 0xf). es_code indicates the
	 * sense data format.
	 */
	if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
	    (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
	    (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
	    (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
	    (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
		/* Mark the ssc_flags for detecting invalid sense data */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
			    "sense-data");
		}
		goto sense_failed;
	}

	return (SD_SENSE_DATA_IS_VALID);

sense_failed:
	/*
	 * If the request sense failed (for whatever reason), attempt
	 * to retry the original command.
	 */
#if defined(__i386) || defined(__amd64)
	/*
	 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in sddef.h
	 * for the SPARC platform, while x86 uses one binary for both SCSI
	 * and FC.  The delay value used here needs to be adjusted whenever
	 * SD_RETRY_DELAY changes in sddef.h.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
	    sd_print_sense_failed_msg, msgp, EIO,
	    un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL);
#else
	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
	    sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
#endif

	return (SD_SENSE_DATA_IS_INVALID);
}
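
/*
 * For reference, the CODE_FMT_* values accepted above correspond to the
 * standard SCSI sense response codes (es_class 7 combined with es_code):
 * 0x70 fixed/current, 0x71 fixed/deferred, 0x72 descriptor/current,
 * 0x73 descriptor/deferred, and 0x7f vendor specific.
 */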

/*
 * Function:	sd_decode_sense
 *
 * Description: Take recovery action(s) when SCSI Sense Data is received.
 *
 * Context:	Interrupt context.
 */

static void
sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	uint8_t sense_key;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(bp != un->un_rqs_bp);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	sense_key = scsi_sense_key(xp->xb_sense_data);

	switch (sense_key) {
	case KEY_NO_SENSE:
		sd_sense_key_no_sense(un, bp, xp, pktp);
		break;
	case KEY_RECOVERABLE_ERROR:
		sd_sense_key_recoverable_error(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	case KEY_NOT_READY:
		sd_sense_key_not_ready(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	case KEY_MEDIUM_ERROR:
	case KEY_HARDWARE_ERROR:
		sd_sense_key_medium_or_hardware_error(un,
		    xp->xb_sense_data, bp, xp, pktp);
		break;
	case KEY_ILLEGAL_REQUEST:
		sd_sense_key_illegal_request(un, bp, xp, pktp);
		break;
	case KEY_UNIT_ATTENTION:
		sd_sense_key_unit_attention(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	case KEY_WRITE_PROTECT:
	case KEY_VOLUME_OVERFLOW:
	case KEY_MISCOMPARE:
		sd_sense_key_fail_command(un, bp, xp, pktp);
		break;
	case KEY_BLANK_CHECK:
		sd_sense_key_blank_check(un, bp, xp, pktp);
		break;
	case KEY_ABORTED_COMMAND:
		sd_sense_key_aborted_command(un, bp, xp, pktp);
		break;
	case KEY_VENDOR_UNIQUE:
	case KEY_COPY_ABORTED:
	case KEY_EQUAL:
	case KEY_RESERVED:
	default:
		sd_sense_key_default(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	}
}
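
/*
 * The routine below emits lines of the form (illustrative example):
 *
 *	Failed CDB: 0x28 0x00 0x00 0x00 0x10 0x00 0x00 0x00 0x08 0x00
 *
 * i.e. the caller-supplied title followed by the buffer contents in the
 * requested format, wrapped so each scsi_log() call stays within the
 * 256-byte working buffer.
 */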

/*
 * Function:	sd_dump_memory
 *
 * Description: Debug logging routine to print the contents of a user provided
 *		buffer. The output of the buffer is broken up into 256 byte
 *		segments due to a size constraint of the scsi_log
 *		implementation.
 *
 * Arguments:	un - ptr to softstate
 *		comp - component mask
 *		title - "title" string to precede data when printed
 *		data - ptr to data block to be printed
 *		len - size of data block to be printed
 *		fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
 *
 * Context:	May be called from interrupt context
 */

#define	SD_DUMP_MEMORY_BUF_SIZE	256

static char *sd_dump_format_string[] = {
		" 0x%02x",
		" %c"
};

static void
sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
    int len, int fmt)
{
	int	i, j;
	int	avail_count;
	int	start_offset;
	int	end_offset;
	size_t	entry_len;
	char	*bufp;
	char	*local_buf;
	char	*format_string;

	ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));

	/*
	 * In the debug version of the driver, this function is called from a
	 * number of places which are NOPs in the release driver.
	 * The debug driver therefore has additional methods of filtering
	 * debug output.
	 */
#ifdef SDDEBUG
	/*
	 * In the debug version of the driver we can reduce the amount of debug
	 * messages by setting sd_error_level to something other than
	 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
	 * sd_component_mask.
	 */
	if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
	    (sd_error_level != SCSI_ERR_ALL)) {
		return;
	}
	if (((sd_component_mask & comp) == 0) ||
	    (sd_error_level != SCSI_ERR_ALL)) {
		return;
	}
#else
	if (sd_error_level != SCSI_ERR_ALL) {
		return;
	}
#endif

	local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
	bufp = local_buf;
	/*
	 * Available length is the length of local_buf[], minus the
	 * length of the title string, minus one for the ":", minus
	 * one for the newline, minus one for the NULL terminator.
	 * This gives the #bytes available for holding the printed
	 * values from the given data buffer.
	 */
	if (fmt == SD_LOG_HEX) {
		format_string = sd_dump_format_string[0];
	} else /* SD_LOG_CHAR */ {
		format_string = sd_dump_format_string[1];
	}
	/*
	 * Available count is the number of elements from the given
	 * data buffer that we can fit into the available length.
	 * This is based upon the size of the format string used.
	 * Make one entry and find its size.
	 */
	(void) sprintf(bufp, format_string, data[0]);
	entry_len = strlen(bufp);
	avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;

	j = 0;
	while (j < len) {
		bufp = local_buf;
		bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
		start_offset = j;

		end_offset = start_offset + avail_count;

		(void) sprintf(bufp, "%s:", title);
		bufp += strlen(bufp);
		for (i = start_offset; ((i < end_offset) && (j < len));
		    i++, j++) {
			(void) sprintf(bufp, format_string, data[i]);
			bufp += entry_len;
		}
		(void) sprintf(bufp, "\n");

		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
	}
	kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
}

/*
 * Function:	sd_print_sense_msg
 *
 * Description: Log a message based upon the given sense data.
 *
 * Arguments:	un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		arg - ptr to associated sd_sense_info struct
 *		code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *		or SD_NO_RETRY_ISSUED
 *
 * Context:	May be called from interrupt context
 */

static void
sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	uint8_t *sensep;
	daddr_t request_blkno;
	diskaddr_t err_blkno;
	int severity;
	int pfa_flag;
	extern struct scsi_key_strings scsi_cmds[];

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);
	ASSERT(arg != NULL);

	severity = ((struct sd_sense_info *)(arg))->ssi_severity;
	pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;

	if ((code == SD_DELAYED_RETRY_ISSUED) ||
	    (code == SD_IMMEDIATE_RETRY_ISSUED)) {
		severity = SCSI_ERR_RETRYABLE;
	}

	/* Use absolute block number for the request block number */
	request_blkno = xp->xb_blkno;

	/*
	 * Now try to get the error block number from the sense data
	 */
	sensep = xp->xb_sense_data;

	if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
	    (uint64_t *)&err_blkno)) {
		/*
		 * We retrieved the error block number from the information
		 * portion of the sense data.
		 *
		 * For USCSI commands we are better off using the error
		 * block no. as the requested block no. (This is the best
		 * we can estimate.)
		 */
		if ((SD_IS_BUFIO(xp) == FALSE) &&
		    ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
			request_blkno = err_blkno;
		}
	} else {
		/*
		 * Without the es_valid bit set (for fixed format) or an
		 * information descriptor (for descriptor format) we cannot
		 * be certain of the error blkno, so just use the
		 * request_blkno.
		 */
		err_blkno = (diskaddr_t)request_blkno;
	}

	/*
	 * The following will log the buffer contents for the release driver
	 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
	 * level is set to verbose.
	 */
	sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
	    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
	sd_dump_memory(un, SD_LOG_IO, "Sense Data",
	    (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);

	if (pfa_flag == FALSE) {
		/* This is normally only set for USCSI */
		if ((pktp->pkt_flags & FLAG_SILENT) != 0) {
			return;
		}

		if ((SD_IS_BUFIO(xp) == TRUE) &&
		    (((sd_level_mask & SD_LOGMASK_DIAG) == 0) &&
		    (severity < sd_error_level))) {
			return;
		}
	}
	/*
	 * Check for Sonoma Failover and keep a count of how many I/Os
	 * have failed.
	 */
	if ((SD_IS_LSI(un)) &&
	    (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) &&
	    (scsi_sense_asc(sensep) == 0x94) &&
	    (scsi_sense_ascq(sensep) == 0x01)) {
		un->un_sonoma_failure_count++;
		if (un->un_sonoma_failure_count > 1) {
			return;
		}
	}

	if (SD_FM_LOG(un) == SD_FM_LOG_NSUP ||
	    ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) &&
	    (pktp->pkt_resid == 0))) {
		scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity,
		    request_blkno, err_blkno, scsi_cmds,
		    (struct scsi_extended_sense *)sensep,
		    un->un_additional_codes, NULL);
	}
}

/*
 * Function:	sd_sense_key_no_sense
 *
 * Description: Recovery action when sense data was not received.
 *
 * Context:	May be called from interrupt context
 */

static void
sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	SD_UPDATE_ERRSTATS(un, sd_softerrs);

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}


/*
 * Function:	sd_sense_key_recoverable_error
 *
 * Description: Recovery actions for a SCSI "Recovered Error" sense key.
 *
 * Context:	May be called from interrupt context
 */

static void
sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;
	uint8_t asc = scsi_sense_asc(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED
	 */
	if ((asc == 0x5D) && (sd_report_pfa != 0)) {
		SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
		si.ssi_severity = SCSI_ERR_INFO;
		si.ssi_pfa_flag = TRUE;
	} else {
		SD_UPDATE_ERRSTATS(un, sd_softerrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_recov_err);
		si.ssi_severity = SCSI_ERR_RECOVERED;
		si.ssi_pfa_flag = FALSE;
	}

	if (pktp->pkt_resid == 0) {
		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
		sd_return_command(un, bp);
		return;
	}

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}
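
/*
 * Note on the 0x5D handling above: FAILURE PREDICTION THRESHOLD EXCEEDED
 * is, in effect, the drive's SMART-style early warning. It arrives as a
 * recovered error, so the I/O itself succeeded; if sd_report_pfa is set,
 * the driver counts it in sd_rq_pfa_err and logs it at SCSI_ERR_INFO
 * severity instead of treating it as a soft error.
 */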
18123  *
18124  * Context: May be called from interrupt context
18125  */
18126 
18127 static void
18128 sd_sense_key_not_ready(struct sd_lun *un,
18129 	uint8_t *sense_datap,
18130 	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18131 {
18132 	struct sd_sense_info si;
18133 	uint8_t asc = scsi_sense_asc(sense_datap);
18134 	uint8_t ascq = scsi_sense_ascq(sense_datap);
18135 
18136 	ASSERT(un != NULL);
18137 	ASSERT(mutex_owned(SD_MUTEX(un)));
18138 	ASSERT(bp != NULL);
18139 	ASSERT(xp != NULL);
18140 	ASSERT(pktp != NULL);
18141 
18142 	si.ssi_severity = SCSI_ERR_FATAL;
18143 	si.ssi_pfa_flag = FALSE;
18144 
18145 	/*
18146 	 * Update error stats after first NOT READY error. Disks may have
18147 	 * been powered down and may need to be restarted. For CDROMs,
18148 	 * report NOT READY errors only if media is present.
18149 	 */
18150 	if ((ISCD(un) && (asc == 0x3A)) ||
18151 	    (xp->xb_nr_retry_count > 0)) {
18152 		SD_UPDATE_ERRSTATS(un, sd_harderrs);
18153 		SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
18154 	}
18155 
18156 	/*
18157 	 * Just fail if the "not ready" retry limit has been reached.
18158 	 */
18159 	if (xp->xb_nr_retry_count >= un->un_notready_retry_count) {
18160 		/* Special check for error message printing for removables. */
18161 		if (un->un_f_has_removable_media && (asc == 0x04) &&
18162 		    (ascq >= 0x04)) {
18163 			si.ssi_severity = SCSI_ERR_ALL;
18164 		}
18165 		goto fail_command;
18166 	}
18167 
18168 	/*
18169 	 * Check the ASC and ASCQ in the sense data as needed, to determine
18170 	 * what to do.
18171 	 */
18172 	switch (asc) {
18173 	case 0x04:	/* LOGICAL UNIT NOT READY */
18174 		/*
18175 		 * Disk drives that don't spin up result in a very long delay
18176 		 * in format without warning messages. We will log a message
18177 		 * if the error level is set to verbose.
18178 		 */
18179 		if (sd_error_level < SCSI_ERR_RETRYABLE) {
18180 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18181 			    "logical unit not ready, resetting disk\n");
18182 		}
18183 
18184 		/*
18185 		 * There are different requirements for CDROMs and disks for
18186 		 * the number of retries. If a CD-ROM reports this, it is
18187 		 * probably reading TOC and is in the process of getting
18188 		 * ready, so we should keep on trying for a long time to make
18189 		 * sure that all types of media are taken into account (for
18190 		 * some media the drive takes a long time to read TOC). For
18191 		 * disks we do not want to retry this too many times as this
18192 		 * can cause a long hang in format when the drive refuses to
18193 		 * spin up (a very common failure).
18194 		 */
18195 		switch (ascq) {
18196 		case 0x00:	/* LUN NOT READY, CAUSE NOT REPORTABLE */
18197 			/*
18198 			 * Disk drives frequently refuse to spin up which
18199 			 * results in a very long hang in format without
18200 			 * warning messages.
18201 			 *
18202 			 * Note: This code preserves the legacy behavior of
18203 			 * comparing xb_nr_retry_count against zero for fibre
18204 			 * channel targets instead of comparing against the
18205 			 * un_reset_retry_count value. The reason for this
18206 			 * discrepancy has been so utterly lost beneath the
18207 			 * Sands of Time that even Indiana Jones could not
18208 			 * find it.
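			 *
			 * In effect (a sketch of the two branches that
			 * follow; both also require that no START STOP UNIT
			 * timeout is already pending):
			 *
			 *	fibre:     reset once any NOT READY retry
			 *		   has been issued, or when diag
			 *		   logging (SD_LOGMASK_DIAG) is on
			 *	non-fibre: reset only after
			 *		   un_reset_retry_count NOT READY
			 *		   retries, or when diag logging
			 *		   is on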
18209 			 */
18210 			if (un->un_f_is_fibre == TRUE) {
18211 				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
18212 				    (xp->xb_nr_retry_count > 0)) &&
18213 				    (un->un_startstop_timeid == NULL)) {
18214 					scsi_log(SD_DEVINFO(un), sd_label,
18215 					    CE_WARN, "logical unit not ready, "
18216 					    "resetting disk\n");
18217 					sd_reset_target(un, pktp);
18218 				}
18219 			} else {
18220 				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
18221 				    (xp->xb_nr_retry_count >
18222 				    un->un_reset_retry_count)) &&
18223 				    (un->un_startstop_timeid == NULL)) {
18224 					scsi_log(SD_DEVINFO(un), sd_label,
18225 					    CE_WARN, "logical unit not ready, "
18226 					    "resetting disk\n");
18227 					sd_reset_target(un, pktp);
18228 				}
18229 			}
18230 			break;
18231 
18232 		case 0x01:	/* LUN IS IN PROCESS OF BECOMING READY */
18233 			/*
18234 			 * If the target is in the process of becoming
18235 			 * ready, just proceed with the retry. This can
18236 			 * happen with CD-ROMs that take a long time to
18237 			 * read TOC after a power cycle or reset.
18238 			 */
18239 			goto do_retry;
18240 
18241 		case 0x02:	/* LUN NOT READY, INITIALIZING CMD REQUIRED */
18242 			break;
18243 
18244 		case 0x03:	/* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
18245 			/*
18246 			 * Retries cannot help here so just fail right away.
18247 			 */
18248 			goto fail_command;
18249 
18250 		case 0x88:
18251 			/*
18252 			 * Vendor-unique code for T3/T4: it indicates a
18253 			 * path problem in a multipathed config, but as far as
18254 			 * the target driver is concerned it equates to a fatal
18255 			 * error, so we should just fail the command right away
18256 			 * (without printing anything to the console). If this
18257 			 * is not a T3/T4, fall thru to the default recovery
18258 			 * action.
18259 			 * T3/T4 is FC only; no need to check is_fibre.
18260 			 */
18261 			if (SD_IS_T3(un) || SD_IS_T4(un)) {
18262 				sd_return_failed_command(un, bp, EIO);
18263 				return;
18264 			}
18265 			/* FALLTHRU */
18266 
18267 		case 0x04:	/* LUN NOT READY, FORMAT IN PROGRESS */
18268 		case 0x05:	/* LUN NOT READY, REBUILD IN PROGRESS */
18269 		case 0x06:	/* LUN NOT READY, RECALCULATION IN PROGRESS */
18270 		case 0x07:	/* LUN NOT READY, OPERATION IN PROGRESS */
18271 		case 0x08:	/* LUN NOT READY, LONG WRITE IN PROGRESS */
18272 		default:	/* Possible future codes in SCSI spec? */
18273 			/*
18274 			 * For removable-media devices, do not retry if
18275 			 * ASCQ > 2 as these result mostly from USCSI commands
18276 			 * on MMC devices issued to check status of an
18277 			 * operation initiated in immediate mode. Also for
18278 			 * ASCQ >= 4 do not print console messages as these
18279 			 * mainly represent a user-initiated operation
18280 			 * instead of a system failure.
18281 			 */
18282 			if (un->un_f_has_removable_media) {
18283 				si.ssi_severity = SCSI_ERR_ALL;
18284 				goto fail_command;
18285 			}
18286 			break;
18287 		}
18288 
18289 		/*
18290 		 * As part of our recovery attempt for the NOT READY
18291 		 * condition, we issue a START STOP UNIT command. However,
18292 		 * we want to wait for a short delay before attempting this
18293 		 * as there may still be more commands coming back from the
18294 		 * target with the check condition. To do this we use
18295 		 * timeout(9F) to call sd_start_stop_unit_callback() after
18296 		 * the delay interval expires. (sd_start_stop_unit_callback()
18297 		 * dispatches sd_start_stop_unit_task(), which will issue
18298 		 * the actual START STOP UNIT command.) The delay interval
18299 		 * is one-half of the delay that we will use to retry the
18300 		 * command that generated the NOT READY condition.
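		 *
		 * A hedged sketch of the timeout(9F) idiom used a few lines
		 * below (identifiers are the ones used in this file):
		 *
		 *	un->un_startstop_timeid =
		 *	    timeout(sd_start_stop_unit_callback, un,
		 *	    un->un_busy_timeout / 2);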
18301 		 *
18302 		 * Note that we could just dispatch sd_start_stop_unit_task()
18303 		 * from here and allow it to sleep for the delay interval,
18304 		 * but then we would be tying up the taskq thread
18305 		 * unnecessarily for the duration of the delay.
18306 		 *
18307 		 * Do not issue the START STOP UNIT if the current command
18308 		 * is already a START STOP UNIT.
18309 		 */
18310 		if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
18311 			break;
18312 		}
18313 
18314 		/*
18315 		 * Do not schedule the timeout if one is already pending.
18316 		 */
18317 		if (un->un_startstop_timeid != NULL) {
18318 			SD_INFO(SD_LOG_ERROR, un,
18319 			    "sd_sense_key_not_ready: restart already issued to"
18320 			    " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
18321 			    ddi_get_instance(SD_DEVINFO(un)));
18322 			break;
18323 		}
18324 
18325 		/*
18326 		 * Schedule the START STOP UNIT command, then queue the command
18327 		 * for a retry.
18328 		 *
18329 		 * Note: A timeout is not scheduled for this retry because we
18330 		 * want the retry to be serial with the START_STOP_UNIT. The
18331 		 * retry will be started when the START_STOP_UNIT is completed
18332 		 * in sd_start_stop_unit_task.
18333 		 */
18334 		un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
18335 		    un, un->un_busy_timeout / 2);
18336 		xp->xb_nr_retry_count++;
18337 		sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
18338 		return;
18339 
18340 	case 0x05:	/* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
18341 		if (sd_error_level < SCSI_ERR_RETRYABLE) {
18342 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18343 			    "unit does not respond to selection\n");
18344 		}
18345 		break;
18346 
18347 	case 0x3A:	/* MEDIUM NOT PRESENT */
18348 		if (sd_error_level >= SCSI_ERR_FATAL) {
18349 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18350 			    "Caddy not inserted in drive\n");
18351 		}
18352 
18353 		sr_ejected(un);
18354 		un->un_mediastate = DKIO_EJECTED;
18355 		/* The state has changed; inform the media watch routines */
18356 		cv_broadcast(&un->un_state_cv);
18357 		/* Just fail if no media is present in the drive. */
18358 		goto fail_command;
18359 
18360 	default:
18361 		if (sd_error_level < SCSI_ERR_RETRYABLE) {
18362 			scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
18363 			    "Unit not Ready. Additional sense code 0x%x\n",
18364 			    asc);
18365 		}
18366 		break;
18367 	}
18368 
18369 do_retry:
18370 
18371 	/*
18372 	 * Retry the command, as some targets may report NOT READY for
18373 	 * several seconds after being reset.
18374 	 */
18375 	xp->xb_nr_retry_count++;
18376 	si.ssi_severity = SCSI_ERR_RETRYABLE;
18377 	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
18378 	    &si, EIO, un->un_busy_timeout, NULL);
18379 
18380 	return;
18381 
18382 fail_command:
18383 	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18384 	sd_return_failed_command(un, bp, EIO);
18385 }
18386 
18387 
18388 
18389 /*
18390  * Function: sd_sense_key_medium_or_hardware_error
18391  *
18392  * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
18393  *		sense key.
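 *
 *		When the retry count reaches un_reset_retry_count (and
 *		bus/device resets are allowed), this path may escalate
 *		through scsi_reset(9F), attempting RESET_LUN first and
 *		falling back to RESET_TARGET, roughly (sketch of the body
 *		below; scsi_reset() returns 0 on failure):
 *
 *			if (scsi_reset(SD_ADDRESS(un), RESET_LUN) == 0)
 *				(void) scsi_reset(SD_ADDRESS(un),
 *				    RESET_TARGET);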
18394  *
18395  * Context: May be called from interrupt context
18396  */
18397 
18398 static void
18399 sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
18400 	uint8_t *sense_datap,
18401 	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18402 {
18403 	struct sd_sense_info si;
18404 	uint8_t sense_key = scsi_sense_key(sense_datap);
18405 	uint8_t asc = scsi_sense_asc(sense_datap);
18406 
18407 	ASSERT(un != NULL);
18408 	ASSERT(mutex_owned(SD_MUTEX(un)));
18409 	ASSERT(bp != NULL);
18410 	ASSERT(xp != NULL);
18411 	ASSERT(pktp != NULL);
18412 
18413 	si.ssi_severity = SCSI_ERR_FATAL;
18414 	si.ssi_pfa_flag = FALSE;
18415 
18416 	if (sense_key == KEY_MEDIUM_ERROR) {
18417 		SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
18418 	}
18419 
18420 	SD_UPDATE_ERRSTATS(un, sd_harderrs);
18421 
18422 	if ((un->un_reset_retry_count != 0) &&
18423 	    (xp->xb_retry_count == un->un_reset_retry_count)) {
18424 		mutex_exit(SD_MUTEX(un));
18425 		/* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
18426 		if (un->un_f_allow_bus_device_reset == TRUE) {
18427 
18428 			boolean_t try_resetting_target = B_TRUE;
18429 
18430 			/*
18431 			 * We need to be able to handle specific ASC values
18432 			 * when we are handling a KEY_HARDWARE_ERROR. In
18433 			 * particular, taking the default action of resetting
18434 			 * the target may not be the appropriate way to attempt
18435 			 * recovery. Resetting a target because of a single
18436 			 * LUN failure victimizes all LUNs on that target.
18437 			 *
18438 			 * This is true for the LSI arrays: if an LSI
18439 			 * array controller returns an ASC of 0x84 (LUN Dead),
18440 			 * we should trust it.
18441 			 */
18442 
18443 			if (sense_key == KEY_HARDWARE_ERROR) {
18444 				switch (asc) {
18445 				case 0x84:
18446 					if (SD_IS_LSI(un)) {
18447 						try_resetting_target = B_FALSE;
18448 					}
18449 					break;
18450 				default:
18451 					break;
18452 				}
18453 			}
18454 
18455 			if (try_resetting_target == B_TRUE) {
18456 				int reset_retval = 0;
18457 				if (un->un_f_lun_reset_enabled == TRUE) {
18458 					SD_TRACE(SD_LOG_IO_CORE, un,
18459 					    "sd_sense_key_medium_or_hardware_"
18460 					    "error: issuing RESET_LUN\n");
18461 					reset_retval =
18462 					    scsi_reset(SD_ADDRESS(un),
18463 					    RESET_LUN);
18464 				}
18465 				if (reset_retval == 0) {
18466 					SD_TRACE(SD_LOG_IO_CORE, un,
18467 					    "sd_sense_key_medium_or_hardware_"
18468 					    "error: issuing RESET_TARGET\n");
18469 					(void) scsi_reset(SD_ADDRESS(un),
18470 					    RESET_TARGET);
18471 				}
18472 			}
18473 		}
18474 		mutex_enter(SD_MUTEX(un));
18475 	}
18476 
18477 	/*
18478 	 * This really ought to be a fatal error, but we will retry anyway
18479 	 * as some drives report this as a spurious error.
18480 	 */
18481 	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18482 	    &si, EIO, (clock_t)0, NULL);
18483 }
18484 
18485 
18486 
18487 /*
18488  * Function: sd_sense_key_illegal_request
18489  *
18490  * Description: Recovery actions for a SCSI "Illegal Request" sense key.
18491 * 18492 * Context: May be called from interrupt context 18493 */ 18494 18495 static void 18496 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 18497 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18498 { 18499 struct sd_sense_info si; 18500 18501 ASSERT(un != NULL); 18502 ASSERT(mutex_owned(SD_MUTEX(un))); 18503 ASSERT(bp != NULL); 18504 ASSERT(xp != NULL); 18505 ASSERT(pktp != NULL); 18506 18507 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 18508 18509 si.ssi_severity = SCSI_ERR_INFO; 18510 si.ssi_pfa_flag = FALSE; 18511 18512 /* Pointless to retry if the target thinks it's an illegal request */ 18513 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18514 sd_return_failed_command(un, bp, EIO); 18515 } 18516 18517 18518 18519 18520 /* 18521 * Function: sd_sense_key_unit_attention 18522 * 18523 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 18524 * 18525 * Context: May be called from interrupt context 18526 */ 18527 18528 static void 18529 sd_sense_key_unit_attention(struct sd_lun *un, 18530 uint8_t *sense_datap, 18531 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18532 { 18533 /* 18534 * For UNIT ATTENTION we allow retries for one minute. Devices 18535 * like Sonoma can return UNIT ATTENTION close to a minute 18536 * under certain conditions. 18537 */ 18538 int retry_check_flag = SD_RETRIES_UA; 18539 boolean_t kstat_updated = B_FALSE; 18540 struct sd_sense_info si; 18541 uint8_t asc = scsi_sense_asc(sense_datap); 18542 uint8_t ascq = scsi_sense_ascq(sense_datap); 18543 18544 ASSERT(un != NULL); 18545 ASSERT(mutex_owned(SD_MUTEX(un))); 18546 ASSERT(bp != NULL); 18547 ASSERT(xp != NULL); 18548 ASSERT(pktp != NULL); 18549 18550 si.ssi_severity = SCSI_ERR_INFO; 18551 si.ssi_pfa_flag = FALSE; 18552 18553 18554 switch (asc) { 18555 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 18556 if (sd_report_pfa != 0) { 18557 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 18558 si.ssi_pfa_flag = TRUE; 18559 retry_check_flag = SD_RETRIES_STANDARD; 18560 goto do_retry; 18561 } 18562 18563 break; 18564 18565 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 18566 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 18567 un->un_resvd_status |= 18568 (SD_LOST_RESERVE | SD_WANT_RESERVE); 18569 } 18570 #ifdef _LP64 18571 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 18572 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 18573 un, KM_NOSLEEP) == 0) { 18574 /* 18575 * If we can't dispatch the task we'll just 18576 * live without descriptor sense. We can 18577 * try again on the next "unit attention" 18578 */ 18579 SD_ERROR(SD_LOG_ERROR, un, 18580 "sd_sense_key_unit_attention: " 18581 "Could not dispatch " 18582 "sd_reenable_dsense_task\n"); 18583 } 18584 } 18585 #endif /* _LP64 */ 18586 /* FALLTHRU */ 18587 18588 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 18589 if (!un->un_f_has_removable_media) { 18590 break; 18591 } 18592 18593 /* 18594 * When we get a unit attention from a removable-media device, 18595 * it may be in a state that will take a long time to recover 18596 * (e.g., from a reset). Since we are executing in interrupt 18597 * context here, we cannot wait around for the device to come 18598 * back. So hand this command off to sd_media_change_task() 18599 * for deferred processing under taskq thread context. (Note 18600 * that the command still may be failed if a problem is 18601 * encountered at a later time.) 
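		 *
		 * The dispatch below uses KM_NOSLEEP because we are in
		 * interrupt context; a taskq_dispatch(9F) return value of 0
		 * means the task could not be queued, in which case the
		 * command is failed right here.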
18602 		 */
18603 		if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
18604 		    KM_NOSLEEP) == 0) {
18605 			/*
18606 			 * Cannot dispatch the request so fail the command.
18607 			 */
18608 			SD_UPDATE_ERRSTATS(un, sd_harderrs);
18609 			SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
18610 			si.ssi_severity = SCSI_ERR_FATAL;
18611 			sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18612 			sd_return_failed_command(un, bp, EIO);
18613 		}
18614 
18615 		/*
18616 		 * If we failed to dispatch sd_media_change_task(), the kstats
18617 		 * were already updated above. If the dispatch succeeded, the
18618 		 * kstats will be updated later if an error is encountered.
18619 		 * Either way, set the kstat_updated flag here.
18620 		 */
18621 		kstat_updated = B_TRUE;
18622 
18623 		/*
18624 		 * Either the command has been successfully dispatched to a
18625 		 * task Q for retrying, or the dispatch failed. In either
18626 		 * case, do NOT retry again by calling sd_retry_command.
18627 		 * That would set up two retries of the same command; when
18628 		 * one completes and frees the resources, the other would
18629 		 * access freed memory, a bad thing.
18630 		 */
18631 		return;
18632 
18633 	default:
18634 		break;
18635 	}
18636 
18637 	/*
18638 	 *	ASC  ASCQ
18639 	 *	 2A   09	Capacity data has changed
18640 	 *	 2A   01	Mode parameters changed
18641 	 *	 3F   0E	Reported luns data has changed
18642 	 * Arrays that support logical unit expansion should report
18643 	 * capacity changes (2Ah/09). "Mode parameters changed" and
18644 	 * "Reported luns data has changed" are approximations.
18645 	 */
18646 	if (((asc == 0x2a) && (ascq == 0x09)) ||
18647 	    ((asc == 0x2a) && (ascq == 0x01)) ||
18648 	    ((asc == 0x3f) && (ascq == 0x0e))) {
18649 		if (taskq_dispatch(sd_tq, sd_target_change_task, un,
18650 		    KM_NOSLEEP) == 0) {
18651 			SD_ERROR(SD_LOG_ERROR, un,
18652 			    "sd_sense_key_unit_attention: "
18653 			    "Could not dispatch sd_target_change_task\n");
18654 		}
18655 	}
18656 
18657 	/*
18658 	 * Update the kstats if we haven't done so already.
18659 	 */
18660 	if (!kstat_updated) {
18661 		SD_UPDATE_ERRSTATS(un, sd_harderrs);
18662 		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
18663 	}
18664 
18665 do_retry:
18666 	sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
18667 	    EIO, SD_UA_RETRY_DELAY, NULL);
18668 }
18669 
18670 
18671 
18672 /*
18673  * Function: sd_sense_key_fail_command
18674  *
18675  * Description: Used to fail a command when we don't like the sense key that
18676  *		was returned.
18677  *
18678  * Context: May be called from interrupt context
18679  */
18680 
18681 static void
18682 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
18683 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
18684 {
18685 	struct sd_sense_info si;
18686 
18687 	ASSERT(un != NULL);
18688 	ASSERT(mutex_owned(SD_MUTEX(un)));
18689 	ASSERT(bp != NULL);
18690 	ASSERT(xp != NULL);
18691 	ASSERT(pktp != NULL);
18692 
18693 	si.ssi_severity = SCSI_ERR_FATAL;
18694 	si.ssi_pfa_flag = FALSE;
18695 
18696 	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18697 	sd_return_failed_command(un, bp, EIO);
18698 }
18699 
18700 
18701 
18702 /*
18703  * Function: sd_sense_key_blank_check
18704  *
18705  * Description: Recovery actions for a SCSI "Blank Check" sense key.
18706  *		Has no monetary connotation.
18707 * 18708 * Context: May be called from interrupt context 18709 */ 18710 18711 static void 18712 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 18713 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18714 { 18715 struct sd_sense_info si; 18716 18717 ASSERT(un != NULL); 18718 ASSERT(mutex_owned(SD_MUTEX(un))); 18719 ASSERT(bp != NULL); 18720 ASSERT(xp != NULL); 18721 ASSERT(pktp != NULL); 18722 18723 /* 18724 * Blank check is not fatal for removable devices, therefore 18725 * it does not require a console message. 18726 */ 18727 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 18728 SCSI_ERR_FATAL; 18729 si.ssi_pfa_flag = FALSE; 18730 18731 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18732 sd_return_failed_command(un, bp, EIO); 18733 } 18734 18735 18736 18737 18738 /* 18739 * Function: sd_sense_key_aborted_command 18740 * 18741 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 18742 * 18743 * Context: May be called from interrupt context 18744 */ 18745 18746 static void 18747 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 18748 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18749 { 18750 struct sd_sense_info si; 18751 18752 ASSERT(un != NULL); 18753 ASSERT(mutex_owned(SD_MUTEX(un))); 18754 ASSERT(bp != NULL); 18755 ASSERT(xp != NULL); 18756 ASSERT(pktp != NULL); 18757 18758 si.ssi_severity = SCSI_ERR_FATAL; 18759 si.ssi_pfa_flag = FALSE; 18760 18761 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18762 18763 /* 18764 * This really ought to be a fatal error, but we will retry anyway 18765 * as some drives report this as a spurious error. 18766 */ 18767 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18768 &si, EIO, drv_usectohz(100000), NULL); 18769 } 18770 18771 18772 18773 /* 18774 * Function: sd_sense_key_default 18775 * 18776 * Description: Default recovery action for several SCSI sense keys (basically 18777 * attempts a retry). 18778 * 18779 * Context: May be called from interrupt context 18780 */ 18781 18782 static void 18783 sd_sense_key_default(struct sd_lun *un, 18784 uint8_t *sense_datap, 18785 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18786 { 18787 struct sd_sense_info si; 18788 uint8_t sense_key = scsi_sense_key(sense_datap); 18789 18790 ASSERT(un != NULL); 18791 ASSERT(mutex_owned(SD_MUTEX(un))); 18792 ASSERT(bp != NULL); 18793 ASSERT(xp != NULL); 18794 ASSERT(pktp != NULL); 18795 18796 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18797 18798 /* 18799 * Undecoded sense key. Attempt retries and hope that will fix 18800 * the problem. Otherwise, we're dead. 18801 */ 18802 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 18803 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18804 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 18805 } 18806 18807 si.ssi_severity = SCSI_ERR_FATAL; 18808 si.ssi_pfa_flag = FALSE; 18809 18810 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18811 &si, EIO, (clock_t)0, NULL); 18812 } 18813 18814 18815 18816 /* 18817 * Function: sd_print_retry_msg 18818 * 18819 * Description: Print a message indicating the retry action being taken. 18820 * 18821 * Arguments: un - ptr to associated softstate 18822 * bp - ptr to buf(9S) for the command 18823 * arg - not used. 
18824 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18825 * or SD_NO_RETRY_ISSUED 18826 * 18827 * Context: May be called from interrupt context 18828 */ 18829 /* ARGSUSED */ 18830 static void 18831 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 18832 { 18833 struct sd_xbuf *xp; 18834 struct scsi_pkt *pktp; 18835 char *reasonp; 18836 char *msgp; 18837 18838 ASSERT(un != NULL); 18839 ASSERT(mutex_owned(SD_MUTEX(un))); 18840 ASSERT(bp != NULL); 18841 pktp = SD_GET_PKTP(bp); 18842 ASSERT(pktp != NULL); 18843 xp = SD_GET_XBUF(bp); 18844 ASSERT(xp != NULL); 18845 18846 ASSERT(!mutex_owned(&un->un_pm_mutex)); 18847 mutex_enter(&un->un_pm_mutex); 18848 if ((un->un_state == SD_STATE_SUSPENDED) || 18849 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 18850 (pktp->pkt_flags & FLAG_SILENT)) { 18851 mutex_exit(&un->un_pm_mutex); 18852 goto update_pkt_reason; 18853 } 18854 mutex_exit(&un->un_pm_mutex); 18855 18856 /* 18857 * Suppress messages if they are all the same pkt_reason; with 18858 * TQ, many (up to 256) are returned with the same pkt_reason. 18859 * If we are in panic, then suppress the retry messages. 18860 */ 18861 switch (flag) { 18862 case SD_NO_RETRY_ISSUED: 18863 msgp = "giving up"; 18864 break; 18865 case SD_IMMEDIATE_RETRY_ISSUED: 18866 case SD_DELAYED_RETRY_ISSUED: 18867 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 18868 ((pktp->pkt_reason == un->un_last_pkt_reason) && 18869 (sd_error_level != SCSI_ERR_ALL))) { 18870 return; 18871 } 18872 msgp = "retrying command"; 18873 break; 18874 default: 18875 goto update_pkt_reason; 18876 } 18877 18878 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 18879 scsi_rname(pktp->pkt_reason)); 18880 18881 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) { 18882 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18883 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 18884 } 18885 18886 update_pkt_reason: 18887 /* 18888 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 18889 * This is to prevent multiple console messages for the same failure 18890 * condition. Note that un->un_last_pkt_reason is NOT restored if & 18891 * when the command is retried successfully because there still may be 18892 * more commands coming back with the same value of pktp->pkt_reason. 18893 */ 18894 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 18895 un->un_last_pkt_reason = pktp->pkt_reason; 18896 } 18897 } 18898 18899 18900 /* 18901 * Function: sd_print_cmd_incomplete_msg 18902 * 18903 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 18904 * 18905 * Arguments: un - ptr to associated softstate 18906 * bp - ptr to buf(9S) for the command 18907 * arg - passed to sd_print_retry_msg() 18908 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18909 * or SD_NO_RETRY_ISSUED 18910 * 18911 * Context: May be called from interrupt context 18912 */ 18913 18914 static void 18915 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 18916 int code) 18917 { 18918 dev_info_t *dip; 18919 18920 ASSERT(un != NULL); 18921 ASSERT(mutex_owned(SD_MUTEX(un))); 18922 ASSERT(bp != NULL); 18923 18924 switch (code) { 18925 case SD_NO_RETRY_ISSUED: 18926 /* Command was failed. Someone turned off this target? 
*/ 18927 if (un->un_state != SD_STATE_OFFLINE) { 18928 /* 18929 * Suppress message if we are detaching and 18930 * device has been disconnected 18931 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 18932 * private interface and not part of the DDI 18933 */ 18934 dip = un->un_sd->sd_dev; 18935 if (!(DEVI_IS_DETACHING(dip) && 18936 DEVI_IS_DEVICE_REMOVED(dip))) { 18937 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18938 "disk not responding to selection\n"); 18939 } 18940 New_state(un, SD_STATE_OFFLINE); 18941 } 18942 break; 18943 18944 case SD_DELAYED_RETRY_ISSUED: 18945 case SD_IMMEDIATE_RETRY_ISSUED: 18946 default: 18947 /* Command was successfully queued for retry */ 18948 sd_print_retry_msg(un, bp, arg, code); 18949 break; 18950 } 18951 } 18952 18953 18954 /* 18955 * Function: sd_pkt_reason_cmd_incomplete 18956 * 18957 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 18958 * 18959 * Context: May be called from interrupt context 18960 */ 18961 18962 static void 18963 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 18964 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18965 { 18966 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 18967 18968 ASSERT(un != NULL); 18969 ASSERT(mutex_owned(SD_MUTEX(un))); 18970 ASSERT(bp != NULL); 18971 ASSERT(xp != NULL); 18972 ASSERT(pktp != NULL); 18973 18974 /* Do not do a reset if selection did not complete */ 18975 /* Note: Should this not just check the bit? */ 18976 if (pktp->pkt_state != STATE_GOT_BUS) { 18977 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18978 sd_reset_target(un, pktp); 18979 } 18980 18981 /* 18982 * If the target was not successfully selected, then set 18983 * SD_RETRIES_FAILFAST to indicate that we lost communication 18984 * with the target, and further retries and/or commands are 18985 * likely to take a long time. 18986 */ 18987 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 18988 flag |= SD_RETRIES_FAILFAST; 18989 } 18990 18991 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18992 18993 sd_retry_command(un, bp, flag, 18994 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18995 } 18996 18997 18998 18999 /* 19000 * Function: sd_pkt_reason_cmd_tran_err 19001 * 19002 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 19003 * 19004 * Context: May be called from interrupt context 19005 */ 19006 19007 static void 19008 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 19009 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19010 { 19011 ASSERT(un != NULL); 19012 ASSERT(mutex_owned(SD_MUTEX(un))); 19013 ASSERT(bp != NULL); 19014 ASSERT(xp != NULL); 19015 ASSERT(pktp != NULL); 19016 19017 /* 19018 * Do not reset if we got a parity error, or if 19019 * selection did not complete. 19020 */ 19021 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19022 /* Note: Should this not just check the bit for pkt_state? */ 19023 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 19024 (pktp->pkt_state != STATE_GOT_BUS)) { 19025 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19026 sd_reset_target(un, pktp); 19027 } 19028 19029 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19030 19031 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 19032 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19033 } 19034 19035 19036 19037 /* 19038 * Function: sd_pkt_reason_cmd_reset 19039 * 19040 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
19041 * 19042 * Context: May be called from interrupt context 19043 */ 19044 19045 static void 19046 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 19047 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19048 { 19049 ASSERT(un != NULL); 19050 ASSERT(mutex_owned(SD_MUTEX(un))); 19051 ASSERT(bp != NULL); 19052 ASSERT(xp != NULL); 19053 ASSERT(pktp != NULL); 19054 19055 /* The target may still be running the command, so try to reset. */ 19056 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19057 sd_reset_target(un, pktp); 19058 19059 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19060 19061 /* 19062 * If pkt_reason is CMD_RESET chances are that this pkt got 19063 * reset because another target on this bus caused it. The target 19064 * that caused it should get CMD_TIMEOUT with pkt_statistics 19065 * of STAT_TIMEOUT/STAT_DEV_RESET. 19066 */ 19067 19068 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 19069 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19070 } 19071 19072 19073 19074 19075 /* 19076 * Function: sd_pkt_reason_cmd_aborted 19077 * 19078 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 19079 * 19080 * Context: May be called from interrupt context 19081 */ 19082 19083 static void 19084 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 19085 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19086 { 19087 ASSERT(un != NULL); 19088 ASSERT(mutex_owned(SD_MUTEX(un))); 19089 ASSERT(bp != NULL); 19090 ASSERT(xp != NULL); 19091 ASSERT(pktp != NULL); 19092 19093 /* The target may still be running the command, so try to reset. */ 19094 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19095 sd_reset_target(un, pktp); 19096 19097 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19098 19099 /* 19100 * If pkt_reason is CMD_ABORTED chances are that this pkt got 19101 * aborted because another target on this bus caused it. The target 19102 * that caused it should get CMD_TIMEOUT with pkt_statistics 19103 * of STAT_TIMEOUT/STAT_DEV_RESET. 19104 */ 19105 19106 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 19107 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19108 } 19109 19110 19111 19112 /* 19113 * Function: sd_pkt_reason_cmd_timeout 19114 * 19115 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 19116 * 19117 * Context: May be called from interrupt context 19118 */ 19119 19120 static void 19121 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 19122 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19123 { 19124 ASSERT(un != NULL); 19125 ASSERT(mutex_owned(SD_MUTEX(un))); 19126 ASSERT(bp != NULL); 19127 ASSERT(xp != NULL); 19128 ASSERT(pktp != NULL); 19129 19130 19131 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19132 sd_reset_target(un, pktp); 19133 19134 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19135 19136 /* 19137 * A command timeout indicates that we could not establish 19138 * communication with the target, so set SD_RETRIES_FAILFAST 19139 * as further retries/commands are likely to take a long time. 19140 */ 19141 sd_retry_command(un, bp, 19142 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 19143 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19144 } 19145 19146 19147 19148 /* 19149 * Function: sd_pkt_reason_cmd_unx_bus_free 19150 * 19151 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
19152 * 19153 * Context: May be called from interrupt context 19154 */ 19155 19156 static void 19157 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 19158 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19159 { 19160 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 19161 19162 ASSERT(un != NULL); 19163 ASSERT(mutex_owned(SD_MUTEX(un))); 19164 ASSERT(bp != NULL); 19165 ASSERT(xp != NULL); 19166 ASSERT(pktp != NULL); 19167 19168 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19169 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19170 19171 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 19172 sd_print_retry_msg : NULL; 19173 19174 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 19175 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19176 } 19177 19178 19179 /* 19180 * Function: sd_pkt_reason_cmd_tag_reject 19181 * 19182 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 19183 * 19184 * Context: May be called from interrupt context 19185 */ 19186 19187 static void 19188 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 19189 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19190 { 19191 ASSERT(un != NULL); 19192 ASSERT(mutex_owned(SD_MUTEX(un))); 19193 ASSERT(bp != NULL); 19194 ASSERT(xp != NULL); 19195 ASSERT(pktp != NULL); 19196 19197 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19198 pktp->pkt_flags = 0; 19199 un->un_tagflags = 0; 19200 if (un->un_f_opt_queueing == TRUE) { 19201 un->un_throttle = min(un->un_throttle, 3); 19202 } else { 19203 un->un_throttle = 1; 19204 } 19205 mutex_exit(SD_MUTEX(un)); 19206 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 19207 mutex_enter(SD_MUTEX(un)); 19208 19209 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19210 19211 /* Legacy behavior not to check retry counts here. */ 19212 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 19213 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19214 } 19215 19216 19217 /* 19218 * Function: sd_pkt_reason_default 19219 * 19220 * Description: Default recovery actions for SCSA pkt_reason values that 19221 * do not have more explicit recovery actions. 19222 * 19223 * Context: May be called from interrupt context 19224 */ 19225 19226 static void 19227 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 19228 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19229 { 19230 ASSERT(un != NULL); 19231 ASSERT(mutex_owned(SD_MUTEX(un))); 19232 ASSERT(bp != NULL); 19233 ASSERT(xp != NULL); 19234 ASSERT(pktp != NULL); 19235 19236 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19237 sd_reset_target(un, pktp); 19238 19239 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19240 19241 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 19242 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19243 } 19244 19245 19246 19247 /* 19248 * Function: sd_pkt_status_check_condition 19249 * 19250 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
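 *
 *		"ARQ" is the HBA's auto-request-sense facility: with ARQ
 *		enabled the sense data arrives along with the failed packet,
 *		so the command is simply retried; without it the driver must
 *		first issue its own REQUEST SENSE (see the function body).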
19251  *
19252  * Context: May be called from interrupt context
19253  */
19254 
19255 static void
19256 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
19257 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
19258 {
19259 	ASSERT(un != NULL);
19260 	ASSERT(mutex_owned(SD_MUTEX(un)));
19261 	ASSERT(bp != NULL);
19262 	ASSERT(xp != NULL);
19263 	ASSERT(pktp != NULL);
19264 
19265 	SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
19266 	    "entry: buf:0x%p xp:0x%p\n", bp, xp);
19267 
19268 	/*
19269 	 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
19270 	 * command will be retried after the request sense). Otherwise, retry
19271 	 * the command. Note: we are issuing the request sense even though the
19272 	 * retry limit may have been reached for the failed command.
19273 	 */
19274 	if (un->un_f_arq_enabled == FALSE) {
19275 		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
19276 		    "no ARQ, sending request sense command\n");
19277 		sd_send_request_sense_command(un, bp, pktp);
19278 	} else {
19279 		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
19280 		    "ARQ, retrying request sense command\n");
19281 #if defined(__i386) || defined(__amd64)
19282 		/*
19283 		 * The SD_RETRY_DELAY value used here needs to be adjusted
19284 		 * if SD_RETRY_DELAY changes in sddef.h.
19285 		 */
19286 		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
19287 		    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
19288 		    NULL);
19289 #else
19290 		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
19291 		    EIO, SD_RETRY_DELAY, NULL);
19292 #endif
19293 	}
19294 
19295 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
19296 }
19297 
19298 
19299 /*
19300  * Function: sd_pkt_status_busy
19301  *
19302  * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
19303  *
19304  * Context: May be called from interrupt context
19305  */
19306 
19307 static void
19308 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
19309 	struct scsi_pkt *pktp)
19310 {
19311 	ASSERT(un != NULL);
19312 	ASSERT(mutex_owned(SD_MUTEX(un)));
19313 	ASSERT(bp != NULL);
19314 	ASSERT(xp != NULL);
19315 	ASSERT(pktp != NULL);
19316 
19317 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19318 	    "sd_pkt_status_busy: entry\n");
19319 
19320 	/* If retries are exhausted, just fail the command. */
19321 	if (xp->xb_retry_count >= un->un_busy_retry_count) {
19322 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
19323 		    "device busy too long\n");
19324 		sd_return_failed_command(un, bp, EIO);
19325 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19326 		    "sd_pkt_status_busy: exit\n");
19327 		return;
19328 	}
19329 	xp->xb_retry_count++;
19330 
19331 	/*
19332 	 * Try to reset the target. However, we do not want to perform
19333 	 * more than one reset if the device continues to fail. The reset
19334 	 * will be performed when the retry count reaches the reset
19335 	 * threshold. This threshold should be set such that at least
19336 	 * one retry is issued before the reset is performed.
19337 	 */
19338 	if (xp->xb_retry_count ==
19339 	    ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
19340 		int rval = 0;
19341 		mutex_exit(SD_MUTEX(un));
19342 		if (un->un_f_allow_bus_device_reset == TRUE) {
19343 			/*
19344 			 * First try to reset the LUN; if we cannot then
19345 			 * try to reset the target.
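			 *
			 * (scsi_reset(9F) returns 1 on success and 0 on
			 * failure, hence the rval == 0 fallbacks below.)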
19346 */ 19347 if (un->un_f_lun_reset_enabled == TRUE) { 19348 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19349 "sd_pkt_status_busy: RESET_LUN\n"); 19350 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 19351 } 19352 if (rval == 0) { 19353 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19354 "sd_pkt_status_busy: RESET_TARGET\n"); 19355 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 19356 } 19357 } 19358 if (rval == 0) { 19359 /* 19360 * If the RESET_LUN and/or RESET_TARGET failed, 19361 * try RESET_ALL 19362 */ 19363 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19364 "sd_pkt_status_busy: RESET_ALL\n"); 19365 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 19366 } 19367 mutex_enter(SD_MUTEX(un)); 19368 if (rval == 0) { 19369 /* 19370 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 19371 * At this point we give up & fail the command. 19372 */ 19373 sd_return_failed_command(un, bp, EIO); 19374 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19375 "sd_pkt_status_busy: exit (failed cmd)\n"); 19376 return; 19377 } 19378 } 19379 19380 /* 19381 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 19382 * we have already checked the retry counts above. 19383 */ 19384 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 19385 EIO, un->un_busy_timeout, NULL); 19386 19387 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19388 "sd_pkt_status_busy: exit\n"); 19389 } 19390 19391 19392 /* 19393 * Function: sd_pkt_status_reservation_conflict 19394 * 19395 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 19396 * command status. 19397 * 19398 * Context: May be called from interrupt context 19399 */ 19400 19401 static void 19402 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 19403 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19404 { 19405 ASSERT(un != NULL); 19406 ASSERT(mutex_owned(SD_MUTEX(un))); 19407 ASSERT(bp != NULL); 19408 ASSERT(xp != NULL); 19409 ASSERT(pktp != NULL); 19410 19411 /* 19412 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 19413 * conflict could be due to various reasons like incorrect keys, not 19414 * registered or not reserved etc. So, we return EACCES to the caller. 19415 */ 19416 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 19417 int cmd = SD_GET_PKT_OPCODE(pktp); 19418 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 19419 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 19420 sd_return_failed_command(un, bp, EACCES); 19421 return; 19422 } 19423 } 19424 19425 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 19426 19427 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 19428 if (sd_failfast_enable != 0) { 19429 /* By definition, we must panic here.... */ 19430 sd_panic_for_res_conflict(un); 19431 /*NOTREACHED*/ 19432 } 19433 SD_ERROR(SD_LOG_IO, un, 19434 "sd_handle_resv_conflict: Disk Reserved\n"); 19435 sd_return_failed_command(un, bp, EACCES); 19436 return; 19437 } 19438 19439 /* 19440 * 1147670: retry only if sd_retry_on_reservation_conflict 19441 * property is set (default is 1). Retries will not succeed 19442 * on a disk reserved by another initiator. HA systems 19443 * may reset this via sd.conf to avoid these retries. 19444 * 19445 * Note: The legacy return code for this failure is EIO, however EACCES 19446 * seems more appropriate for a reservation conflict. 
19447 	 */
19448 	if (sd_retry_on_reservation_conflict == 0) {
19449 		SD_ERROR(SD_LOG_IO, un,
19450 		    "sd_handle_resv_conflict: Device Reserved\n");
19451 		sd_return_failed_command(un, bp, EIO);
19452 		return;
19453 	}
19454 
19455 	/*
19456 	 * Retry the command if we can.
19457 	 *
19458 	 * Note: The legacy return code for this failure is EIO, however EACCES
19459 	 * seems more appropriate for a reservation conflict.
19460 	 */
19461 	sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
19462 	    (clock_t)2, NULL);
19463 }
19464 
19465 
19466 
19467 /*
19468  * Function: sd_pkt_status_qfull
19469  *
19470  * Description: Handle a QUEUE FULL condition from the target. This can
19471  *		occur if the HBA does not handle the queue full condition.
19472  *		(Basically this means third-party HBAs, as Sun HBAs will
19473  *		handle the queue full condition.) Note that if there are
19474  *		some commands already in the transport, then the queue full
19475  *		has occurred because the queue for this nexus is actually
19476  *		full. If there are no commands in the transport, then the
19477  *		queue full results from some other initiator or lun
19478  *		consuming all the resources at the target.
19479  *
19480  * Context: May be called from interrupt context
19481  */
19482 
19483 static void
19484 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
19485 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
19486 {
19487 	ASSERT(un != NULL);
19488 	ASSERT(mutex_owned(SD_MUTEX(un)));
19489 	ASSERT(bp != NULL);
19490 	ASSERT(xp != NULL);
19491 	ASSERT(pktp != NULL);
19492 
19493 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19494 	    "sd_pkt_status_qfull: entry\n");
19495 
19496 	/*
19497 	 * Just lower the QFULL throttle and retry the command. Note that
19498 	 * we do not limit the number of retries here.
19499 	 */
19500 	sd_reduce_throttle(un, SD_THROTTLE_QFULL);
19501 	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0,
19502 	    SD_RESTART_TIMEOUT, NULL);
19503 
19504 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19505 	    "sd_pkt_status_qfull: exit\n");
19506 }
19507 
19508 
19509 /*
19510  * Function: sd_reset_target
19511  *
19512  * Description: Issue a scsi_reset(9F), with either RESET_LUN,
19513  *		RESET_TARGET, or RESET_ALL.
19514  *
19515  * Context: May be called under interrupt context.
19516  */
19517 
19518 static void
19519 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp)
19520 {
19521 	int rval = 0;
19522 
19523 	ASSERT(un != NULL);
19524 	ASSERT(mutex_owned(SD_MUTEX(un)));
19525 	ASSERT(pktp != NULL);
19526 
19527 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n");
19528 
19529 	/*
19530 	 * No need to reset if the transport layer has already done so.
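	 * (i.e. one of STAT_BUS_RESET, STAT_DEV_RESET, or STAT_ABORTED is
	 * already recorded in pkt_statistics, which is exactly what the
	 * test below checks)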
19531 */ 19532 if ((pktp->pkt_statistics & 19533 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 19534 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19535 "sd_reset_target: no reset\n"); 19536 return; 19537 } 19538 19539 mutex_exit(SD_MUTEX(un)); 19540 19541 if (un->un_f_allow_bus_device_reset == TRUE) { 19542 if (un->un_f_lun_reset_enabled == TRUE) { 19543 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19544 "sd_reset_target: RESET_LUN\n"); 19545 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 19546 } 19547 if (rval == 0) { 19548 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19549 "sd_reset_target: RESET_TARGET\n"); 19550 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 19551 } 19552 } 19553 19554 if (rval == 0) { 19555 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19556 "sd_reset_target: RESET_ALL\n"); 19557 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 19558 } 19559 19560 mutex_enter(SD_MUTEX(un)); 19561 19562 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 19563 } 19564 19565 /* 19566 * Function: sd_target_change_task 19567 * 19568 * Description: Handle dynamic target change 19569 * 19570 * Context: Executes in a taskq() thread context 19571 */ 19572 static void 19573 sd_target_change_task(void *arg) 19574 { 19575 struct sd_lun *un = arg; 19576 uint64_t capacity; 19577 diskaddr_t label_cap; 19578 uint_t lbasize; 19579 sd_ssc_t *ssc; 19580 19581 ASSERT(un != NULL); 19582 ASSERT(!mutex_owned(SD_MUTEX(un))); 19583 19584 if ((un->un_f_blockcount_is_valid == FALSE) || 19585 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 19586 return; 19587 } 19588 19589 ssc = sd_ssc_init(un); 19590 19591 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity, 19592 &lbasize, SD_PATH_DIRECT) != 0) { 19593 SD_ERROR(SD_LOG_ERROR, un, 19594 "sd_target_change_task: fail to read capacity\n"); 19595 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19596 goto task_exit; 19597 } 19598 19599 mutex_enter(SD_MUTEX(un)); 19600 if (capacity <= un->un_blockcount) { 19601 mutex_exit(SD_MUTEX(un)); 19602 goto task_exit; 19603 } 19604 19605 sd_update_block_info(un, lbasize, capacity); 19606 mutex_exit(SD_MUTEX(un)); 19607 19608 /* 19609 * If lun is EFI labeled and lun capacity is greater than the 19610 * capacity contained in the label, log a sys event. 
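	 *
	 * The sysevent (class EC_DEV_STATUS, subclass ESC_DEV_DLE, posted
	 * via sd_log_lun_expansion_event() below) is how userland learns of
	 * the dynamic LUN expansion.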
19611 */ 19612 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 19613 (void*)SD_PATH_DIRECT) == 0) { 19614 mutex_enter(SD_MUTEX(un)); 19615 if (un->un_f_blockcount_is_valid && 19616 un->un_blockcount > label_cap) { 19617 mutex_exit(SD_MUTEX(un)); 19618 sd_log_lun_expansion_event(un, KM_SLEEP); 19619 } else { 19620 mutex_exit(SD_MUTEX(un)); 19621 } 19622 } 19623 19624 task_exit: 19625 sd_ssc_fini(ssc); 19626 } 19627 19628 19629 /* 19630 * Function: sd_log_dev_status_event 19631 * 19632 * Description: Log EC_dev_status sysevent 19633 * 19634 * Context: Never called from interrupt context 19635 */ 19636 static void 19637 sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag) 19638 { 19639 int err; 19640 char *path; 19641 nvlist_t *attr_list; 19642 19643 /* Allocate and build sysevent attribute list */ 19644 err = nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, km_flag); 19645 if (err != 0) { 19646 SD_ERROR(SD_LOG_ERROR, un, 19647 "sd_log_dev_status_event: fail to allocate space\n"); 19648 return; 19649 } 19650 19651 path = kmem_alloc(MAXPATHLEN, km_flag); 19652 if (path == NULL) { 19653 nvlist_free(attr_list); 19654 SD_ERROR(SD_LOG_ERROR, un, 19655 "sd_log_dev_status_event: fail to allocate space\n"); 19656 return; 19657 } 19658 /* 19659 * Add path attribute to identify the lun. 19660 * We are using minor node 'a' as the sysevent attribute. 19661 */ 19662 (void) snprintf(path, MAXPATHLEN, "/devices"); 19663 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path)); 19664 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path), 19665 ":a"); 19666 19667 err = nvlist_add_string(attr_list, DEV_PHYS_PATH, path); 19668 if (err != 0) { 19669 nvlist_free(attr_list); 19670 kmem_free(path, MAXPATHLEN); 19671 SD_ERROR(SD_LOG_ERROR, un, 19672 "sd_log_dev_status_event: fail to add attribute\n"); 19673 return; 19674 } 19675 19676 /* Log dynamic lun expansion sysevent */ 19677 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS, 19678 esc, attr_list, NULL, km_flag); 19679 if (err != DDI_SUCCESS) { 19680 SD_ERROR(SD_LOG_ERROR, un, 19681 "sd_log_dev_status_event: fail to log sysevent\n"); 19682 } 19683 19684 nvlist_free(attr_list); 19685 kmem_free(path, MAXPATHLEN); 19686 } 19687 19688 19689 /* 19690 * Function: sd_log_lun_expansion_event 19691 * 19692 * Description: Log lun expansion sys event 19693 * 19694 * Context: Never called from interrupt context 19695 */ 19696 static void 19697 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag) 19698 { 19699 sd_log_dev_status_event(un, ESC_DEV_DLE, km_flag); 19700 } 19701 19702 19703 /* 19704 * Function: sd_log_eject_request_event 19705 * 19706 * Description: Log eject request sysevent 19707 * 19708 * Context: Never called from interrupt context 19709 */ 19710 static void 19711 sd_log_eject_request_event(struct sd_lun *un, int km_flag) 19712 { 19713 sd_log_dev_status_event(un, ESC_DEV_EJECT_REQUEST, km_flag); 19714 } 19715 19716 19717 /* 19718 * Function: sd_media_change_task 19719 * 19720 * Description: Recovery action for CDROM to become available. 
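 *
 *		Dispatched from sd_sense_key_unit_attention() with the failed
 *		packet as the argument, roughly (sketch; the real call checks
 *		the return value):
 *
 *			taskq_dispatch(sd_tq, sd_media_change_task, pktp,
 *			    KM_NOSLEEP);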
19721  *
19722  * Context: Executes in a taskq() thread context
19723  */
19724 
19725 static void
19726 sd_media_change_task(void *arg)
19727 {
19728 	struct scsi_pkt *pktp = arg;
19729 	struct sd_lun *un;
19730 	struct buf *bp;
19731 	struct sd_xbuf *xp;
19732 	int err = 0;
19733 	int retry_count = 0;
19734 	int retry_limit = SD_UNIT_ATTENTION_RETRY / 10;
19735 	struct sd_sense_info si;
19736 
19737 	ASSERT(pktp != NULL);
19738 	bp = (struct buf *)pktp->pkt_private;
19739 	ASSERT(bp != NULL);
19740 	xp = SD_GET_XBUF(bp);
19741 	ASSERT(xp != NULL);
19742 	un = SD_GET_UN(bp);
19743 	ASSERT(un != NULL);
19744 	ASSERT(!mutex_owned(SD_MUTEX(un)));
19745 	ASSERT(un->un_f_monitor_media_state);
19746 
19747 	si.ssi_severity = SCSI_ERR_INFO;
19748 	si.ssi_pfa_flag = FALSE;
19749 
19750 	/*
19751 	 * When a reset is issued on a CDROM, it takes a long time to
19752 	 * recover. The first few attempts to read the capacity and other
19753 	 * things related to handling the unit attention fail (with an ASC
19754 	 * of 0x4 and an ASCQ of 0x1). In that case we want to do enough
19755 	 * retries, while still limiting the retries in other cases of
19756 	 * genuine failure, like no media in the drive.
19757 	 */
19758 	while (retry_count++ < retry_limit) {
19759 		if ((err = sd_handle_mchange(un)) == 0) {
19760 			break;
19761 		}
19762 		if (err == EAGAIN) {
19763 			retry_limit = SD_UNIT_ATTENTION_RETRY;
19764 		}
19765 		/* Sleep for 0.5 sec. & try again */
19766 		delay(drv_usectohz(500000));
19767 	}
19768 
19769 	/*
19770 	 * Dispatch (retry or fail) the original command here,
19771 	 * along with appropriate console messages....
19772 	 *
19773 	 * Must grab the mutex before calling sd_retry_command,
19774 	 * sd_print_sense_msg and sd_return_failed_command.
19775 	 */
19776 	mutex_enter(SD_MUTEX(un));
19777 	if (err != SD_CMD_SUCCESS) {
19778 		SD_UPDATE_ERRSTATS(un, sd_harderrs);
19779 		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
19780 		si.ssi_severity = SCSI_ERR_FATAL;
19781 		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
19782 		sd_return_failed_command(un, bp, EIO);
19783 	} else {
19784 		sd_retry_command(un, bp, SD_RETRIES_UA, sd_print_sense_msg,
19785 		    &si, EIO, (clock_t)0, NULL);
19786 	}
19787 	mutex_exit(SD_MUTEX(un));
19788 }
19789 
19790 
19791 
19792 /*
19793  * Function: sd_handle_mchange
19794  *
19795  * Description: Perform geometry validation & other recovery when CDROM
19796  *		has been removed from drive.
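 *
 *		The recovery sequence is, in outline: re-read the capacity,
 *		refresh the block info and the capacity kstat, invalidate and
 *		re-validate the cmlb label, then attempt to re-lock the door.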
19797 * 19798 * Return Code: 0 for success 19799 * errno-type return code of either sd_send_scsi_DOORLOCK() or 19800 * sd_send_scsi_READ_CAPACITY() 19801 * 19802 * Context: Executes in a taskq() thread context 19803 */ 19804 19805 static int 19806 sd_handle_mchange(struct sd_lun *un) 19807 { 19808 uint64_t capacity; 19809 uint32_t lbasize; 19810 int rval; 19811 sd_ssc_t *ssc; 19812 19813 ASSERT(!mutex_owned(SD_MUTEX(un))); 19814 ASSERT(un->un_f_monitor_media_state); 19815 19816 ssc = sd_ssc_init(un); 19817 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 19818 SD_PATH_DIRECT_PRIORITY); 19819 19820 if (rval != 0) 19821 goto failed; 19822 19823 mutex_enter(SD_MUTEX(un)); 19824 sd_update_block_info(un, lbasize, capacity); 19825 19826 if (un->un_errstats != NULL) { 19827 struct sd_errstats *stp = 19828 (struct sd_errstats *)un->un_errstats->ks_data; 19829 stp->sd_capacity.value.ui64 = (uint64_t) 19830 ((uint64_t)un->un_blockcount * 19831 (uint64_t)un->un_tgt_blocksize); 19832 } 19833 19834 /* 19835 * Check if the media in the device is writable or not 19836 */ 19837 if (ISCD(un)) { 19838 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY); 19839 } 19840 19841 /* 19842 * Note: Maybe let the strategy/partitioning chain worry about getting 19843 * valid geometry. 19844 */ 19845 mutex_exit(SD_MUTEX(un)); 19846 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 19847 19848 19849 if (cmlb_validate(un->un_cmlbhandle, 0, 19850 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 19851 sd_ssc_fini(ssc); 19852 return (EIO); 19853 } else { 19854 if (un->un_f_pkstats_enabled) { 19855 sd_set_pstats(un); 19856 SD_TRACE(SD_LOG_IO_PARTITION, un, 19857 "sd_handle_mchange: un:0x%p pstats created and " 19858 "set\n", un); 19859 } 19860 } 19861 19862 /* 19863 * Try to lock the door 19864 */ 19865 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 19866 SD_PATH_DIRECT_PRIORITY); 19867 failed: 19868 if (rval != 0) 19869 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19870 sd_ssc_fini(ssc); 19871 return (rval); 19872 } 19873 19874 19875 /* 19876 * Function: sd_send_scsi_DOORLOCK 19877 * 19878 * Description: Issue the scsi DOOR LOCK command 19879 * 19880 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19881 * structure for this target. 19882 * flag - SD_REMOVAL_ALLOW 19883 * SD_REMOVAL_PREVENT 19884 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19885 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19886 * to use the USCSI "direct" chain and bypass the normal 19887 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19888 * command is issued as part of an error recovery action. 19889 * 19890 * Return Code: 0 - Success 19891 * errno return code from sd_ssc_send() 19892 * 19893 * Context: Can sleep. 19894 */ 19895 19896 static int 19897 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag) 19898 { 19899 struct scsi_extended_sense sense_buf; 19900 union scsi_cdb cdb; 19901 struct uscsi_cmd ucmd_buf; 19902 int status; 19903 struct sd_lun *un; 19904 19905 ASSERT(ssc != NULL); 19906 un = ssc->ssc_un; 19907 ASSERT(un != NULL); 19908 ASSERT(!mutex_owned(SD_MUTEX(un))); 19909 19910 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 19911 19912 /* already determined doorlock is not supported, fake success */ 19913 if (un->un_f_doorlock_supported == FALSE) { 19914 return (0); 19915 } 19916 19917 /* 19918 * If we are ejecting and see an SD_REMOVAL_PREVENT 19919 * ignore the command so we can complete the eject 19920 * operation. 
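	 *
	 * For reference, the PREVENT ALLOW MEDIUM REMOVAL CDB built below
	 * carries the prevent/allow flag in byte 4 (sketch, as in the body):
	 *
	 *	cdb.scc_cmd = SCMD_DOORLOCK;
	 *	cdb.cdb_opaque[4] = (uchar_t)flag;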
19921 	 */
19922 	if (flag == SD_REMOVAL_PREVENT) {
19923 		mutex_enter(SD_MUTEX(un));
19924 		if (un->un_f_ejecting == TRUE) {
19925 			mutex_exit(SD_MUTEX(un));
19926 			return (EAGAIN);
19927 		}
19928 		mutex_exit(SD_MUTEX(un));
19929 	}
19930 
19931 	bzero(&cdb, sizeof (cdb));
19932 	bzero(&ucmd_buf, sizeof (ucmd_buf));
19933 
19934 	cdb.scc_cmd = SCMD_DOORLOCK;
19935 	cdb.cdb_opaque[4] = (uchar_t)flag;
19936 
19937 	ucmd_buf.uscsi_cdb = (char *)&cdb;
19938 	ucmd_buf.uscsi_cdblen = CDB_GROUP0;
19939 	ucmd_buf.uscsi_bufaddr = NULL;
19940 	ucmd_buf.uscsi_buflen = 0;
19941 	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19942 	ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
19943 	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
19944 	ucmd_buf.uscsi_timeout = 15;
19945 
19946 	SD_TRACE(SD_LOG_IO, un,
19947 	    "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n");
19948 
19949 	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19950 	    UIO_SYSSPACE, path_flag);
19951 
19952 	if (status == 0)
19953 		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
19954 
19955 	if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
19956 	    (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19957 	    (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) {
19958 		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19959 
19960 		/* fake success and skip subsequent doorlock commands */
19961 		un->un_f_doorlock_supported = FALSE;
19962 		return (0);
19963 	}
19964 
19965 	return (status);
19966 }
19967 
19968 /*
19969  * Function: sd_send_scsi_READ_CAPACITY
19970  *
19971  * Description: This routine uses the scsi READ CAPACITY command to determine
19972  *		the device capacity in number of blocks and the device native
19973  *		block size. If this function returns a failure, then the
19974  *		values in *capp and *lbap are undefined. If the capacity
19975  *		returned is 0xffffffff then the lun is too large for a
19976  *		normal READ CAPACITY command and the results of a
19977  *		READ CAPACITY 16 will be used instead.
19978  *
19979  * Arguments: ssc - ssc contains ptr to soft state struct for the target
19980  *		capp - ptr to unsigned 64-bit variable to receive the
19981  *		capacity value from the command.
19982  *		lbap - ptr to unsigned 32-bit variable to receive the
19983  *		block size value from the command
19984  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19985  *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19986  *		to use the USCSI "direct" chain and bypass the normal
19987  *		command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19988  *		command is issued as part of an error recovery action.
19989  *
19990  * Return Code: 0 - Success
19991  *		EIO - IO error
19992  *		EACCES - Reservation conflict detected
19993  *		EAGAIN - Device is becoming ready
19994  *		errno return code from sd_ssc_send()
19995  *
19996  * Context: Can sleep. Blocks until command completes.
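 *
 *		Typical use in this file (hedged sketch, patterned on
 *		sd_target_change_task()):
 *
 *			uint64_t capacity;
 *			uint32_t lbasize;
 *
 *			if (sd_send_scsi_READ_CAPACITY(ssc, &capacity,
 *			    &lbasize, SD_PATH_DIRECT) == 0)
 *				sd_update_block_info(un, lbasize, capacity);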
19997 */ 19998 19999 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 20000 20001 static int 20002 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap, 20003 int path_flag) 20004 { 20005 struct scsi_extended_sense sense_buf; 20006 struct uscsi_cmd ucmd_buf; 20007 union scsi_cdb cdb; 20008 uint32_t *capacity_buf; 20009 uint64_t capacity; 20010 uint32_t lbasize; 20011 uint32_t pbsize; 20012 int status; 20013 struct sd_lun *un; 20014 20015 ASSERT(ssc != NULL); 20016 20017 un = ssc->ssc_un; 20018 ASSERT(un != NULL); 20019 ASSERT(!mutex_owned(SD_MUTEX(un))); 20020 ASSERT(capp != NULL); 20021 ASSERT(lbap != NULL); 20022 20023 SD_TRACE(SD_LOG_IO, un, 20024 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 20025 20026 /* 20027 * First send a READ_CAPACITY command to the target. 20028 * (This command is mandatory under SCSI-2.) 20029 * 20030 * Set up the CDB for the READ_CAPACITY command. The Partial 20031 * Medium Indicator bit is cleared. The address field must be 20032 * zero if the PMI bit is zero. 20033 */ 20034 bzero(&cdb, sizeof (cdb)); 20035 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20036 20037 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 20038 20039 cdb.scc_cmd = SCMD_READ_CAPACITY; 20040 20041 ucmd_buf.uscsi_cdb = (char *)&cdb; 20042 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20043 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 20044 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 20045 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20046 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 20047 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20048 ucmd_buf.uscsi_timeout = 60; 20049 20050 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20051 UIO_SYSSPACE, path_flag); 20052 20053 switch (status) { 20054 case 0: 20055 /* Return failure if we did not get valid capacity data. */ 20056 if (ucmd_buf.uscsi_resid != 0) { 20057 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20058 "sd_send_scsi_READ_CAPACITY received invalid " 20059 "capacity data"); 20060 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20061 return (EIO); 20062 } 20063 /* 20064 * Read capacity and block size from the READ CAPACITY 10 data. 20065 * This data may be adjusted later due to device specific 20066 * issues. 20067 * 20068 * According to the SCSI spec, the READ CAPACITY 10 20069 * command returns the following: 20070 * 20071 * bytes 0-3: Maximum logical block address available. 20072 * (MSB in byte:0 & LSB in byte:3) 20073 * 20074 * bytes 4-7: Block length in bytes 20075 * (MSB in byte:4 & LSB in byte:7) 20076 * 20077 */ 20078 capacity = BE_32(capacity_buf[0]); 20079 lbasize = BE_32(capacity_buf[1]); 20080 20081 /* 20082 * Done with capacity_buf 20083 */ 20084 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20085 20086 /* 20087 * if the reported capacity is set to all 0xf's, then 20088 * this disk is too large and requires SBC-2 commands. 20089 * Reissue the request using READ CAPACITY 16. 20090 */ 20091 if (capacity == 0xffffffff) { 20092 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 20093 status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, 20094 &lbasize, &pbsize, path_flag); 20095 if (status != 0) { 20096 return (status); 20097 } else { 20098 goto rc16_done; 20099 } 20100 } 20101 break; /* Success! 
*/ 20102 case EIO: 20103 switch (ucmd_buf.uscsi_status) { 20104 case STATUS_RESERVATION_CONFLICT: 20105 status = EACCES; 20106 break; 20107 case STATUS_CHECK: 20108 /* 20109 * Check condition; look for ASC/ASCQ of 0x04/0x01 20110 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 20111 */ 20112 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20113 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 20114 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 20115 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20116 return (EAGAIN); 20117 } 20118 break; 20119 default: 20120 break; 20121 } 20122 /* FALLTHRU */ 20123 default: 20124 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20125 return (status); 20126 } 20127 20128 /* 20129 * Some ATAPI CD-ROM drives report inaccurate LBA size values 20130 * (2352 and 0 are common) so for these devices always force the value 20131 * to 2048 as required by the ATAPI specs. 20132 */ 20133 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 20134 lbasize = 2048; 20135 } 20136 20137 /* 20138 * Get the maximum LBA value from the READ CAPACITY data. 20139 * Here we assume that the Partial Medium Indicator (PMI) bit 20140 * was cleared when issuing the command. This means that the LBA 20141 * returned from the device is the LBA of the last logical block 20142 * on the logical unit. The actual logical block count will be 20143 * this value plus one. 20144 */ 20145 capacity += 1; 20146 20147 /* 20148 * Currently, for removable media, the capacity is saved in terms 20149 * of un->un_sys_blocksize, so scale the capacity value to reflect this. 20150 */ 20151 if (un->un_f_has_removable_media) 20152 capacity *= (lbasize / un->un_sys_blocksize); 20153 20154 rc16_done: 20155 20156 /* 20157 * Copy the values from the READ CAPACITY command into the space 20158 * provided by the caller. 20159 */ 20160 *capp = capacity; 20161 *lbap = lbasize; 20162 20163 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 20164 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 20165 20166 /* 20167 * Both the lbasize and capacity from the device must be nonzero, 20168 * otherwise we assume that the values are not valid and return 20169 * failure to the caller. (4203735) 20170 */ 20171 if ((capacity == 0) || (lbasize == 0)) { 20172 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20173 "sd_send_scsi_READ_CAPACITY received invalid value " 20174 "capacity %llu lbasize %d", capacity, lbasize); 20175 return (EIO); 20176 } 20177 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20178 return (0); 20179 } 20180 20181 /* 20182 * Function: sd_send_scsi_READ_CAPACITY_16 20183 * 20184 * Description: This routine uses the scsi READ CAPACITY 16 command to 20185 * determine the device capacity in number of blocks and the 20186 * device native block size. If this function returns a failure, 20187 * then the values in *capp and *lbap are undefined. 20188 * This routine should be called by sd_send_scsi_READ_CAPACITY 20189 * which will apply any device specific adjustments to capacity 20190 * and lbasize. One exception is it is also called by 20191 * sd_get_media_info_ext. In that function, there is no need to 20192 * adjust the capacity and lbasize. 20193 * 20194 * Arguments: ssc - ssc contains ptr to soft state struct for the target 20195 * capp - ptr to unsigned 64-bit variable to receive the 20196 * capacity value from the command. 
20197  *		lbap - ptr to unsigned 32-bit variable to receive the
20198  *			block size value from the command.
20199  *		psp  - ptr to unsigned 32-bit variable to receive the
20200  *			physical block size value from the command.
20201  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20202  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20203  *			to use the USCSI "direct" chain and bypass the normal
20204  *			command waitq. SD_PATH_DIRECT_PRIORITY is used when
20205  *			this command is issued as part of an error recovery
20206  *			action.
20207  *
20208  * Return Code: 0 - Success
20209  *		EIO - IO error
20210  *		EACCES - Reservation conflict detected
20211  *		EAGAIN - Device is becoming ready
20212  *		errno return code from sd_ssc_send()
20213  *
20214  * Context: Can sleep.  Blocks until command completes.
20215  */
20216 
20217 #define	SD_CAPACITY_16_SIZE	sizeof (struct scsi_capacity_16)
20218 
20219 static int
20220 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
20221 	uint32_t *lbap, uint32_t *psp, int path_flag)
20222 {
20223 	struct scsi_extended_sense	sense_buf;
20224 	struct uscsi_cmd	ucmd_buf;
20225 	union scsi_cdb		cdb;
20226 	uint64_t		*capacity16_buf;
20227 	uint64_t		capacity;
20228 	uint32_t		lbasize;
20229 	uint32_t		pbsize;
20230 	uint32_t		lbpb_exp;
20231 	int			status;
20232 	struct sd_lun		*un;
20233 
20234 	ASSERT(ssc != NULL);
20235 
20236 	un = ssc->ssc_un;
20237 	ASSERT(un != NULL);
20238 	ASSERT(!mutex_owned(SD_MUTEX(un)));
20239 	ASSERT(capp != NULL);
20240 	ASSERT(lbap != NULL);
20241 
20242 	SD_TRACE(SD_LOG_IO, un,
20243 	    "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);
20244 
20245 	/*
20246 	 * First send a READ_CAPACITY_16 command to the target.
20247 	 *
20248 	 * Set up the CDB for the READ_CAPACITY_16 command.  The Partial
20249 	 * Medium Indicator bit is cleared.  The address field must be
20250 	 * zero if the PMI bit is zero.
20251 	 */
20252 	bzero(&cdb, sizeof (cdb));
20253 	bzero(&ucmd_buf, sizeof (ucmd_buf));
20254 
20255 	capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
20256 
20257 	ucmd_buf.uscsi_cdb = (char *)&cdb;
20258 	ucmd_buf.uscsi_cdblen = CDB_GROUP4;
20259 	ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
20260 	ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
20261 	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20262 	ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
20263 	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20264 	ucmd_buf.uscsi_timeout = 60;
20265 
20266 	/*
20267 	 * Read Capacity (16) is a Service Action In command.  One
20268 	 * command byte (0x9E) is overloaded for multiple operations,
20269 	 * with the second CDB byte specifying the desired operation.
20270 	 */
20271 	cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
20272 	cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
20273 
20274 	/*
20275 	 * Fill in allocation length field
20276 	 */
20277 	FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
20278 
20279 	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20280 	    UIO_SYSSPACE, path_flag);
20281 
20282 	switch (status) {
20283 	case 0:
20284 		/* Return failure if we did not get valid capacity data. */
20285 		if (ucmd_buf.uscsi_resid > 20) {
20286 			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20287 			    "sd_send_scsi_READ_CAPACITY_16 received invalid "
20288 			    "capacity data");
20289 			kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20290 			return (EIO);
20291 		}
20292 
20293 		/*
20294 		 * Read capacity and block size from the READ CAPACITY 16 data.
20295 		 * This data may be adjusted later due to device specific
20296 		 * issues.
20297 * 20298 * According to the SCSI spec, the READ CAPACITY 16 20299 * command returns the following: 20300 * 20301 * bytes 0-7: Maximum logical block address available. 20302 * (MSB in byte:0 & LSB in byte:7) 20303 * 20304 * bytes 8-11: Block length in bytes 20305 * (MSB in byte:8 & LSB in byte:11) 20306 * 20307 * byte 13: LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT 20308 */ 20309 capacity = BE_64(capacity16_buf[0]); 20310 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 20311 lbpb_exp = (BE_64(capacity16_buf[1]) >> 16) & 0x0f; 20312 20313 pbsize = lbasize << lbpb_exp; 20314 20315 /* 20316 * Done with capacity16_buf 20317 */ 20318 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20319 20320 /* 20321 * if the reported capacity is set to all 0xf's, then 20322 * this disk is too large. This could only happen with 20323 * a device that supports LBAs larger than 64 bits which 20324 * are not defined by any current T10 standards. 20325 */ 20326 if (capacity == 0xffffffffffffffff) { 20327 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20328 "disk is too large"); 20329 return (EIO); 20330 } 20331 break; /* Success! */ 20332 case EIO: 20333 switch (ucmd_buf.uscsi_status) { 20334 case STATUS_RESERVATION_CONFLICT: 20335 status = EACCES; 20336 break; 20337 case STATUS_CHECK: 20338 /* 20339 * Check condition; look for ASC/ASCQ of 0x04/0x01 20340 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 20341 */ 20342 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20343 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 20344 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 20345 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20346 return (EAGAIN); 20347 } 20348 break; 20349 default: 20350 break; 20351 } 20352 /* FALLTHRU */ 20353 default: 20354 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20355 return (status); 20356 } 20357 20358 /* 20359 * Some ATAPI CD-ROM drives report inaccurate LBA size values 20360 * (2352 and 0 are common) so for these devices always force the value 20361 * to 2048 as required by the ATAPI specs. 20362 */ 20363 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 20364 lbasize = 2048; 20365 } 20366 20367 /* 20368 * Get the maximum LBA value from the READ CAPACITY 16 data. 20369 * Here we assume that the Partial Medium Indicator (PMI) bit 20370 * was cleared when issuing the command. This means that the LBA 20371 * returned from the device is the LBA of the last logical block 20372 * on the logical unit. The actual logical block count will be 20373 * this value plus one. 20374 */ 20375 capacity += 1; 20376 20377 /* 20378 * Currently, for removable media, the capacity is saved in terms 20379 * of un->un_sys_blocksize, so scale the capacity value to reflect this. 
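	 * For example (illustrative figures): with lbasize = 2048 and
	 * un_sys_blocksize = 512, the device-reported block count is
	 * multiplied by 4 so that it counts 512-byte system blocks.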
20380 	 */
20381 	if (un->un_f_has_removable_media)
20382 		capacity *= (lbasize / un->un_sys_blocksize);
20383 
20384 	*capp = capacity;
20385 	*lbap = lbasize;
20386 	*psp = pbsize;
20387 
20388 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
20389 	    "capacity:0x%llx lbasize:0x%x, pbsize: 0x%x\n",
20390 	    capacity, lbasize, pbsize);
20391 
20392 	if ((capacity == 0) || (lbasize == 0) || (pbsize == 0)) {
20393 		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20394 		    "sd_send_scsi_READ_CAPACITY_16 received invalid value "
20395 		    "capacity %llu lbasize %d pbsize %d",
20396 		    capacity, lbasize, pbsize);
20397 		return (EIO);
20398 	}
20399 
20400 	sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20401 	return (0);
20402 }
20403 
20404 /*
20405  * Function: sd_send_scsi_START_STOP_UNIT
20406  *
20407  * Description: Issue a scsi START STOP UNIT command to the target.
20408  *
20409  * Arguments: ssc    - ssc contains pointer to driver soft state (unit)
20410  *		structure for this target.
20411  *	      pc_flag - SD_POWER_CONDITION
20412  *			SD_START_STOP
20413  *	      flag  - SD_TARGET_START
20414  *		     SD_TARGET_STOP
20415  *		     SD_TARGET_EJECT
20416  *		     SD_TARGET_CLOSE
20417  *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20418  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20419  *			to use the USCSI "direct" chain and bypass the normal
20420  *			command waitq. SD_PATH_DIRECT_PRIORITY is used when this
20421  *			command is issued as part of an error recovery action.
20422  *
20423  * Return Code: 0 - Success
20424  *		EIO - IO error
20425  *		EACCES - Reservation conflict detected
20426  *		ENXIO  - Not Ready, medium not present
20427  *		errno return code from sd_ssc_send()
20428  *
20429  * Context: Can sleep.
20430  */
20431 
20432 static int
20433 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, int flag,
20434     int path_flag)
20435 {
20436 	struct scsi_extended_sense	sense_buf;
20437 	union scsi_cdb		cdb;
20438 	struct uscsi_cmd	ucmd_buf;
20439 	int			status;
20440 	struct sd_lun		*un;
20441 
20442 	ASSERT(ssc != NULL);
20443 	un = ssc->ssc_un;
20444 	ASSERT(un != NULL);
20445 	ASSERT(!mutex_owned(SD_MUTEX(un)));
20446 
20447 	SD_TRACE(SD_LOG_IO, un,
20448 	    "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un);
20449 
20450 	if (un->un_f_check_start_stop &&
20451 	    (pc_flag == SD_START_STOP) &&
20452 	    ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) &&
20453 	    (un->un_f_start_stop_supported != TRUE)) {
20454 		return (0);
20455 	}
20456 
20457 	/*
20458 	 * If we are performing an eject operation and
20459 	 * we receive any command other than SD_TARGET_EJECT,
20460 	 * we should immediately return.
20461 	 */
20462 	if (flag != SD_TARGET_EJECT) {
20463 		mutex_enter(SD_MUTEX(un));
20464 		if (un->un_f_ejecting == TRUE) {
20465 			mutex_exit(SD_MUTEX(un));
20466 			return (EAGAIN);
20467 		}
20468 		mutex_exit(SD_MUTEX(un));
20469 	}
20470 
20471 	bzero(&cdb, sizeof (cdb));
20472 	bzero(&ucmd_buf, sizeof (ucmd_buf));
20473 	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20474 
20475 	cdb.scc_cmd = SCMD_START_STOP;
20476 	cdb.cdb_opaque[4] = (pc_flag == SD_POWER_CONDITION) ?
20477 (uchar_t)(flag << 4) : (uchar_t)flag; 20478 20479 ucmd_buf.uscsi_cdb = (char *)&cdb; 20480 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20481 ucmd_buf.uscsi_bufaddr = NULL; 20482 ucmd_buf.uscsi_buflen = 0; 20483 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20484 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20485 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20486 ucmd_buf.uscsi_timeout = 200; 20487 20488 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20489 UIO_SYSSPACE, path_flag); 20490 20491 switch (status) { 20492 case 0: 20493 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20494 break; /* Success! */ 20495 case EIO: 20496 switch (ucmd_buf.uscsi_status) { 20497 case STATUS_RESERVATION_CONFLICT: 20498 status = EACCES; 20499 break; 20500 case STATUS_CHECK: 20501 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 20502 switch (scsi_sense_key( 20503 (uint8_t *)&sense_buf)) { 20504 case KEY_ILLEGAL_REQUEST: 20505 status = ENOTSUP; 20506 break; 20507 case KEY_NOT_READY: 20508 if (scsi_sense_asc( 20509 (uint8_t *)&sense_buf) 20510 == 0x3A) { 20511 status = ENXIO; 20512 } 20513 break; 20514 default: 20515 break; 20516 } 20517 } 20518 break; 20519 default: 20520 break; 20521 } 20522 break; 20523 default: 20524 break; 20525 } 20526 20527 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 20528 20529 return (status); 20530 } 20531 20532 20533 /* 20534 * Function: sd_start_stop_unit_callback 20535 * 20536 * Description: timeout(9F) callback to begin recovery process for a 20537 * device that has spun down. 20538 * 20539 * Arguments: arg - pointer to associated softstate struct. 20540 * 20541 * Context: Executes in a timeout(9F) thread context 20542 */ 20543 20544 static void 20545 sd_start_stop_unit_callback(void *arg) 20546 { 20547 struct sd_lun *un = arg; 20548 ASSERT(un != NULL); 20549 ASSERT(!mutex_owned(SD_MUTEX(un))); 20550 20551 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 20552 20553 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 20554 } 20555 20556 20557 /* 20558 * Function: sd_start_stop_unit_task 20559 * 20560 * Description: Recovery procedure when a drive is spun down. 20561 * 20562 * Arguments: arg - pointer to associated softstate struct. 20563 * 20564 * Context: Executes in a taskq() thread context 20565 */ 20566 20567 static void 20568 sd_start_stop_unit_task(void *arg) 20569 { 20570 struct sd_lun *un = arg; 20571 sd_ssc_t *ssc; 20572 int power_level; 20573 int rval; 20574 20575 ASSERT(un != NULL); 20576 ASSERT(!mutex_owned(SD_MUTEX(un))); 20577 20578 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 20579 20580 /* 20581 * Some unformatted drives report not ready error, no need to 20582 * restart if format has been initiated. 20583 */ 20584 mutex_enter(SD_MUTEX(un)); 20585 if (un->un_f_format_in_progress == TRUE) { 20586 mutex_exit(SD_MUTEX(un)); 20587 return; 20588 } 20589 mutex_exit(SD_MUTEX(un)); 20590 20591 ssc = sd_ssc_init(un); 20592 /* 20593 * When a START STOP command is issued from here, it is part of a 20594 * failure recovery operation and must be issued before any other 20595 * commands, including any pending retries. Thus it must be sent 20596 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 20597 * succeeds or not, we will start I/O after the attempt. 20598 * If power condition is supported and the current power level 20599 * is capable of performing I/O, we should set the power condition 20600 * to that level. Otherwise, set the power condition to ACTIVE. 
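	 * (With SD_POWER_CONDITION, sd_send_scsi_START_STOP_UNIT() encodes
	 * the condition in the upper nibble of CDB byte 4, i.e. "flag << 4";
	 * see that routine above.)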
20601 	 */
20602 	if (un->un_f_power_condition_supported) {
20603 		mutex_enter(SD_MUTEX(un));
20604 		ASSERT(SD_PM_IS_LEVEL_VALID(un, un->un_power_level));
20605 		power_level = sd_pwr_pc.ran_perf[un->un_power_level]
20606 		    > 0 ? un->un_power_level : SD_SPINDLE_ACTIVE;
20607 		mutex_exit(SD_MUTEX(un));
20608 		rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION,
20609 		    sd_pl2pc[power_level], SD_PATH_DIRECT_PRIORITY);
20610 	} else {
20611 		rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
20612 		    SD_TARGET_START, SD_PATH_DIRECT_PRIORITY);
20613 	}
20614 
20615 	if (rval != 0)
20616 		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
20617 	sd_ssc_fini(ssc);
20618 	/*
20619 	 * The above call blocks until the START_STOP_UNIT command completes.
20620 	 * Now that it has completed, we must re-try the original IO that
20621 	 * received the NOT READY condition in the first place. There are
20622 	 * three possible conditions here:
20623 	 *
20624 	 *  (1) The original IO is on un_retry_bp.
20625 	 *  (2) The original IO is on the regular wait queue, and un_retry_bp
20626 	 *	is NULL.
20627 	 *  (3) The original IO is on the regular wait queue, and un_retry_bp
20628 	 *	points to some other, unrelated bp.
20629 	 *
20630 	 * For each case, we must call sd_start_cmds() with un_retry_bp
20631 	 * as the argument. If un_retry_bp is NULL, this will initiate
20632 	 * processing of the regular wait queue.  If un_retry_bp is not NULL,
20633 	 * then this will process the bp on un_retry_bp. That may or may not
20634 	 * be the original IO, but that does not matter: the important thing
20635 	 * is to keep the IO processing going at this point.
20636 	 *
20637 	 * Note: This is a very specific error recovery sequence associated
20638 	 * with a drive that is not spun up. We attempt a START_STOP_UNIT and
20639 	 * serialize the I/O with completion of the spin-up.
20640 	 */
20641 	mutex_enter(SD_MUTEX(un));
20642 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
20643 	    "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n",
20644 	    un, un->un_retry_bp);
20645 	un->un_startstop_timeid = NULL;	/* Timeout is no longer pending */
20646 	sd_start_cmds(un, un->un_retry_bp);
20647 	mutex_exit(SD_MUTEX(un));
20648 
20649 	SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n");
20650 }
20651 
20652 
20653 /*
20654  * Function: sd_send_scsi_INQUIRY
20655  *
20656  * Description: Issue the scsi INQUIRY command.
20657  *
20658  * Arguments: ssc   - ssc contains pointer to driver soft state (unit)
20659  *		structure for this target.
20660  *	      bufaddr - buffer to receive the INQUIRY data
20661  *	      buflen  - length of the INQUIRY data buffer
20662  *	      evpd    - EVPD bit for CDB byte 1 (requests a VPD page)
20663  *	      page_code - VPD page code to request when evpd is set
20664  *	      residp  - ptr to receive the command residual; may be NULL
20665  *
20666  * Return Code: 0 - Success
20667  *		errno return code from sd_ssc_send()
20668  *
20669  * Context: Can sleep.  Does not return until command is completed.
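 *
 * Usage sketch (hypothetical buffer names, for illustration only):
 * requesting the Unit Serial Number VPD page would look like
 *
 *	uchar_t	inq80[0xff];
 *	size_t	resid;
 *	status = sd_send_scsi_INQUIRY(ssc, inq80, sizeof (inq80),
 *	    0x01, 0x80, &resid);
 *
 * where evpd = 0x01 selects vital product data and page_code = 0x80
 * names the page.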
20670  */
20671 
20672 static int
20673 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen,
20674 	uchar_t evpd, uchar_t page_code, size_t *residp)
20675 {
20676 	union scsi_cdb		cdb;
20677 	struct uscsi_cmd	ucmd_buf;
20678 	int			status;
20679 	struct sd_lun		*un;
20680 
20681 	ASSERT(ssc != NULL);
20682 	un = ssc->ssc_un;
20683 	ASSERT(un != NULL);
20684 	ASSERT(!mutex_owned(SD_MUTEX(un)));
20685 	ASSERT(bufaddr != NULL);
20686 
20687 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
20688 
20689 	bzero(&cdb, sizeof (cdb));
20690 	bzero(&ucmd_buf, sizeof (ucmd_buf));
20691 	bzero(bufaddr, buflen);
20692 
20693 	cdb.scc_cmd = SCMD_INQUIRY;
20694 	cdb.cdb_opaque[1] = evpd;
20695 	cdb.cdb_opaque[2] = page_code;
20696 	FORMG0COUNT(&cdb, buflen);
20697 
20698 	ucmd_buf.uscsi_cdb = (char *)&cdb;
20699 	ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20700 	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
20701 	ucmd_buf.uscsi_buflen = buflen;
20702 	ucmd_buf.uscsi_rqbuf = NULL;
20703 	ucmd_buf.uscsi_rqlen = 0;
20704 	ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
20705 	ucmd_buf.uscsi_timeout = 200;	/* Excessive legacy value */
20706 
20707 	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20708 	    UIO_SYSSPACE, SD_PATH_DIRECT);
20709 
20710 	/*
20711 	 * Only handle status == 0 here; the upper-level caller
20712 	 * will apply a different assessment based on the context.
20713 	 */
20714 	if (status == 0)
20715 		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20716 
20717 	if ((status == 0) && (residp != NULL)) {
20718 		*residp = ucmd_buf.uscsi_resid;
20719 	}
20720 
20721 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
20722 
20723 	return (status);
20724 }
20725 
20726 
20727 /*
20728  * Function: sd_send_scsi_TEST_UNIT_READY
20729  *
20730  * Description: Issue the scsi TEST UNIT READY command.
20731  *		This routine can be told to set the flag USCSI_DIAGNOSE to
20732  *		prevent retrying failed commands. Use this when the intent
20733  *		is either to check for device readiness, to clear a Unit
20734  *		Attention, or to clear any outstanding sense data.
20735  *		However, under specific conditions the expected behavior
20736  *		is for retries to bring a device ready, so use the flag
20737  *		with caution.
20738  *
20739  * Arguments: ssc   - ssc contains pointer to driver soft state (unit)
20740  *		structure for this target.
20741  *	      flag:  SD_CHECK_FOR_MEDIA: return ENXIO if no media present
20742  *		     SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
20743  *		     0: don't check for media present, do retries on cmd.
20744  *
20745  * Return Code: 0 - Success
20746  *		EIO - IO error
20747  *		EACCES - Reservation conflict detected
20748  *		ENXIO  - Not Ready, medium not present
20749  *		errno return code from sd_ssc_send()
20750  *
20751  * Context: Can sleep.  Does not return until command is completed.
20752  */
20753 
20754 static int
20755 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag)
20756 {
20757 	struct scsi_extended_sense	sense_buf;
20758 	union scsi_cdb		cdb;
20759 	struct uscsi_cmd	ucmd_buf;
20760 	int			status;
20761 	struct sd_lun		*un;
20762 
20763 	ASSERT(ssc != NULL);
20764 	un = ssc->ssc_un;
20765 	ASSERT(un != NULL);
20766 	ASSERT(!mutex_owned(SD_MUTEX(un)));
20767 
20768 	SD_TRACE(SD_LOG_IO, un,
20769 	    "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
20770 
20771 	/*
20772 	 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
20773 	 * timeouts when they receive a TUR and the queue is not empty.
Check
20774 	 * the configuration flag set during attach (indicating the drive has
20775 	 * this firmware bug) and un_ncmds_in_transport before issuing the
20776 	 * TUR. If there are pending
20777 	 * commands, return success; this is somewhat arbitrary, but it is
20778 	 * acceptable for non-removables (i.e. the eliteI disks) and
20779 	 * non-clustering configurations.
20780 	 */
20781 	if (un->un_f_cfg_tur_check == TRUE) {
20782 		mutex_enter(SD_MUTEX(un));
20783 		if (un->un_ncmds_in_transport != 0) {
20784 			mutex_exit(SD_MUTEX(un));
20785 			return (0);
20786 		}
20787 		mutex_exit(SD_MUTEX(un));
20788 	}
20789 
20790 	bzero(&cdb, sizeof (cdb));
20791 	bzero(&ucmd_buf, sizeof (ucmd_buf));
20792 	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20793 
20794 	cdb.scc_cmd = SCMD_TEST_UNIT_READY;
20795 
20796 	ucmd_buf.uscsi_cdb = (char *)&cdb;
20797 	ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20798 	ucmd_buf.uscsi_bufaddr = NULL;
20799 	ucmd_buf.uscsi_buflen = 0;
20800 	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20801 	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20802 	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
20803 
20804 	/* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */
20805 	if ((flag & SD_DONT_RETRY_TUR) != 0) {
20806 		ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE;
20807 	}
20808 	ucmd_buf.uscsi_timeout = 60;
20809 
20810 	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20811 	    UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT :
20812 	    SD_PATH_STANDARD));
20813 
20814 	switch (status) {
20815 	case 0:
20816 		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20817 		break;	/* Success! */
20818 	case EIO:
20819 		switch (ucmd_buf.uscsi_status) {
20820 		case STATUS_RESERVATION_CONFLICT:
20821 			status = EACCES;
20822 			break;
20823 		case STATUS_CHECK:
20824 			if ((flag & SD_CHECK_FOR_MEDIA) == 0) {
20825 				break;
20826 			}
20827 			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20828 			    (scsi_sense_key((uint8_t *)&sense_buf) ==
20829 			    KEY_NOT_READY) &&
20830 			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) {
20831 				status = ENXIO;
20832 			}
20833 			break;
20834 		default:
20835 			break;
20836 		}
20837 		break;
20838 	default:
20839 		break;
20840 	}
20841 
20842 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n");
20843 
20844 	return (status);
20845 }
20846 
20847 /*
20848  * Function: sd_send_scsi_PERSISTENT_RESERVE_IN
20849  *
20850  * Description: Issue the scsi PERSISTENT RESERVE IN command.
20851  *
20852  * Arguments: ssc   - ssc contains pointer to driver soft state (unit)
20853  *		structure for this target; usr_cmd is SD_READ_KEYS or
20854  *		SD_READ_RESV; data_bufp/data_len: receive buffer (may be NULL).
20855  * Return Code: 0 - Success
20856  *		EACCES
20857  *		ENOTSUP
20858  *		errno return code from sd_ssc_send()
20859  *
20860  * Context: Can sleep.  Does not return until command is completed.
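 *
 * For example (illustrative), the MHIOCGRP_INKEYS ioctl path issues
 * this command with usr_cmd == SD_READ_KEYS to retrieve the registered
 * reservation keys.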
20861 */ 20862 20863 static int 20864 sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd, 20865 uint16_t data_len, uchar_t *data_bufp) 20866 { 20867 struct scsi_extended_sense sense_buf; 20868 union scsi_cdb cdb; 20869 struct uscsi_cmd ucmd_buf; 20870 int status; 20871 int no_caller_buf = FALSE; 20872 struct sd_lun *un; 20873 20874 ASSERT(ssc != NULL); 20875 un = ssc->ssc_un; 20876 ASSERT(un != NULL); 20877 ASSERT(!mutex_owned(SD_MUTEX(un))); 20878 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 20879 20880 SD_TRACE(SD_LOG_IO, un, 20881 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 20882 20883 bzero(&cdb, sizeof (cdb)); 20884 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20885 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20886 if (data_bufp == NULL) { 20887 /* Allocate a default buf if the caller did not give one */ 20888 ASSERT(data_len == 0); 20889 data_len = MHIOC_RESV_KEY_SIZE; 20890 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 20891 no_caller_buf = TRUE; 20892 } 20893 20894 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 20895 cdb.cdb_opaque[1] = usr_cmd; 20896 FORMG1COUNT(&cdb, data_len); 20897 20898 ucmd_buf.uscsi_cdb = (char *)&cdb; 20899 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20900 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 20901 ucmd_buf.uscsi_buflen = data_len; 20902 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20903 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20904 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20905 ucmd_buf.uscsi_timeout = 60; 20906 20907 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20908 UIO_SYSSPACE, SD_PATH_STANDARD); 20909 20910 switch (status) { 20911 case 0: 20912 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20913 20914 break; /* Success! */ 20915 case EIO: 20916 switch (ucmd_buf.uscsi_status) { 20917 case STATUS_RESERVATION_CONFLICT: 20918 status = EACCES; 20919 break; 20920 case STATUS_CHECK: 20921 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20922 (scsi_sense_key((uint8_t *)&sense_buf) == 20923 KEY_ILLEGAL_REQUEST)) { 20924 status = ENOTSUP; 20925 } 20926 break; 20927 default: 20928 break; 20929 } 20930 break; 20931 default: 20932 break; 20933 } 20934 20935 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 20936 20937 if (no_caller_buf == TRUE) { 20938 kmem_free(data_bufp, data_len); 20939 } 20940 20941 return (status); 20942 } 20943 20944 20945 /* 20946 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 20947 * 20948 * Description: This routine is the driver entry point for handling CD-ROM 20949 * multi-host persistent reservation requests (MHIOCGRP_INKEYS, 20950 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the 20951 * device. 20952 * 20953 * Arguments: ssc - ssc contains un - pointer to soft state struct 20954 * for the target. 20955 * usr_cmd SCSI-3 reservation facility command (one of 20956 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 20957 * SD_SCSI3_PREEMPTANDABORT) 20958 * usr_bufp - user provided pointer register, reserve descriptor or 20959 * preempt and abort structure (mhioc_register_t, 20960 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 20961 * 20962 * Return Code: 0 - Success 20963 * EACCES 20964 * ENOTSUP 20965 * errno return code from sd_ssc_send() 20966 * 20967 * Context: Can sleep. Does not return until command is completed. 
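 *
 * The 24-byte parameter list built below (sd_prout_t) carries the
 * SPC-3 PERSISTENT RESERVE OUT fields: an 8-byte reservation key, an
 * 8-byte service action (or victim) key, a 4-byte scope-specific
 * address, and the APTPL flag. (A summary for orientation, not a
 * normative description.)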
20968 */ 20969 20970 static int 20971 sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd, 20972 uchar_t *usr_bufp) 20973 { 20974 struct scsi_extended_sense sense_buf; 20975 union scsi_cdb cdb; 20976 struct uscsi_cmd ucmd_buf; 20977 int status; 20978 uchar_t data_len = sizeof (sd_prout_t); 20979 sd_prout_t *prp; 20980 struct sd_lun *un; 20981 20982 ASSERT(ssc != NULL); 20983 un = ssc->ssc_un; 20984 ASSERT(un != NULL); 20985 ASSERT(!mutex_owned(SD_MUTEX(un))); 20986 ASSERT(data_len == 24); /* required by scsi spec */ 20987 20988 SD_TRACE(SD_LOG_IO, un, 20989 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 20990 20991 if (usr_bufp == NULL) { 20992 return (EINVAL); 20993 } 20994 20995 bzero(&cdb, sizeof (cdb)); 20996 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20997 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20998 prp = kmem_zalloc(data_len, KM_SLEEP); 20999 21000 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 21001 cdb.cdb_opaque[1] = usr_cmd; 21002 FORMG1COUNT(&cdb, data_len); 21003 21004 ucmd_buf.uscsi_cdb = (char *)&cdb; 21005 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 21006 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 21007 ucmd_buf.uscsi_buflen = data_len; 21008 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21009 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21010 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 21011 ucmd_buf.uscsi_timeout = 60; 21012 21013 switch (usr_cmd) { 21014 case SD_SCSI3_REGISTER: { 21015 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 21016 21017 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 21018 bcopy(ptr->newkey.key, prp->service_key, 21019 MHIOC_RESV_KEY_SIZE); 21020 prp->aptpl = ptr->aptpl; 21021 break; 21022 } 21023 case SD_SCSI3_RESERVE: 21024 case SD_SCSI3_RELEASE: { 21025 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 21026 21027 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 21028 prp->scope_address = BE_32(ptr->scope_specific_addr); 21029 cdb.cdb_opaque[2] = ptr->type; 21030 break; 21031 } 21032 case SD_SCSI3_PREEMPTANDABORT: { 21033 mhioc_preemptandabort_t *ptr = 21034 (mhioc_preemptandabort_t *)usr_bufp; 21035 21036 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 21037 bcopy(ptr->victim_key.key, prp->service_key, 21038 MHIOC_RESV_KEY_SIZE); 21039 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 21040 cdb.cdb_opaque[2] = ptr->resvdesc.type; 21041 ucmd_buf.uscsi_flags |= USCSI_HEAD; 21042 break; 21043 } 21044 case SD_SCSI3_REGISTERANDIGNOREKEY: 21045 { 21046 mhioc_registerandignorekey_t *ptr; 21047 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 21048 bcopy(ptr->newkey.key, 21049 prp->service_key, MHIOC_RESV_KEY_SIZE); 21050 prp->aptpl = ptr->aptpl; 21051 break; 21052 } 21053 default: 21054 ASSERT(FALSE); 21055 break; 21056 } 21057 21058 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21059 UIO_SYSSPACE, SD_PATH_STANDARD); 21060 21061 switch (status) { 21062 case 0: 21063 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21064 break; /* Success! 
*/ 21065 case EIO: 21066 switch (ucmd_buf.uscsi_status) { 21067 case STATUS_RESERVATION_CONFLICT: 21068 status = EACCES; 21069 break; 21070 case STATUS_CHECK: 21071 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 21072 (scsi_sense_key((uint8_t *)&sense_buf) == 21073 KEY_ILLEGAL_REQUEST)) { 21074 status = ENOTSUP; 21075 } 21076 break; 21077 default: 21078 break; 21079 } 21080 break; 21081 default: 21082 break; 21083 } 21084 21085 kmem_free(prp, data_len); 21086 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 21087 return (status); 21088 } 21089 21090 21091 /* 21092 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 21093 * 21094 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 21095 * 21096 * Arguments: un - pointer to the target's soft state struct 21097 * dkc - pointer to the callback structure 21098 * 21099 * Return Code: 0 - success 21100 * errno-type error code 21101 * 21102 * Context: kernel thread context only. 21103 * 21104 * _______________________________________________________________ 21105 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 21106 * |FLUSH_VOLATILE| | operation | 21107 * |______________|______________|_________________________________| 21108 * | 0 | NULL | Synchronous flush on both | 21109 * | | | volatile and non-volatile cache | 21110 * |______________|______________|_________________________________| 21111 * | 1 | NULL | Synchronous flush on volatile | 21112 * | | | cache; disk drivers may suppress| 21113 * | | | flush if disk table indicates | 21114 * | | | non-volatile cache | 21115 * |______________|______________|_________________________________| 21116 * | 0 | !NULL | Asynchronous flush on both | 21117 * | | | volatile and non-volatile cache;| 21118 * |______________|______________|_________________________________| 21119 * | 1 | !NULL | Asynchronous flush on volatile | 21120 * | | | cache; disk drivers may suppress| 21121 * | | | flush if disk table indicates | 21122 * | | | non-volatile cache | 21123 * |______________|______________|_________________________________| 21124 * 21125 */ 21126 21127 static int 21128 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 21129 { 21130 struct sd_uscsi_info *uip; 21131 struct uscsi_cmd *uscmd; 21132 union scsi_cdb *cdb; 21133 struct buf *bp; 21134 int rval = 0; 21135 int is_async; 21136 21137 SD_TRACE(SD_LOG_IO, un, 21138 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 21139 21140 ASSERT(un != NULL); 21141 ASSERT(!mutex_owned(SD_MUTEX(un))); 21142 21143 if (dkc == NULL || dkc->dkc_callback == NULL) { 21144 is_async = FALSE; 21145 } else { 21146 is_async = TRUE; 21147 } 21148 21149 mutex_enter(SD_MUTEX(un)); 21150 /* check whether cache flush should be suppressed */ 21151 if (un->un_f_suppress_cache_flush == TRUE) { 21152 mutex_exit(SD_MUTEX(un)); 21153 /* 21154 * suppress the cache flush if the device is told to do 21155 * so by sd.conf or disk table 21156 */ 21157 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 21158 skip the cache flush since suppress_cache_flush is %d!\n", 21159 un->un_f_suppress_cache_flush); 21160 21161 if (is_async == TRUE) { 21162 /* invoke callback for asynchronous flush */ 21163 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 21164 } 21165 return (rval); 21166 } 21167 mutex_exit(SD_MUTEX(un)); 21168 21169 /* 21170 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 21171 * set properly 21172 */ 21173 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 21174 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 21175 21176 
mutex_enter(SD_MUTEX(un)); 21177 if (dkc != NULL && un->un_f_sync_nv_supported && 21178 (dkc->dkc_flag & FLUSH_VOLATILE)) { 21179 /* 21180 * if the device supports SYNC_NV bit, turn on 21181 * the SYNC_NV bit to only flush volatile cache 21182 */ 21183 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 21184 } 21185 mutex_exit(SD_MUTEX(un)); 21186 21187 /* 21188 * First get some memory for the uscsi_cmd struct and cdb 21189 * and initialize for SYNCHRONIZE_CACHE cmd. 21190 */ 21191 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 21192 uscmd->uscsi_cdblen = CDB_GROUP1; 21193 uscmd->uscsi_cdb = (caddr_t)cdb; 21194 uscmd->uscsi_bufaddr = NULL; 21195 uscmd->uscsi_buflen = 0; 21196 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 21197 uscmd->uscsi_rqlen = SENSE_LENGTH; 21198 uscmd->uscsi_rqresid = SENSE_LENGTH; 21199 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 21200 uscmd->uscsi_timeout = sd_io_time; 21201 21202 /* 21203 * Allocate an sd_uscsi_info struct and fill it with the info 21204 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 21205 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 21206 * since we allocate the buf here in this function, we do not 21207 * need to preserve the prior contents of b_private. 21208 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 21209 */ 21210 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 21211 uip->ui_flags = SD_PATH_DIRECT; 21212 uip->ui_cmdp = uscmd; 21213 21214 bp = getrbuf(KM_SLEEP); 21215 bp->b_private = uip; 21216 21217 /* 21218 * Setup buffer to carry uscsi request. 21219 */ 21220 bp->b_flags = B_BUSY; 21221 bp->b_bcount = 0; 21222 bp->b_blkno = 0; 21223 21224 if (is_async == TRUE) { 21225 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 21226 uip->ui_dkc = *dkc; 21227 } 21228 21229 bp->b_edev = SD_GET_DEV(un); 21230 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 21231 21232 /* 21233 * Unset un_f_sync_cache_required flag 21234 */ 21235 mutex_enter(SD_MUTEX(un)); 21236 un->un_f_sync_cache_required = FALSE; 21237 mutex_exit(SD_MUTEX(un)); 21238 21239 (void) sd_uscsi_strategy(bp); 21240 21241 /* 21242 * If synchronous request, wait for completion 21243 * If async just return and let b_iodone callback 21244 * cleanup. 21245 * NOTE: On return, u_ncmds_in_driver will be decremented, 21246 * but it was also incremented in sd_uscsi_strategy(), so 21247 * we should be ok. 21248 */ 21249 if (is_async == FALSE) { 21250 (void) biowait(bp); 21251 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 21252 } 21253 21254 return (rval); 21255 } 21256 21257 21258 static int 21259 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 21260 { 21261 struct sd_uscsi_info *uip; 21262 struct uscsi_cmd *uscmd; 21263 uint8_t *sense_buf; 21264 struct sd_lun *un; 21265 int status; 21266 union scsi_cdb *cdb; 21267 21268 uip = (struct sd_uscsi_info *)(bp->b_private); 21269 ASSERT(uip != NULL); 21270 21271 uscmd = uip->ui_cmdp; 21272 ASSERT(uscmd != NULL); 21273 21274 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 21275 ASSERT(sense_buf != NULL); 21276 21277 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 21278 ASSERT(un != NULL); 21279 21280 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 21281 21282 status = geterror(bp); 21283 switch (status) { 21284 case 0: 21285 break; /* Success! 
*/
21286 	case EIO:
21287 		switch (uscmd->uscsi_status) {
21288 		case STATUS_RESERVATION_CONFLICT:
21289 			/* Ignore reservation conflict */
21290 			status = 0;
21291 			goto done;
21292 
21293 		case STATUS_CHECK:
21294 			if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
21295 			    (scsi_sense_key(sense_buf) ==
21296 			    KEY_ILLEGAL_REQUEST)) {
21297 				/* Ignore Illegal Request error */
21298 				if (cdb->cdb_un.tag & SD_SYNC_NV_BIT) {
21299 					mutex_enter(SD_MUTEX(un));
21300 					un->un_f_sync_nv_supported = FALSE;
21301 					mutex_exit(SD_MUTEX(un));
21302 					status = 0;
21303 					SD_TRACE(SD_LOG_IO, un,
21304 					    "un_f_sync_nv_supported \
					    is set to false.\n");
21306 					goto done;
21307 				}
21308 
21309 				mutex_enter(SD_MUTEX(un));
21310 				un->un_f_sync_cache_supported = FALSE;
21311 				mutex_exit(SD_MUTEX(un));
21312 				SD_TRACE(SD_LOG_IO, un,
21313 				    "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \
				    un_f_sync_cache_supported set to false \
				    with asc = %x, ascq = %x\n",
21316 				    scsi_sense_asc(sense_buf),
21317 				    scsi_sense_ascq(sense_buf));
21318 				status = ENOTSUP;
21319 				goto done;
21320 			}
21321 			break;
21322 		default:
21323 			break;
21324 		}
21325 		/* FALLTHRU */
21326 	default:
21327 		/*
21328 		 * Turn on the un_f_sync_cache_required flag
21329 		 * since the SYNC CACHE command failed
21330 		 */
21331 		mutex_enter(SD_MUTEX(un));
21332 		un->un_f_sync_cache_required = TRUE;
21333 		mutex_exit(SD_MUTEX(un));
21334 
21335 		/*
21336 		 * Don't log an error message if this device
21337 		 * has removable media.
21338 		 */
21339 		if (!un->un_f_has_removable_media) {
21340 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
21341 			    "SYNCHRONIZE CACHE command failed (%d)\n", status);
21342 		}
21343 		break;
21344 	}
21345 
21346 done:
21347 	if (uip->ui_dkc.dkc_callback != NULL) {
21348 		(*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
21349 	}
21350 
21351 	ASSERT((bp->b_flags & B_REMAPPED) == 0);
21352 	freerbuf(bp);
21353 	kmem_free(uip, sizeof (struct sd_uscsi_info));
21354 	kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
21355 	kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
21356 	kmem_free(uscmd, sizeof (struct uscsi_cmd));
21357 
21358 	return (status);
21359 }
21360 
21361 
21362 /*
21363  * Function: sd_send_scsi_GET_CONFIGURATION
21364  *
21365  * Description: Issues the get configuration command to the device.
21366  *		Called from sd_check_for_writable_cd & sd_get_media_info;
21367  *		the caller must ensure that buflen = SD_PROFILE_HEADER_LEN.
21368  * Arguments: ssc      - ssc contains pointer to driver soft state
21369  *		ucmdbuf  - uscsi command buffer supplied by the caller
21370  *		rqbuf    - request sense buffer
21371  *		rqbuflen - length of the request sense buffer
21372  *		bufaddr  - buffer to receive the configuration data
21373  *		buflen   - length of bufaddr (SD_PROFILE_HEADER_LEN)
21374  *		path_flag - SD_PATH_DIRECT or SD_PATH_DIRECT_PRIORITY
21375  *
21376  * Return Code: 0 - Success
21377  *		errno return code from sd_ssc_send()
21378  *
21379  * Context: Can sleep. Does not return until command is completed.
21380  *
21381  */
21382 
21383 static int
21384 sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf,
21385 	uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
21386 	int path_flag)
21387 {
21388 	char	cdb[CDB_GROUP1];
21389 	int	status;
21390 	struct sd_lun	*un;
21391 
21392 	ASSERT(ssc != NULL);
21393 	un = ssc->ssc_un;
21394 	ASSERT(un != NULL);
21395 	ASSERT(!mutex_owned(SD_MUTEX(un)));
21396 	ASSERT(bufaddr != NULL);
21397 	ASSERT(ucmdbuf != NULL);
21398 	ASSERT(rqbuf != NULL);
21399 
21400 	SD_TRACE(SD_LOG_IO, un,
21401 	    "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);
21402 
21403 	bzero(cdb, sizeof (cdb));
21404 	bzero(ucmdbuf, sizeof (struct uscsi_cmd));
21405 	bzero(rqbuf, rqbuflen);
21406 	bzero(bufaddr, buflen);
21407 
21408 	/*
21409 	 * Set up cdb field for the get configuration command.
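	 * (CDB byte 1 is set to 0x02, the Requested Type that returns the
	 * single feature named by the Starting Feature Number, which is
	 * left at zero here; CDB byte 8 carries the allocation length.)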
21410 */ 21411 cdb[0] = SCMD_GET_CONFIGURATION; 21412 cdb[1] = 0x02; /* Requested Type */ 21413 cdb[8] = SD_PROFILE_HEADER_LEN; 21414 ucmdbuf->uscsi_cdb = cdb; 21415 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 21416 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 21417 ucmdbuf->uscsi_buflen = buflen; 21418 ucmdbuf->uscsi_timeout = sd_io_time; 21419 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 21420 ucmdbuf->uscsi_rqlen = rqbuflen; 21421 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 21422 21423 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 21424 UIO_SYSSPACE, path_flag); 21425 21426 switch (status) { 21427 case 0: 21428 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21429 break; /* Success! */ 21430 case EIO: 21431 switch (ucmdbuf->uscsi_status) { 21432 case STATUS_RESERVATION_CONFLICT: 21433 status = EACCES; 21434 break; 21435 default: 21436 break; 21437 } 21438 break; 21439 default: 21440 break; 21441 } 21442 21443 if (status == 0) { 21444 SD_DUMP_MEMORY(un, SD_LOG_IO, 21445 "sd_send_scsi_GET_CONFIGURATION: data", 21446 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 21447 } 21448 21449 SD_TRACE(SD_LOG_IO, un, 21450 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 21451 21452 return (status); 21453 } 21454 21455 /* 21456 * Function: sd_send_scsi_feature_GET_CONFIGURATION 21457 * 21458 * Description: Issues the get configuration command to the device to 21459 * retrieve a specific feature. Called from 21460 * sd_check_for_writable_cd & sd_set_mmc_caps. 21461 * Arguments: ssc 21462 * ucmdbuf 21463 * rqbuf 21464 * rqbuflen 21465 * bufaddr 21466 * buflen 21467 * feature 21468 * 21469 * Return Code: 0 - Success 21470 * errno return code from sd_ssc_send() 21471 * 21472 * Context: Can sleep. Does not return until command is completed. 21473 * 21474 */ 21475 static int 21476 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 21477 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 21478 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 21479 { 21480 char cdb[CDB_GROUP1]; 21481 int status; 21482 struct sd_lun *un; 21483 21484 ASSERT(ssc != NULL); 21485 un = ssc->ssc_un; 21486 ASSERT(un != NULL); 21487 ASSERT(!mutex_owned(SD_MUTEX(un))); 21488 ASSERT(bufaddr != NULL); 21489 ASSERT(ucmdbuf != NULL); 21490 ASSERT(rqbuf != NULL); 21491 21492 SD_TRACE(SD_LOG_IO, un, 21493 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 21494 21495 bzero(cdb, sizeof (cdb)); 21496 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 21497 bzero(rqbuf, rqbuflen); 21498 bzero(bufaddr, buflen); 21499 21500 /* 21501 * Set up cdb field for the get configuration command. 21502 */ 21503 cdb[0] = SCMD_GET_CONFIGURATION; 21504 cdb[1] = 0x02; /* Requested Type */ 21505 cdb[3] = feature; 21506 cdb[8] = buflen; 21507 ucmdbuf->uscsi_cdb = cdb; 21508 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 21509 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 21510 ucmdbuf->uscsi_buflen = buflen; 21511 ucmdbuf->uscsi_timeout = sd_io_time; 21512 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 21513 ucmdbuf->uscsi_rqlen = rqbuflen; 21514 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 21515 21516 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 21517 UIO_SYSSPACE, path_flag); 21518 21519 switch (status) { 21520 case 0: 21521 21522 break; /* Success! 
*/
21523 	case EIO:
21524 		switch (ucmdbuf->uscsi_status) {
21525 		case STATUS_RESERVATION_CONFLICT:
21526 			status = EACCES;
21527 			break;
21528 		default:
21529 			break;
21530 		}
21531 		break;
21532 	default:
21533 		break;
21534 	}
21535 
21536 	if (status == 0) {
21537 		SD_DUMP_MEMORY(un, SD_LOG_IO,
21538 		    "sd_send_scsi_feature_GET_CONFIGURATION: data",
21539 		    (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
21540 	}
21541 
21542 	SD_TRACE(SD_LOG_IO, un,
21543 	    "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");
21544 
21545 	return (status);
21546 }
21547 
21548 
21549 /*
21550  * Function: sd_send_scsi_MODE_SENSE
21551  *
21552  * Description: Utility function for issuing a scsi MODE SENSE command.
21553  *		Note: This routine uses a consistent implementation for Group0,
21554  *		Group1, and Group2 commands across all platforms. ATAPI devices
21555  *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
21556  *
21557  * Arguments: ssc   - ssc contains pointer to driver soft state (unit)
21558  *		structure for this target.
21559  *	      cdbsize - size of the CDB to be used: CDB_GROUP0 (6 byte) or
21560  *		CDB_GROUP[1|2] (10 byte).
21561  *	      bufaddr - buffer for page data retrieved from the target.
21562  *	      buflen  - size of page to be retrieved.
21563  *	      page_code - page code of data to be retrieved from the target.
21564  *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21565  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21566  *			to use the USCSI "direct" chain and bypass the normal
21567  *			command waitq.
21568  *
21569  * Return Code: 0 - Success
21570  *		errno return code from sd_ssc_send()
21571  *
21572  * Context: Can sleep.  Does not return until command is completed.
21573  */
21574 
21575 static int
21576 sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
21577 	size_t buflen, uchar_t page_code, int path_flag)
21578 {
21579 	struct scsi_extended_sense	sense_buf;
21580 	union scsi_cdb		cdb;
21581 	struct uscsi_cmd	ucmd_buf;
21582 	int			status;
21583 	int			headlen;
21584 	struct sd_lun		*un;
21585 
21586 	ASSERT(ssc != NULL);
21587 	un = ssc->ssc_un;
21588 	ASSERT(un != NULL);
21589 	ASSERT(!mutex_owned(SD_MUTEX(un)));
21590 	ASSERT(bufaddr != NULL);
21591 	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
21592 	    (cdbsize == CDB_GROUP2));
21593 
21594 	SD_TRACE(SD_LOG_IO, un,
21595 	    "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);
21596 
21597 	bzero(&cdb, sizeof (cdb));
21598 	bzero(&ucmd_buf, sizeof (ucmd_buf));
21599 	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21600 	bzero(bufaddr, buflen);
21601 
21602 	if (cdbsize == CDB_GROUP0) {
21603 		cdb.scc_cmd = SCMD_MODE_SENSE;
21604 		cdb.cdb_opaque[2] = page_code;
21605 		FORMG0COUNT(&cdb, buflen);
21606 		headlen = MODE_HEADER_LENGTH;
21607 	} else {
21608 		cdb.scc_cmd = SCMD_MODE_SENSE_G1;
21609 		cdb.cdb_opaque[2] = page_code;
21610 		FORMG1COUNT(&cdb, buflen);
21611 		headlen = MODE_HEADER_LENGTH_GRP2;
21612 	}
21613 
21614 	ASSERT(headlen <= buflen);
21615 	SD_FILL_SCSI1_LUN_CDB(un, &cdb);
21616 
21617 	ucmd_buf.uscsi_cdb = (char *)&cdb;
21618 	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
21619 	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
21620 	ucmd_buf.uscsi_buflen = buflen;
21621 	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21622 	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21623 	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
21624 	ucmd_buf.uscsi_timeout = 60;
21625 
21626 	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21627 	    UIO_SYSSPACE, path_flag);
21628 
21629 	switch (status) {
21630 	case 0:
21631 		/*
21632 		 * sr_check_wp() uses the 0x3f page code and checks the header
21633 		 * of the mode page to determine if the target device is
21634 		 * write-protected. But some USB devices return 0 bytes for
21635 		 * the 0x3f page code. For this case, make sure that at least
21636 		 * the mode page header is returned.
21637 		 */
21638 		if (buflen - ucmd_buf.uscsi_resid <  headlen) {
21639 			status = EIO;
21640 			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
21641 			    "mode page header is not returned");
21642 		}
21643 		break;	/* Success! */
21644 	case EIO:
21645 		switch (ucmd_buf.uscsi_status) {
21646 		case STATUS_RESERVATION_CONFLICT:
21647 			status = EACCES;
21648 			break;
21649 		default:
21650 			break;
21651 		}
21652 		break;
21653 	default:
21654 		break;
21655 	}
21656 
21657 	if (status == 0) {
21658 		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
21659 		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
21660 	}
21661 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");
21662 
21663 	return (status);
21664 }
21665 
21666 
21667 /*
21668  * Function: sd_send_scsi_MODE_SELECT
21669  *
21670  * Description: Utility function for issuing a scsi MODE SELECT command.
21671  *		Note: This routine uses a consistent implementation for Group0,
21672  *		Group1, and Group2 commands across all platforms. ATAPI devices
21673  *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
21674  *
21675  * Arguments: ssc   - ssc contains pointer to driver soft state (unit)
21676  *		structure for this target.
21677  *	      cdbsize - size of the CDB to be used: CDB_GROUP0 (6 byte) or
21678  *		CDB_GROUP[1|2] (10 byte).
21679  *	      bufaddr - buffer for page data retrieved from the target.
21680  *	      buflen  - size of page to be retrieved.
21681  *	      save_page - boolean to determine if SP bit should be set.
21682  *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21683  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21684  *			to use the USCSI "direct" chain and bypass the normal
21685  *			command waitq.
21686  *
21687  * Return Code: 0 - Success
21688  *		errno return code from sd_ssc_send()
21689  *
21690  * Context: Can sleep.  Does not return until command is completed.
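 *
 * Usage sketch (hypothetical names, for illustration only): a caller
 * typically fetches a page first, e.g.
 *
 *	(void) sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, buf, buflen,
 *	    page_code, SD_PATH_DIRECT);
 *
 * modifies the fields of interest in buf, and then writes it back:
 *
 *	(void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, buf, buflen,
 *	    SD_SAVE_PAGE, SD_PATH_DIRECT);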
21691  */
21692 
21693 static int
21694 sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
21695 	size_t buflen, uchar_t save_page, int path_flag)
21696 {
21697 	struct scsi_extended_sense	sense_buf;
21698 	union scsi_cdb		cdb;
21699 	struct uscsi_cmd	ucmd_buf;
21700 	int			status;
21701 	struct sd_lun		*un;
21702 
21703 	ASSERT(ssc != NULL);
21704 	un = ssc->ssc_un;
21705 	ASSERT(un != NULL);
21706 	ASSERT(!mutex_owned(SD_MUTEX(un)));
21707 	ASSERT(bufaddr != NULL);
21708 	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
21709 	    (cdbsize == CDB_GROUP2));
21710 
21711 	SD_TRACE(SD_LOG_IO, un,
21712 	    "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);
21713 
21714 	bzero(&cdb, sizeof (cdb));
21715 	bzero(&ucmd_buf, sizeof (ucmd_buf));
21716 	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21717 
21718 	/* Set the PF bit for many third party drives */
21719 	cdb.cdb_opaque[1] = 0x10;
21720 
21721 	/* Set the save page (SP) bit if given */
21722 	if (save_page == SD_SAVE_PAGE) {
21723 		cdb.cdb_opaque[1] |= 0x01;
21724 	}
21725 
21726 	if (cdbsize == CDB_GROUP0) {
21727 		cdb.scc_cmd = SCMD_MODE_SELECT;
21728 		FORMG0COUNT(&cdb, buflen);
21729 	} else {
21730 		cdb.scc_cmd = SCMD_MODE_SELECT_G1;
21731 		FORMG1COUNT(&cdb, buflen);
21732 	}
21733 
21734 	SD_FILL_SCSI1_LUN_CDB(un, &cdb);
21735 
21736 	ucmd_buf.uscsi_cdb = (char *)&cdb;
21737 	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
21738 	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
21739 	ucmd_buf.uscsi_buflen = buflen;
21740 	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21741 	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21742 	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
21743 	ucmd_buf.uscsi_timeout = 60;
21744 
21745 	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21746 	    UIO_SYSSPACE, path_flag);
21747 
21748 	switch (status) {
21749 	case 0:
21750 		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21751 		break;	/* Success! */
21752 	case EIO:
21753 		switch (ucmd_buf.uscsi_status) {
21754 		case STATUS_RESERVATION_CONFLICT:
21755 			status = EACCES;
21756 			break;
21757 		default:
21758 			break;
21759 		}
21760 		break;
21761 	default:
21762 		break;
21763 	}
21764 
21765 	if (status == 0) {
21766 		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
21767 		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
21768 	}
21769 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");
21770 
21771 	return (status);
21772 }
21773 
21774 
21775 /*
21776  * Function: sd_send_scsi_RDWR
21777  *
21778  * Description: Issue a scsi READ or WRITE command with the given parameters.
21779  *
21780  * Arguments: ssc   - ssc contains pointer to driver soft state (unit)
21781  *		structure for this target.
21782  *	      cmd:	 SCMD_READ or SCMD_WRITE
21783  *	      bufaddr:  Address of caller's buffer to receive the RDWR data
21784  *	      buflen:   Length of caller's buffer to receive the RDWR data.
21785  *	      start_block: Block number for the start of the RDWR operation.
21786  *			 (Assumes target-native block size.)
21787  *	      (Note: there is no residp argument; the residual of the RDWR
21788  *	      operation is not returned to the caller.)
21789  *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21790  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21791  *			to use the USCSI "direct" chain and bypass the normal
21792  *			command waitq.
21793  *
21794  * Return Code: 0 - Success
21795  *		errno return code from sd_ssc_send()
21796  *
21797  * Context: Can sleep.  Does not return until command is completed.
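 *
 * For example: a start_block above 0xFFFFFFFF requires a 16-byte
 * Group 4 CDB, ATAPI devices and large block addresses use a 10-byte
 * Group 1 CDB, and small transfers on non-ATAPI devices can use a
 * 6-byte Group 0 CDB; see the CDB size computation below.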
21798 */ 21799 21800 static int 21801 sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 21802 size_t buflen, daddr_t start_block, int path_flag) 21803 { 21804 struct scsi_extended_sense sense_buf; 21805 union scsi_cdb cdb; 21806 struct uscsi_cmd ucmd_buf; 21807 uint32_t block_count; 21808 int status; 21809 int cdbsize; 21810 uchar_t flag; 21811 struct sd_lun *un; 21812 21813 ASSERT(ssc != NULL); 21814 un = ssc->ssc_un; 21815 ASSERT(un != NULL); 21816 ASSERT(!mutex_owned(SD_MUTEX(un))); 21817 ASSERT(bufaddr != NULL); 21818 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 21819 21820 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 21821 21822 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 21823 return (EINVAL); 21824 } 21825 21826 mutex_enter(SD_MUTEX(un)); 21827 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 21828 mutex_exit(SD_MUTEX(un)); 21829 21830 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 21831 21832 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 21833 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 21834 bufaddr, buflen, start_block, block_count); 21835 21836 bzero(&cdb, sizeof (cdb)); 21837 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21838 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21839 21840 /* Compute CDB size to use */ 21841 if (start_block > 0xffffffff) 21842 cdbsize = CDB_GROUP4; 21843 else if ((start_block & 0xFFE00000) || 21844 (un->un_f_cfg_is_atapi == TRUE)) 21845 cdbsize = CDB_GROUP1; 21846 else 21847 cdbsize = CDB_GROUP0; 21848 21849 switch (cdbsize) { 21850 case CDB_GROUP0: /* 6-byte CDBs */ 21851 cdb.scc_cmd = cmd; 21852 FORMG0ADDR(&cdb, start_block); 21853 FORMG0COUNT(&cdb, block_count); 21854 break; 21855 case CDB_GROUP1: /* 10-byte CDBs */ 21856 cdb.scc_cmd = cmd | SCMD_GROUP1; 21857 FORMG1ADDR(&cdb, start_block); 21858 FORMG1COUNT(&cdb, block_count); 21859 break; 21860 case CDB_GROUP4: /* 16-byte CDBs */ 21861 cdb.scc_cmd = cmd | SCMD_GROUP4; 21862 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 21863 FORMG4COUNT(&cdb, block_count); 21864 break; 21865 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 21866 default: 21867 /* All others reserved */ 21868 return (EINVAL); 21869 } 21870 21871 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 21872 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21873 21874 ucmd_buf.uscsi_cdb = (char *)&cdb; 21875 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21876 ucmd_buf.uscsi_bufaddr = bufaddr; 21877 ucmd_buf.uscsi_buflen = buflen; 21878 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21879 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21880 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 21881 ucmd_buf.uscsi_timeout = 60; 21882 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21883 UIO_SYSSPACE, path_flag); 21884 21885 switch (status) { 21886 case 0: 21887 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21888 break; /* Success! */ 21889 case EIO: 21890 switch (ucmd_buf.uscsi_status) { 21891 case STATUS_RESERVATION_CONFLICT: 21892 status = EACCES; 21893 break; 21894 default: 21895 break; 21896 } 21897 break; 21898 default: 21899 break; 21900 } 21901 21902 if (status == 0) { 21903 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 21904 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21905 } 21906 21907 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 21908 21909 return (status); 21910 } 21911 21912 21913 /* 21914 * Function: sd_send_scsi_LOG_SENSE 21915 * 21916 * Description: Issue a scsi LOG_SENSE command with the given parameters. 
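 *		For example (illustrative values only), the temperature log
 *		page could be fetched with:
 *
 *			(void) sd_send_scsi_LOG_SENSE(ssc, logpage,
 *			    sizeof (logpage), TEMPERATURE_PAGE, 0x01, 0,
 *			    SD_PATH_DIRECT);
 *
 *		where logpage is a caller-supplied uchar_t array and page
 *		control 0x01 requests current cumulative values.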
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		    structure for this target.
 *		bufaddr - buffer to receive the retrieved log page data.
 *		buflen - size of the caller's buffer (and of the transfer).
 *		page_code - log page to retrieve.
 *		page_control - page control field of the CDB (e.g. 0x01 for
 *			current cumulative values).
 *		param_ptr - parameter pointer: first parameter to retrieve.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen,
	uchar_t page_code, uchar_t page_control, uint16_t param_ptr,
	int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb			cdb;
	struct uscsi_cmd		ucmd_buf;
	int				status;
	struct sd_lun			*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	cdb.scc_cmd = SCMD_LOG_SENSE_G1;
	cdb.cdb_opaque[2] = (page_control << 6) | page_code;
	cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8);
	cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF);
	FORMG1COUNT(&cdb, buflen);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) {
				/*
				 * ASC 0x24: INVALID FIELD IN CDB
				 */
				switch (page_code) {
				case START_STOP_CYCLE_PAGE:
					/*
					 * The start stop cycle counter is
					 * implemented as page 0x31 in earlier
					 * generation disks. In new generation
					 * disks the start stop cycle counter
					 * is implemented as page 0xE. To
					 * handle this case properly, if a
					 * request for log page 0xE fails we
					 * retry it using page 0x31.
					 *
					 * The network storage BU committed to
					 * maintaining page 0x31 for this
					 * purpose and will not implement any
					 * other page with page code 0x31
					 * until all disks transition to the
					 * standard page.
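					 *
					 * As a worked example of the retry
					 * below: with page_control 0x01, byte
					 * 2 of the original CDB is
					 * (0x01 << 6) | 0x0E = 0x4E; the
					 * retry rewrites it to
					 * (0x01 << 6) | 0x31 = 0x71.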
					 */
					mutex_enter(SD_MUTEX(un));
					un->un_start_stop_cycle_page =
					    START_STOP_CYCLE_VU_PAGE;
					cdb.cdb_opaque[2] =
					    (char)(page_control << 6) |
					    un->un_start_stop_cycle_page;
					mutex_exit(SD_MUTEX(un));
					sd_ssc_assessment(ssc, SD_FMT_IGNORE);
					status = sd_ssc_send(
					    ssc, &ucmd_buf, FKIOCTL,
					    UIO_SYSSPACE, path_flag);

					break;
				case TEMPERATURE_PAGE:
					status = ENOTTY;
					break;
				default:
					break;
				}
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
 *
 * Description: Issue the scsi GET EVENT STATUS NOTIFICATION command.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		    structure for this target.
 *		bufaddr - buffer to receive the event status data.
 *		buflen - size of the caller's buffer.
 *		class_req - requested event notification class mask.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc, uchar_t *bufaddr,
	size_t buflen, uchar_t class_req)
{
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(bufaddr, buflen);

	cdb.scc_cmd = SCMD_GET_EVENT_STATUS_NOTIFICATION;
	cdb.cdb_opaque[1] = 1; /* polled */
	cdb.cdb_opaque[4] = class_req;
	FORMG1COUNT(&cdb, buflen);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = NULL;
	ucmd_buf.uscsi_rqlen = 0;
	ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_DIRECT);

	/*
	 * Only handle status == 0; the upper-level caller
	 * will put a different assessment based on the context.
	 */
	if (status == 0) {
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);

		if (ucmd_buf.uscsi_resid != 0) {
			status = EIO;
		}
	}

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: exit\n");

	return (status);
}


static boolean_t
sd_gesn_media_data_valid(uchar_t *data)
{
	uint16_t	len;

	len = (data[1] << 8) | data[0];
	return ((len >= 6) &&
	    ((data[2] & SD_GESN_HEADER_NEA) == 0) &&
	    ((data[2] & SD_GESN_HEADER_CLASS) == SD_GESN_MEDIA_CLASS) &&
	    ((data[3] & (1 << SD_GESN_MEDIA_CLASS)) != 0));
}


/*
 * Function: sdioctl
 *
 * Description: Driver's ioctl(9e) entry point function.
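 *		A userland consumer reaches this entry point through
 *		ioctl(2) on a disk device node, for example (illustrative
 *		device path, error handling omitted):
 *
 *			struct dk_minfo mi;
 *			int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY);
 *			if (ioctl(fd, DKIOCGMEDIAINFO, &mi) == 0)
 *				(void) printf("lbsize %u\n", mi.dki_lbsize);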
22136 * 22137 * Arguments: dev - device number 22138 * cmd - ioctl operation to be performed 22139 * arg - user argument, contains data to be set or reference 22140 * parameter for get 22141 * flag - bit flag, indicating open settings, 32/64 bit type 22142 * cred_p - user credential pointer 22143 * rval_p - calling process return value (OPT) 22144 * 22145 * Return Code: EINVAL 22146 * ENOTTY 22147 * ENXIO 22148 * EIO 22149 * EFAULT 22150 * ENOTSUP 22151 * EPERM 22152 * 22153 * Context: Called from the device switch at normal priority. 22154 */ 22155 22156 static int 22157 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 22158 { 22159 struct sd_lun *un = NULL; 22160 int err = 0; 22161 int i = 0; 22162 cred_t *cr; 22163 int tmprval = EINVAL; 22164 boolean_t is_valid; 22165 sd_ssc_t *ssc; 22166 22167 /* 22168 * All device accesses go thru sdstrategy where we check on suspend 22169 * status 22170 */ 22171 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22172 return (ENXIO); 22173 } 22174 22175 ASSERT(!mutex_owned(SD_MUTEX(un))); 22176 22177 /* Initialize sd_ssc_t for internal uscsi commands */ 22178 ssc = sd_ssc_init(un); 22179 22180 is_valid = SD_IS_VALID_LABEL(un); 22181 22182 /* 22183 * Moved this wait from sd_uscsi_strategy to here for 22184 * reasons of deadlock prevention. Internal driver commands, 22185 * specifically those to change a devices power level, result 22186 * in a call to sd_uscsi_strategy. 22187 */ 22188 mutex_enter(SD_MUTEX(un)); 22189 while ((un->un_state == SD_STATE_SUSPENDED) || 22190 (un->un_state == SD_STATE_PM_CHANGING)) { 22191 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 22192 } 22193 /* 22194 * Twiddling the counter here protects commands from now 22195 * through to the top of sd_uscsi_strategy. Without the 22196 * counter inc. a power down, for example, could get in 22197 * after the above check for state is made and before 22198 * execution gets to the top of sd_uscsi_strategy. 22199 * That would cause problems. 
22200 */ 22201 un->un_ncmds_in_driver++; 22202 22203 if (!is_valid && 22204 (flag & (FNDELAY | FNONBLOCK))) { 22205 switch (cmd) { 22206 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 22207 case DKIOCGVTOC: 22208 case DKIOCGEXTVTOC: 22209 case DKIOCGAPART: 22210 case DKIOCPARTINFO: 22211 case DKIOCEXTPARTINFO: 22212 case DKIOCSGEOM: 22213 case DKIOCSAPART: 22214 case DKIOCGETEFI: 22215 case DKIOCPARTITION: 22216 case DKIOCSVTOC: 22217 case DKIOCSEXTVTOC: 22218 case DKIOCSETEFI: 22219 case DKIOCGMBOOT: 22220 case DKIOCSMBOOT: 22221 case DKIOCG_PHYGEOM: 22222 case DKIOCG_VIRTGEOM: 22223 #if defined(__i386) || defined(__amd64) 22224 case DKIOCSETEXTPART: 22225 #endif 22226 /* let cmlb handle it */ 22227 goto skip_ready_valid; 22228 22229 case CDROMPAUSE: 22230 case CDROMRESUME: 22231 case CDROMPLAYMSF: 22232 case CDROMPLAYTRKIND: 22233 case CDROMREADTOCHDR: 22234 case CDROMREADTOCENTRY: 22235 case CDROMSTOP: 22236 case CDROMSTART: 22237 case CDROMVOLCTRL: 22238 case CDROMSUBCHNL: 22239 case CDROMREADMODE2: 22240 case CDROMREADMODE1: 22241 case CDROMREADOFFSET: 22242 case CDROMSBLKMODE: 22243 case CDROMGBLKMODE: 22244 case CDROMGDRVSPEED: 22245 case CDROMSDRVSPEED: 22246 case CDROMCDDA: 22247 case CDROMCDXA: 22248 case CDROMSUBCODE: 22249 if (!ISCD(un)) { 22250 un->un_ncmds_in_driver--; 22251 ASSERT(un->un_ncmds_in_driver >= 0); 22252 mutex_exit(SD_MUTEX(un)); 22253 err = ENOTTY; 22254 goto done_without_assess; 22255 } 22256 break; 22257 case FDEJECT: 22258 case DKIOCEJECT: 22259 case CDROMEJECT: 22260 if (!un->un_f_eject_media_supported) { 22261 un->un_ncmds_in_driver--; 22262 ASSERT(un->un_ncmds_in_driver >= 0); 22263 mutex_exit(SD_MUTEX(un)); 22264 err = ENOTTY; 22265 goto done_without_assess; 22266 } 22267 break; 22268 case DKIOCFLUSHWRITECACHE: 22269 mutex_exit(SD_MUTEX(un)); 22270 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 22271 if (err != 0) { 22272 mutex_enter(SD_MUTEX(un)); 22273 un->un_ncmds_in_driver--; 22274 ASSERT(un->un_ncmds_in_driver >= 0); 22275 mutex_exit(SD_MUTEX(un)); 22276 err = EIO; 22277 goto done_quick_assess; 22278 } 22279 mutex_enter(SD_MUTEX(un)); 22280 /* FALLTHROUGH */ 22281 case DKIOCREMOVABLE: 22282 case DKIOCHOTPLUGGABLE: 22283 case DKIOCINFO: 22284 case DKIOCGMEDIAINFO: 22285 case DKIOCGMEDIAINFOEXT: 22286 case MHIOCENFAILFAST: 22287 case MHIOCSTATUS: 22288 case MHIOCTKOWN: 22289 case MHIOCRELEASE: 22290 case MHIOCGRP_INKEYS: 22291 case MHIOCGRP_INRESV: 22292 case MHIOCGRP_REGISTER: 22293 case MHIOCGRP_RESERVE: 22294 case MHIOCGRP_PREEMPTANDABORT: 22295 case MHIOCGRP_REGISTERANDIGNOREKEY: 22296 case CDROMCLOSETRAY: 22297 case USCSICMD: 22298 goto skip_ready_valid; 22299 default: 22300 break; 22301 } 22302 22303 mutex_exit(SD_MUTEX(un)); 22304 err = sd_ready_and_valid(ssc, SDPART(dev)); 22305 mutex_enter(SD_MUTEX(un)); 22306 22307 if (err != SD_READY_VALID) { 22308 switch (cmd) { 22309 case DKIOCSTATE: 22310 case CDROMGDRVSPEED: 22311 case CDROMSDRVSPEED: 22312 case FDEJECT: /* for eject command */ 22313 case DKIOCEJECT: 22314 case CDROMEJECT: 22315 case DKIOCREMOVABLE: 22316 case DKIOCHOTPLUGGABLE: 22317 break; 22318 default: 22319 if (un->un_f_has_removable_media) { 22320 err = ENXIO; 22321 } else { 22322 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 22323 if (err == SD_RESERVED_BY_OTHERS) { 22324 err = EACCES; 22325 } else { 22326 err = EIO; 22327 } 22328 } 22329 un->un_ncmds_in_driver--; 22330 ASSERT(un->un_ncmds_in_driver >= 0); 22331 mutex_exit(SD_MUTEX(un)); 22332 22333 goto done_without_assess; 22334 } 22335 } 22336 } 22337 22338 skip_ready_valid: 22339 
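	/*
	 * Commands that do not require the media to be ready or the label
	 * to be valid jump here directly; all other commands arrive here
	 * only after passing the sd_ready_and_valid() check above.
	 */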
mutex_exit(SD_MUTEX(un)); 22340 22341 switch (cmd) { 22342 case DKIOCINFO: 22343 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 22344 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 22345 break; 22346 22347 case DKIOCGMEDIAINFO: 22348 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 22349 err = sd_get_media_info(dev, (caddr_t)arg, flag); 22350 break; 22351 22352 case DKIOCGMEDIAINFOEXT: 22353 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFOEXT\n"); 22354 err = sd_get_media_info_ext(dev, (caddr_t)arg, flag); 22355 break; 22356 22357 case DKIOCGGEOM: 22358 case DKIOCGVTOC: 22359 case DKIOCGEXTVTOC: 22360 case DKIOCGAPART: 22361 case DKIOCPARTINFO: 22362 case DKIOCEXTPARTINFO: 22363 case DKIOCSGEOM: 22364 case DKIOCSAPART: 22365 case DKIOCGETEFI: 22366 case DKIOCPARTITION: 22367 case DKIOCSVTOC: 22368 case DKIOCSEXTVTOC: 22369 case DKIOCSETEFI: 22370 case DKIOCGMBOOT: 22371 case DKIOCSMBOOT: 22372 case DKIOCG_PHYGEOM: 22373 case DKIOCG_VIRTGEOM: 22374 #if defined(__i386) || defined(__amd64) 22375 case DKIOCSETEXTPART: 22376 #endif 22377 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 22378 22379 /* TUR should spin up */ 22380 22381 if (un->un_f_has_removable_media) 22382 err = sd_send_scsi_TEST_UNIT_READY(ssc, 22383 SD_CHECK_FOR_MEDIA); 22384 22385 else 22386 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 22387 22388 if (err != 0) 22389 goto done_with_assess; 22390 22391 err = cmlb_ioctl(un->un_cmlbhandle, dev, 22392 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 22393 22394 if ((err == 0) && 22395 ((cmd == DKIOCSETEFI) || 22396 (un->un_f_pkstats_enabled) && 22397 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC || 22398 cmd == DKIOCSEXTVTOC))) { 22399 22400 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 22401 (void *)SD_PATH_DIRECT); 22402 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 22403 sd_set_pstats(un); 22404 SD_TRACE(SD_LOG_IO_PARTITION, un, 22405 "sd_ioctl: un:0x%p pstats created and " 22406 "set\n", un); 22407 } 22408 } 22409 22410 if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) || 22411 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 22412 22413 mutex_enter(SD_MUTEX(un)); 22414 if (un->un_f_devid_supported && 22415 (un->un_f_opt_fab_devid == TRUE)) { 22416 if (un->un_devid == NULL) { 22417 sd_register_devid(ssc, SD_DEVINFO(un), 22418 SD_TARGET_IS_UNRESERVED); 22419 } else { 22420 /* 22421 * The device id for this disk 22422 * has been fabricated. The 22423 * device id must be preserved 22424 * by writing it back out to 22425 * disk. 
22426 */ 22427 if (sd_write_deviceid(ssc) != 0) { 22428 ddi_devid_free(un->un_devid); 22429 un->un_devid = NULL; 22430 } 22431 } 22432 } 22433 mutex_exit(SD_MUTEX(un)); 22434 } 22435 22436 break; 22437 22438 case DKIOCLOCK: 22439 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 22440 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 22441 SD_PATH_STANDARD); 22442 goto done_with_assess; 22443 22444 case DKIOCUNLOCK: 22445 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 22446 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 22447 SD_PATH_STANDARD); 22448 goto done_with_assess; 22449 22450 case DKIOCSTATE: { 22451 enum dkio_state state; 22452 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 22453 22454 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 22455 err = EFAULT; 22456 } else { 22457 err = sd_check_media(dev, state); 22458 if (err == 0) { 22459 if (ddi_copyout(&un->un_mediastate, (void *)arg, 22460 sizeof (int), flag) != 0) 22461 err = EFAULT; 22462 } 22463 } 22464 break; 22465 } 22466 22467 case DKIOCREMOVABLE: 22468 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 22469 i = un->un_f_has_removable_media ? 1 : 0; 22470 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22471 err = EFAULT; 22472 } else { 22473 err = 0; 22474 } 22475 break; 22476 22477 case DKIOCHOTPLUGGABLE: 22478 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 22479 i = un->un_f_is_hotpluggable ? 1 : 0; 22480 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22481 err = EFAULT; 22482 } else { 22483 err = 0; 22484 } 22485 break; 22486 22487 case DKIOCREADONLY: 22488 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREADONLY\n"); 22489 i = 0; 22490 if ((ISCD(un) && !un->un_f_mmc_writable_media) || 22491 (sr_check_wp(dev) != 0)) { 22492 i = 1; 22493 } 22494 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22495 err = EFAULT; 22496 } else { 22497 err = 0; 22498 } 22499 break; 22500 22501 case DKIOCGTEMPERATURE: 22502 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 22503 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 22504 break; 22505 22506 case MHIOCENFAILFAST: 22507 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 22508 if ((err = drv_priv(cred_p)) == 0) { 22509 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 22510 } 22511 break; 22512 22513 case MHIOCTKOWN: 22514 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 22515 if ((err = drv_priv(cred_p)) == 0) { 22516 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 22517 } 22518 break; 22519 22520 case MHIOCRELEASE: 22521 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 22522 if ((err = drv_priv(cred_p)) == 0) { 22523 err = sd_mhdioc_release(dev); 22524 } 22525 break; 22526 22527 case MHIOCSTATUS: 22528 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 22529 if ((err = drv_priv(cred_p)) == 0) { 22530 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) { 22531 case 0: 22532 err = 0; 22533 break; 22534 case EACCES: 22535 *rval_p = 1; 22536 err = 0; 22537 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22538 break; 22539 default: 22540 err = EIO; 22541 goto done_with_assess; 22542 } 22543 } 22544 break; 22545 22546 case MHIOCQRESERVE: 22547 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 22548 if ((err = drv_priv(cred_p)) == 0) { 22549 err = sd_reserve_release(dev, SD_RESERVE); 22550 } 22551 break; 22552 22553 case MHIOCREREGISTERDEVID: 22554 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 22555 if (drv_priv(cred_p) == EPERM) { 22556 err = EPERM; 22557 } else if (!un->un_f_devid_supported) { 22558 err = ENOTTY; 22559 } else { 22560 err = sd_mhdioc_register_devid(dev); 
22561 } 22562 break; 22563 22564 case MHIOCGRP_INKEYS: 22565 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 22566 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 22567 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22568 err = ENOTSUP; 22569 } else { 22570 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 22571 flag); 22572 } 22573 } 22574 break; 22575 22576 case MHIOCGRP_INRESV: 22577 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 22578 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 22579 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22580 err = ENOTSUP; 22581 } else { 22582 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 22583 } 22584 } 22585 break; 22586 22587 case MHIOCGRP_REGISTER: 22588 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 22589 if ((err = drv_priv(cred_p)) != EPERM) { 22590 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22591 err = ENOTSUP; 22592 } else if (arg != NULL) { 22593 mhioc_register_t reg; 22594 if (ddi_copyin((void *)arg, ®, 22595 sizeof (mhioc_register_t), flag) != 0) { 22596 err = EFAULT; 22597 } else { 22598 err = 22599 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22600 ssc, SD_SCSI3_REGISTER, 22601 (uchar_t *)®); 22602 if (err != 0) 22603 goto done_with_assess; 22604 } 22605 } 22606 } 22607 break; 22608 22609 case MHIOCGRP_RESERVE: 22610 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 22611 if ((err = drv_priv(cred_p)) != EPERM) { 22612 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22613 err = ENOTSUP; 22614 } else if (arg != NULL) { 22615 mhioc_resv_desc_t resv_desc; 22616 if (ddi_copyin((void *)arg, &resv_desc, 22617 sizeof (mhioc_resv_desc_t), flag) != 0) { 22618 err = EFAULT; 22619 } else { 22620 err = 22621 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22622 ssc, SD_SCSI3_RESERVE, 22623 (uchar_t *)&resv_desc); 22624 if (err != 0) 22625 goto done_with_assess; 22626 } 22627 } 22628 } 22629 break; 22630 22631 case MHIOCGRP_PREEMPTANDABORT: 22632 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 22633 if ((err = drv_priv(cred_p)) != EPERM) { 22634 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22635 err = ENOTSUP; 22636 } else if (arg != NULL) { 22637 mhioc_preemptandabort_t preempt_abort; 22638 if (ddi_copyin((void *)arg, &preempt_abort, 22639 sizeof (mhioc_preemptandabort_t), 22640 flag) != 0) { 22641 err = EFAULT; 22642 } else { 22643 err = 22644 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22645 ssc, SD_SCSI3_PREEMPTANDABORT, 22646 (uchar_t *)&preempt_abort); 22647 if (err != 0) 22648 goto done_with_assess; 22649 } 22650 } 22651 } 22652 break; 22653 22654 case MHIOCGRP_REGISTERANDIGNOREKEY: 22655 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 22656 if ((err = drv_priv(cred_p)) != EPERM) { 22657 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22658 err = ENOTSUP; 22659 } else if (arg != NULL) { 22660 mhioc_registerandignorekey_t r_and_i; 22661 if (ddi_copyin((void *)arg, (void *)&r_and_i, 22662 sizeof (mhioc_registerandignorekey_t), 22663 flag) != 0) { 22664 err = EFAULT; 22665 } else { 22666 err = 22667 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22668 ssc, SD_SCSI3_REGISTERANDIGNOREKEY, 22669 (uchar_t *)&r_and_i); 22670 if (err != 0) 22671 goto done_with_assess; 22672 } 22673 } 22674 } 22675 break; 22676 22677 case USCSICMD: 22678 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 22679 cr = ddi_get_cred(); 22680 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 22681 err = EPERM; 22682 } else { 22683 enum uio_seg uioseg; 22684 22685 uioseg = (flag & FKIOCTL) ? 
UIO_SYSSPACE : 22686 UIO_USERSPACE; 22687 if (un->un_f_format_in_progress == TRUE) { 22688 err = EAGAIN; 22689 break; 22690 } 22691 22692 err = sd_ssc_send(ssc, 22693 (struct uscsi_cmd *)arg, 22694 flag, uioseg, SD_PATH_STANDARD); 22695 if (err != 0) 22696 goto done_with_assess; 22697 else 22698 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 22699 } 22700 break; 22701 22702 case CDROMPAUSE: 22703 case CDROMRESUME: 22704 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 22705 if (!ISCD(un)) { 22706 err = ENOTTY; 22707 } else { 22708 err = sr_pause_resume(dev, cmd); 22709 } 22710 break; 22711 22712 case CDROMPLAYMSF: 22713 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 22714 if (!ISCD(un)) { 22715 err = ENOTTY; 22716 } else { 22717 err = sr_play_msf(dev, (caddr_t)arg, flag); 22718 } 22719 break; 22720 22721 case CDROMPLAYTRKIND: 22722 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 22723 #if defined(__i386) || defined(__amd64) 22724 /* 22725 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 22726 */ 22727 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 22728 #else 22729 if (!ISCD(un)) { 22730 #endif 22731 err = ENOTTY; 22732 } else { 22733 err = sr_play_trkind(dev, (caddr_t)arg, flag); 22734 } 22735 break; 22736 22737 case CDROMREADTOCHDR: 22738 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 22739 if (!ISCD(un)) { 22740 err = ENOTTY; 22741 } else { 22742 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 22743 } 22744 break; 22745 22746 case CDROMREADTOCENTRY: 22747 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 22748 if (!ISCD(un)) { 22749 err = ENOTTY; 22750 } else { 22751 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 22752 } 22753 break; 22754 22755 case CDROMSTOP: 22756 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 22757 if (!ISCD(un)) { 22758 err = ENOTTY; 22759 } else { 22760 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 22761 SD_TARGET_STOP, SD_PATH_STANDARD); 22762 goto done_with_assess; 22763 } 22764 break; 22765 22766 case CDROMSTART: 22767 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 22768 if (!ISCD(un)) { 22769 err = ENOTTY; 22770 } else { 22771 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 22772 SD_TARGET_START, SD_PATH_STANDARD); 22773 goto done_with_assess; 22774 } 22775 break; 22776 22777 case CDROMCLOSETRAY: 22778 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 22779 if (!ISCD(un)) { 22780 err = ENOTTY; 22781 } else { 22782 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 22783 SD_TARGET_CLOSE, SD_PATH_STANDARD); 22784 goto done_with_assess; 22785 } 22786 break; 22787 22788 case FDEJECT: /* for eject command */ 22789 case DKIOCEJECT: 22790 case CDROMEJECT: 22791 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 22792 if (!un->un_f_eject_media_supported) { 22793 err = ENOTTY; 22794 } else { 22795 err = sr_eject(dev); 22796 } 22797 break; 22798 22799 case CDROMVOLCTRL: 22800 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 22801 if (!ISCD(un)) { 22802 err = ENOTTY; 22803 } else { 22804 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 22805 } 22806 break; 22807 22808 case CDROMSUBCHNL: 22809 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 22810 if (!ISCD(un)) { 22811 err = ENOTTY; 22812 } else { 22813 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 22814 } 22815 break; 22816 22817 case CDROMREADMODE2: 22818 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 22819 if (!ISCD(un)) { 22820 err = ENOTTY; 22821 } else if (un->un_f_cfg_is_atapi == TRUE) { 22822 /* 22823 * If the drive supports READ CD, use that instead of 22824 * switching the LBA size via a MODE SELECT 
			 * Block Descriptor.
			 */
			err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
		} else {
			err = sr_read_mode2(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE1:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_mode1(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADOFFSET:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_sony_session_offset(dev, (caddr_t)arg,
			    flag);
		}
		break;

	case CDROMSBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
		/*
		 * There is no means of changing the block size on ATAPI
		 * drives, so return ENOTTY if the drive type is ATAPI.
		 */
		if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {

			/*
			 * MMC Devices do not support changing the
			 * logical block size.
			 *
			 * Note: EINVAL is being returned instead of ENOTTY to
			 * maintain consistency with the original mmc
			 * driver update.
			 */
			err = EINVAL;
		} else {
			mutex_enter(SD_MUTEX(un));
			if ((!(un->un_exclopen & (1<<SDPART(dev)))) ||
			    (un->un_ncmds_in_transport > 0)) {
				mutex_exit(SD_MUTEX(un));
				err = EINVAL;
			} else {
				mutex_exit(SD_MUTEX(un));
				err = sr_change_blkmode(dev, cmd, arg, flag);
			}
		}
		break;

	case CDROMGBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if ((un->un_f_cfg_is_atapi != FALSE) &&
		    (un->un_f_blockcount_is_valid != FALSE)) {
			/*
			 * Drive is an ATAPI drive so return target block
			 * size for ATAPI drives since we cannot change the
			 * blocksize on ATAPI drives. Used primarily to detect
			 * if an ATAPI cdrom is present.
			 */
			if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
			    sizeof (int), flag) != 0) {
				err = EFAULT;
			} else {
				err = 0;
			}

		} else {
			/*
			 * Drive supports changing block sizes via a Mode
			 * Select.
			 */
			err = sr_change_blkmode(dev, cmd, arg, flag);
		}
		break;

	case CDROMGDRVSPEED:
	case CDROMSDRVSPEED:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {
			/*
			 * Note: In the future the driver implementation for
			 * getting and setting cd speed should entail:
			 * 1) If non-mmc try the Toshiba mode page
			 *    (sr_change_speed)
			 * 2) If mmc but no support for Real Time Streaming try
			 *    the SET CD SPEED (0xBB) command
			 *    (sr_atapi_change_speed)
			 * 3) If mmc and support for Real Time Streaming
			 *    try the GET PERFORMANCE and SET STREAMING
			 *    commands (not yet implemented, 4380808)
			 */
			/*
			 * As per recent MMC spec, CD-ROM speed is variable
			 * and changes with LBA. Since there is no such
			 * thing as drive speed now, fail this ioctl.
			 *
			 * Note: EINVAL is returned for consistency with the
			 * original implementation, which included support
			 * for getting the drive speed of mmc devices but
			 * not setting the drive speed. Thus EINVAL would be
			 * returned if a set request was made for an mmc
			 * device.
22941 * We no longer support get or set speed for 22942 * mmc but need to remain consistent with regard 22943 * to the error code returned. 22944 */ 22945 err = EINVAL; 22946 } else if (un->un_f_cfg_is_atapi == TRUE) { 22947 err = sr_atapi_change_speed(dev, cmd, arg, flag); 22948 } else { 22949 err = sr_change_speed(dev, cmd, arg, flag); 22950 } 22951 break; 22952 22953 case CDROMCDDA: 22954 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 22955 if (!ISCD(un)) { 22956 err = ENOTTY; 22957 } else { 22958 err = sr_read_cdda(dev, (void *)arg, flag); 22959 } 22960 break; 22961 22962 case CDROMCDXA: 22963 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 22964 if (!ISCD(un)) { 22965 err = ENOTTY; 22966 } else { 22967 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 22968 } 22969 break; 22970 22971 case CDROMSUBCODE: 22972 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 22973 if (!ISCD(un)) { 22974 err = ENOTTY; 22975 } else { 22976 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 22977 } 22978 break; 22979 22980 22981 #ifdef SDDEBUG 22982 /* RESET/ABORTS testing ioctls */ 22983 case DKIOCRESET: { 22984 int reset_level; 22985 22986 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 22987 err = EFAULT; 22988 } else { 22989 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 22990 "reset_level = 0x%lx\n", reset_level); 22991 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 22992 err = 0; 22993 } else { 22994 err = EIO; 22995 } 22996 } 22997 break; 22998 } 22999 23000 case DKIOCABORT: 23001 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 23002 if (scsi_abort(SD_ADDRESS(un), NULL)) { 23003 err = 0; 23004 } else { 23005 err = EIO; 23006 } 23007 break; 23008 #endif 23009 23010 #ifdef SD_FAULT_INJECTION 23011 /* SDIOC FaultInjection testing ioctls */ 23012 case SDIOCSTART: 23013 case SDIOCSTOP: 23014 case SDIOCINSERTPKT: 23015 case SDIOCINSERTXB: 23016 case SDIOCINSERTUN: 23017 case SDIOCINSERTARQ: 23018 case SDIOCPUSH: 23019 case SDIOCRETRIEVE: 23020 case SDIOCRUN: 23021 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 23022 "SDIOC detected cmd:0x%X:\n", cmd); 23023 /* call error generator */ 23024 sd_faultinjection_ioctl(cmd, arg, un); 23025 err = 0; 23026 break; 23027 23028 #endif /* SD_FAULT_INJECTION */ 23029 23030 case DKIOCFLUSHWRITECACHE: 23031 { 23032 struct dk_callback *dkc = (struct dk_callback *)arg; 23033 23034 mutex_enter(SD_MUTEX(un)); 23035 if (!un->un_f_sync_cache_supported || 23036 !un->un_f_write_cache_enabled) { 23037 err = un->un_f_sync_cache_supported ? 23038 0 : ENOTSUP; 23039 mutex_exit(SD_MUTEX(un)); 23040 if ((flag & FKIOCTL) && dkc != NULL && 23041 dkc->dkc_callback != NULL) { 23042 (*dkc->dkc_callback)(dkc->dkc_cookie, 23043 err); 23044 /* 23045 * Did callback and reported error. 23046 * Since we did a callback, ioctl 23047 * should return 0. 
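					 *
					 * A kernel consumer requesting the
					 * asynchronous form might look
					 * roughly like this (illustrative
					 * sketch; my_flush_done and my_state
					 * are hypothetical):
					 *
					 *   struct dk_callback dkc;
					 *   dkc.dkc_callback = my_flush_done;
					 *   dkc.dkc_cookie = my_state;
					 *   (void) ldi_ioctl(lh,
					 *       DKIOCFLUSHWRITECACHE,
					 *       (intptr_t)&dkc, FKIOCTL,
					 *       kcred, NULL);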
23048 */ 23049 err = 0; 23050 } 23051 break; 23052 } 23053 mutex_exit(SD_MUTEX(un)); 23054 23055 if ((flag & FKIOCTL) && dkc != NULL && 23056 dkc->dkc_callback != NULL) { 23057 /* async SYNC CACHE request */ 23058 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 23059 } else { 23060 /* synchronous SYNC CACHE request */ 23061 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 23062 } 23063 } 23064 break; 23065 23066 case DKIOCGETWCE: { 23067 23068 int wce; 23069 23070 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) { 23071 break; 23072 } 23073 23074 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 23075 err = EFAULT; 23076 } 23077 break; 23078 } 23079 23080 case DKIOCSETWCE: { 23081 23082 int wce, sync_supported; 23083 int cur_wce = 0; 23084 23085 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 23086 err = EFAULT; 23087 break; 23088 } 23089 23090 /* 23091 * Synchronize multiple threads trying to enable 23092 * or disable the cache via the un_f_wcc_cv 23093 * condition variable. 23094 */ 23095 mutex_enter(SD_MUTEX(un)); 23096 23097 /* 23098 * Don't allow the cache to be enabled if the 23099 * config file has it disabled. 23100 */ 23101 if (un->un_f_opt_disable_cache && wce) { 23102 mutex_exit(SD_MUTEX(un)); 23103 err = EINVAL; 23104 break; 23105 } 23106 23107 /* 23108 * Wait for write cache change in progress 23109 * bit to be clear before proceeding. 23110 */ 23111 while (un->un_f_wcc_inprog) 23112 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 23113 23114 un->un_f_wcc_inprog = 1; 23115 23116 mutex_exit(SD_MUTEX(un)); 23117 23118 /* 23119 * Get the current write cache state 23120 */ 23121 if ((err = sd_get_write_cache_enabled(ssc, &cur_wce)) != 0) { 23122 mutex_enter(SD_MUTEX(un)); 23123 un->un_f_wcc_inprog = 0; 23124 cv_broadcast(&un->un_wcc_cv); 23125 mutex_exit(SD_MUTEX(un)); 23126 break; 23127 } 23128 23129 mutex_enter(SD_MUTEX(un)); 23130 un->un_f_write_cache_enabled = (cur_wce != 0); 23131 23132 if (un->un_f_write_cache_enabled && wce == 0) { 23133 /* 23134 * Disable the write cache. Don't clear 23135 * un_f_write_cache_enabled until after 23136 * the mode select and flush are complete. 23137 */ 23138 sync_supported = un->un_f_sync_cache_supported; 23139 23140 /* 23141 * If cache flush is suppressed, we assume that the 23142 * controller firmware will take care of managing the 23143 * write cache for us: no need to explicitly 23144 * disable it. 23145 */ 23146 if (!un->un_f_suppress_cache_flush) { 23147 mutex_exit(SD_MUTEX(un)); 23148 if ((err = sd_cache_control(ssc, 23149 SD_CACHE_NOCHANGE, 23150 SD_CACHE_DISABLE)) == 0 && 23151 sync_supported) { 23152 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 23153 NULL); 23154 } 23155 } else { 23156 mutex_exit(SD_MUTEX(un)); 23157 } 23158 23159 mutex_enter(SD_MUTEX(un)); 23160 if (err == 0) { 23161 un->un_f_write_cache_enabled = 0; 23162 } 23163 23164 } else if (!un->un_f_write_cache_enabled && wce != 0) { 23165 /* 23166 * Set un_f_write_cache_enabled first, so there is 23167 * no window where the cache is enabled, but the 23168 * bit says it isn't. 23169 */ 23170 un->un_f_write_cache_enabled = 1; 23171 23172 /* 23173 * If cache flush is suppressed, we assume that the 23174 * controller firmware will take care of managing the 23175 * write cache for us: no need to explicitly 23176 * enable it. 
23177 */ 23178 if (!un->un_f_suppress_cache_flush) { 23179 mutex_exit(SD_MUTEX(un)); 23180 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE, 23181 SD_CACHE_ENABLE); 23182 } else { 23183 mutex_exit(SD_MUTEX(un)); 23184 } 23185 23186 mutex_enter(SD_MUTEX(un)); 23187 23188 if (err) { 23189 un->un_f_write_cache_enabled = 0; 23190 } 23191 } 23192 23193 un->un_f_wcc_inprog = 0; 23194 cv_broadcast(&un->un_wcc_cv); 23195 mutex_exit(SD_MUTEX(un)); 23196 break; 23197 } 23198 23199 default: 23200 err = ENOTTY; 23201 break; 23202 } 23203 mutex_enter(SD_MUTEX(un)); 23204 un->un_ncmds_in_driver--; 23205 ASSERT(un->un_ncmds_in_driver >= 0); 23206 mutex_exit(SD_MUTEX(un)); 23207 23208 23209 done_without_assess: 23210 sd_ssc_fini(ssc); 23211 23212 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 23213 return (err); 23214 23215 done_with_assess: 23216 mutex_enter(SD_MUTEX(un)); 23217 un->un_ncmds_in_driver--; 23218 ASSERT(un->un_ncmds_in_driver >= 0); 23219 mutex_exit(SD_MUTEX(un)); 23220 23221 done_quick_assess: 23222 if (err != 0) 23223 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23224 /* Uninitialize sd_ssc_t pointer */ 23225 sd_ssc_fini(ssc); 23226 23227 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 23228 return (err); 23229 } 23230 23231 23232 /* 23233 * Function: sd_dkio_ctrl_info 23234 * 23235 * Description: This routine is the driver entry point for handling controller 23236 * information ioctl requests (DKIOCINFO). 23237 * 23238 * Arguments: dev - the device number 23239 * arg - pointer to user provided dk_cinfo structure 23240 * specifying the controller type and attributes. 23241 * flag - this argument is a pass through to ddi_copyxxx() 23242 * directly from the mode argument of ioctl(). 23243 * 23244 * Return Code: 0 23245 * EFAULT 23246 * ENXIO 23247 */ 23248 23249 static int 23250 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 23251 { 23252 struct sd_lun *un = NULL; 23253 struct dk_cinfo *info; 23254 dev_info_t *pdip; 23255 int lun, tgt; 23256 23257 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23258 return (ENXIO); 23259 } 23260 23261 info = (struct dk_cinfo *) 23262 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 23263 23264 switch (un->un_ctype) { 23265 case CTYPE_CDROM: 23266 info->dki_ctype = DKC_CDROM; 23267 break; 23268 default: 23269 info->dki_ctype = DKC_SCSI_CCS; 23270 break; 23271 } 23272 pdip = ddi_get_parent(SD_DEVINFO(un)); 23273 info->dki_cnum = ddi_get_instance(pdip); 23274 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 23275 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 23276 } else { 23277 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 23278 DK_DEVLEN - 1); 23279 } 23280 23281 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 23282 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 23283 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 23284 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 23285 23286 /* Unit Information */ 23287 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 23288 info->dki_slave = ((tgt << 3) | lun); 23289 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 23290 DK_DEVLEN - 1); 23291 info->dki_flags = DKI_FMTVOL; 23292 info->dki_partition = SDPART(dev); 23293 23294 /* Max Transfer size of this device in blocks */ 23295 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 23296 info->dki_addr = 0; 23297 info->dki_space = 0; 23298 info->dki_prio = 0; 23299 info->dki_vec = 0; 23300 23301 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 23302 kmem_free(info, sizeof 
(struct dk_cinfo));
		return (EFAULT);
	} else {
		kmem_free(info, sizeof (struct dk_cinfo));
		return (0);
	}
}

/*
 * Function: sd_get_media_info_com
 *
 * Description: This routine returns the information required to populate
 *		the fields for the dk_minfo/dk_minfo_ext structures.
 *
 * Arguments: dev - the device number
 *		dki_media_type - media type
 *		dki_lbsize - logical block size
 *		dki_capacity - capacity in blocks
 *		dki_pbsize - physical block size (if requested)
 *
 * Return Code: 0
 *		EACCES
 *		EFAULT
 *		ENXIO
 *		EIO
 */
static int
sd_get_media_info_com(dev_t dev, uint_t *dki_media_type, uint_t *dki_lbsize,
	diskaddr_t *dki_capacity, uint_t *dki_pbsize)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	com;
	struct scsi_inquiry	*sinq;
	u_longlong_t		media_capacity;
	uint64_t		capacity;
	uint_t			lbasize;
	uint_t			pbsize;
	uchar_t			*out_data;
	uchar_t			*rqbuf;
	int			rval = 0;
	int			rtn;
	sd_ssc_t		*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info_com: entry\n");

	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
	ssc = sd_ssc_init(un);

	/* Issue a TUR to determine if the drive is ready with media present */
	rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA);
	if (rval == ENXIO) {
		goto done;
	} else if (rval != 0) {
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	/* Now get configuration data */
	if (ISCD(un)) {
		*dki_media_type = DK_CDROM;

		/* Allow SCMD_GET_CONFIGURATION to MMC devices only */
		if (un->un_f_mmc_cap == TRUE) {
			rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf,
			    SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
			    SD_PATH_STANDARD);

			if (rtn) {
				/*
				 * We ignore all failures for CD and need to
				 * put the assessment before the processing
				 * code to avoid missing an assessment for
				 * FMA.
23379 */ 23380 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23381 /* 23382 * Failed for other than an illegal request 23383 * or command not supported 23384 */ 23385 if ((com.uscsi_status == STATUS_CHECK) && 23386 (com.uscsi_rqstatus == STATUS_GOOD)) { 23387 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 23388 (rqbuf[12] != 0x20)) { 23389 rval = EIO; 23390 goto no_assessment; 23391 } 23392 } 23393 } else { 23394 /* 23395 * The GET CONFIGURATION command succeeded 23396 * so set the media type according to the 23397 * returned data 23398 */ 23399 *dki_media_type = out_data[6]; 23400 *dki_media_type <<= 8; 23401 *dki_media_type |= out_data[7]; 23402 } 23403 } 23404 } else { 23405 /* 23406 * The profile list is not available, so we attempt to identify 23407 * the media type based on the inquiry data 23408 */ 23409 sinq = un->un_sd->sd_inq; 23410 if ((sinq->inq_dtype == DTYPE_DIRECT) || 23411 (sinq->inq_dtype == DTYPE_OPTICAL)) { 23412 /* This is a direct access device or optical disk */ 23413 *dki_media_type = DK_FIXED_DISK; 23414 23415 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 23416 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 23417 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 23418 *dki_media_type = DK_ZIP; 23419 } else if ( 23420 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 23421 *dki_media_type = DK_JAZ; 23422 } 23423 } 23424 } else { 23425 /* 23426 * Not a CD, direct access or optical disk so return 23427 * unknown media 23428 */ 23429 *dki_media_type = DK_UNKNOWN; 23430 } 23431 } 23432 23433 /* 23434 * Now read the capacity so we can provide the lbasize, 23435 * pbsize and capacity. 23436 */ 23437 if (dki_pbsize && un->un_f_descr_format_supported) 23438 rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize, 23439 &pbsize, SD_PATH_DIRECT); 23440 23441 if (dki_pbsize == NULL || rval != 0 || 23442 !un->un_f_descr_format_supported) { 23443 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 23444 SD_PATH_DIRECT); 23445 23446 switch (rval) { 23447 case 0: 23448 if (un->un_f_enable_rmw && 23449 un->un_phy_blocksize != 0) { 23450 pbsize = un->un_phy_blocksize; 23451 } else { 23452 pbsize = lbasize; 23453 } 23454 media_capacity = capacity; 23455 23456 /* 23457 * sd_send_scsi_READ_CAPACITY() reports capacity in 23458 * un->un_sys_blocksize chunks. So we need to convert 23459 * it into cap.lbsize chunks. 23460 */ 23461 if (un->un_f_has_removable_media) { 23462 media_capacity *= un->un_sys_blocksize; 23463 media_capacity /= lbasize; 23464 } 23465 break; 23466 case EACCES: 23467 rval = EACCES; 23468 goto done; 23469 default: 23470 rval = EIO; 23471 goto done; 23472 } 23473 } else { 23474 if (un->un_f_enable_rmw && 23475 !ISP2(pbsize % DEV_BSIZE)) { 23476 pbsize = SSD_SECSIZE; 23477 } else if (!ISP2(lbasize % DEV_BSIZE) || 23478 !ISP2(pbsize % DEV_BSIZE)) { 23479 pbsize = lbasize = DEV_BSIZE; 23480 } 23481 media_capacity = capacity; 23482 } 23483 23484 /* 23485 * If lun is expanded dynamically, update the un structure. 
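	 * This can happen when, for example, the LUN has been grown on a
	 * storage array, so READ CAPACITY now reports more blocks than the
	 * cached un_blockcount.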
	 */
	mutex_enter(SD_MUTEX(un));
	if ((un->un_f_blockcount_is_valid == TRUE) &&
	    (un->un_f_tgt_blocksize_is_valid == TRUE) &&
	    (capacity > un->un_blockcount)) {
		un->un_f_expnevent = B_FALSE;
		sd_update_block_info(un, lbasize, capacity);
	}
	mutex_exit(SD_MUTEX(un));

	*dki_lbsize = lbasize;
	*dki_capacity = media_capacity;
	if (dki_pbsize)
		*dki_pbsize = pbsize;

done:
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
no_assessment:
	sd_ssc_fini(ssc);
	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
	kmem_free(rqbuf, SENSE_LENGTH);
	return (rval);
}

/*
 * Function: sd_get_media_info
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests for the media type or command set profile used by the
 *		drive to operate on the media (DKIOCGMEDIAINFO).
 *
 * Arguments: dev - the device number
 *		arg - pointer to user provided dk_minfo structure
 *		      specifying the media type, logical block size and
 *		      drive capacity.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		       directly from the mode argument of ioctl().
 *
 * Return Code: returns the value from sd_get_media_info_com
 */
static int
sd_get_media_info(dev_t dev, caddr_t arg, int flag)
{
	struct dk_minfo		mi;
	int			rval;

	rval = sd_get_media_info_com(dev, &mi.dki_media_type,
	    &mi.dki_lbsize, &mi.dki_capacity, NULL);

	if (rval)
		return (rval);
	if (ddi_copyout(&mi, arg, sizeof (struct dk_minfo), flag))
		rval = EFAULT;
	return (rval);
}

/*
 * Function: sd_get_media_info_ext
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests for the media type or command set profile used by the
 *		drive to operate on the media (DKIOCGMEDIAINFOEXT). The
 *		difference between this ioctl and DKIOCGMEDIAINFO is that the
 *		return value of this ioctl contains both the logical block
 *		size and the physical block size.
 *
 * Arguments: dev - the device number
 *		arg - pointer to user provided dk_minfo_ext structure
 *		      specifying the media type, logical block size,
 *		      physical block size and disk capacity.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		       directly from the mode argument of ioctl().
 *
 * Return Code: returns the value from sd_get_media_info_com
 */
static int
sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag)
{
	struct dk_minfo_ext	mie;
	int			rval = 0;

	rval = sd_get_media_info_com(dev, &mie.dki_media_type,
	    &mie.dki_lbsize, &mie.dki_capacity, &mie.dki_pbsize);

	if (rval)
		return (rval);
	if (ddi_copyout(&mie, arg, sizeof (struct dk_minfo_ext), flag))
		rval = EFAULT;
	return (rval);
}

/*
 * Function: sd_watch_request_submit
 *
 * Description: Call scsi_watch_request_submit or scsi_mmc_watch_request_submit
 *		depending on which is supported by the device.
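 *
 *		Devices that advertise MMC capability with GESN polling
 *		support (un_f_mmc_cap && un_f_mmc_gesn_polling) are watched
 *		via GET EVENT STATUS NOTIFICATION; all other devices are
 *		polled with TEST UNIT READY.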
23589 */ 23590 static opaque_t 23591 sd_watch_request_submit(struct sd_lun *un) 23592 { 23593 dev_t dev; 23594 23595 /* All submissions are unified to use same device number */ 23596 dev = sd_make_device(SD_DEVINFO(un)); 23597 23598 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) { 23599 return (scsi_mmc_watch_request_submit(SD_SCSI_DEVP(un), 23600 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 23601 (caddr_t)dev)); 23602 } else { 23603 return (scsi_watch_request_submit(SD_SCSI_DEVP(un), 23604 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 23605 (caddr_t)dev)); 23606 } 23607 } 23608 23609 23610 /* 23611 * Function: sd_check_media 23612 * 23613 * Description: This utility routine implements the functionality for the 23614 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 23615 * driver state changes from that specified by the user 23616 * (inserted or ejected). For example, if the user specifies 23617 * DKIO_EJECTED and the current media state is inserted this 23618 * routine will immediately return DKIO_INSERTED. However, if the 23619 * current media state is not inserted the user thread will be 23620 * blocked until the drive state changes. If DKIO_NONE is specified 23621 * the user thread will block until a drive state change occurs. 23622 * 23623 * Arguments: dev - the device number 23624 * state - user pointer to a dkio_state, updated with the current 23625 * drive state at return. 23626 * 23627 * Return Code: ENXIO 23628 * EIO 23629 * EAGAIN 23630 * EINTR 23631 */ 23632 23633 static int 23634 sd_check_media(dev_t dev, enum dkio_state state) 23635 { 23636 struct sd_lun *un = NULL; 23637 enum dkio_state prev_state; 23638 opaque_t token = NULL; 23639 int rval = 0; 23640 sd_ssc_t *ssc; 23641 23642 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23643 return (ENXIO); 23644 } 23645 23646 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 23647 23648 ssc = sd_ssc_init(un); 23649 23650 mutex_enter(SD_MUTEX(un)); 23651 23652 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 23653 "state=%x, mediastate=%x\n", state, un->un_mediastate); 23654 23655 prev_state = un->un_mediastate; 23656 23657 /* is there anything to do? */ 23658 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 23659 /* 23660 * submit the request to the scsi_watch service; 23661 * scsi_media_watch_cb() does the real work 23662 */ 23663 mutex_exit(SD_MUTEX(un)); 23664 23665 /* 23666 * This change handles the case where a scsi watch request is 23667 * added to a device that is powered down. To accomplish this 23668 * we power up the device before adding the scsi watch request, 23669 * since the scsi watch sends a TUR directly to the device 23670 * which the device cannot handle if it is powered down. 23671 */ 23672 if (sd_pm_entry(un) != DDI_SUCCESS) { 23673 mutex_enter(SD_MUTEX(un)); 23674 goto done; 23675 } 23676 23677 token = sd_watch_request_submit(un); 23678 23679 sd_pm_exit(un); 23680 23681 mutex_enter(SD_MUTEX(un)); 23682 if (token == NULL) { 23683 rval = EAGAIN; 23684 goto done; 23685 } 23686 23687 /* 23688 * This is a special case IOCTL that doesn't return 23689 * until the media state changes. Routine sdpower 23690 * knows about and handles this so don't count it 23691 * as an active cmd in the driver, which would 23692 * keep the device busy to the pm framework. 23693 * If the count isn't decremented the device can't 23694 * be powered down. 
23695 */ 23696 un->un_ncmds_in_driver--; 23697 ASSERT(un->un_ncmds_in_driver >= 0); 23698 23699 /* 23700 * if a prior request had been made, this will be the same 23701 * token, as scsi_watch was designed that way. 23702 */ 23703 un->un_swr_token = token; 23704 un->un_specified_mediastate = state; 23705 23706 /* 23707 * now wait for media change 23708 * we will not be signalled unless mediastate == state but it is 23709 * still better to test for this condition, since there is a 23710 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 23711 */ 23712 SD_TRACE(SD_LOG_COMMON, un, 23713 "sd_check_media: waiting for media state change\n"); 23714 while (un->un_mediastate == state) { 23715 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 23716 SD_TRACE(SD_LOG_COMMON, un, 23717 "sd_check_media: waiting for media state " 23718 "was interrupted\n"); 23719 un->un_ncmds_in_driver++; 23720 rval = EINTR; 23721 goto done; 23722 } 23723 SD_TRACE(SD_LOG_COMMON, un, 23724 "sd_check_media: received signal, state=%x\n", 23725 un->un_mediastate); 23726 } 23727 /* 23728 * Inc the counter to indicate the device once again 23729 * has an active outstanding cmd. 23730 */ 23731 un->un_ncmds_in_driver++; 23732 } 23733 23734 /* invalidate geometry */ 23735 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 23736 sr_ejected(un); 23737 } 23738 23739 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 23740 uint64_t capacity; 23741 uint_t lbasize; 23742 23743 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 23744 mutex_exit(SD_MUTEX(un)); 23745 /* 23746 * Since the following routines use SD_PATH_DIRECT, we must 23747 * call PM directly before the upcoming disk accesses. This 23748 * may cause the disk to be power/spin up. 23749 */ 23750 23751 if (sd_pm_entry(un) == DDI_SUCCESS) { 23752 rval = sd_send_scsi_READ_CAPACITY(ssc, 23753 &capacity, &lbasize, SD_PATH_DIRECT); 23754 if (rval != 0) { 23755 sd_pm_exit(un); 23756 if (rval == EIO) 23757 sd_ssc_assessment(ssc, 23758 SD_FMT_STATUS_CHECK); 23759 else 23760 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23761 mutex_enter(SD_MUTEX(un)); 23762 goto done; 23763 } 23764 } else { 23765 rval = EIO; 23766 mutex_enter(SD_MUTEX(un)); 23767 goto done; 23768 } 23769 mutex_enter(SD_MUTEX(un)); 23770 23771 sd_update_block_info(un, lbasize, capacity); 23772 23773 /* 23774 * Check if the media in the device is writable or not 23775 */ 23776 if (ISCD(un)) { 23777 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 23778 } 23779 23780 mutex_exit(SD_MUTEX(un)); 23781 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 23782 if ((cmlb_validate(un->un_cmlbhandle, 0, 23783 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 23784 sd_set_pstats(un); 23785 SD_TRACE(SD_LOG_IO_PARTITION, un, 23786 "sd_check_media: un:0x%p pstats created and " 23787 "set\n", un); 23788 } 23789 23790 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 23791 SD_PATH_DIRECT); 23792 23793 sd_pm_exit(un); 23794 23795 if (rval != 0) { 23796 if (rval == EIO) 23797 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23798 else 23799 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23800 } 23801 23802 mutex_enter(SD_MUTEX(un)); 23803 } 23804 done: 23805 sd_ssc_fini(ssc); 23806 un->un_f_watcht_stopped = FALSE; 23807 if (token != NULL && un->un_swr_token != NULL) { 23808 /* 23809 * Use of this local token and the mutex ensures that we avoid 23810 * some race conditions associated with terminating the 23811 * scsi watch. 
23812 */ 23813 token = un->un_swr_token; 23814 mutex_exit(SD_MUTEX(un)); 23815 (void) scsi_watch_request_terminate(token, 23816 SCSI_WATCH_TERMINATE_WAIT); 23817 if (scsi_watch_get_ref_count(token) == 0) { 23818 mutex_enter(SD_MUTEX(un)); 23819 un->un_swr_token = (opaque_t)NULL; 23820 } else { 23821 mutex_enter(SD_MUTEX(un)); 23822 } 23823 } 23824 23825 /* 23826 * Update the capacity kstat value, if no media previously 23827 * (capacity kstat is 0) and a media has been inserted 23828 * (un_f_blockcount_is_valid == TRUE) 23829 */ 23830 if (un->un_errstats) { 23831 struct sd_errstats *stp = NULL; 23832 23833 stp = (struct sd_errstats *)un->un_errstats->ks_data; 23834 if ((stp->sd_capacity.value.ui64 == 0) && 23835 (un->un_f_blockcount_is_valid == TRUE)) { 23836 stp->sd_capacity.value.ui64 = 23837 (uint64_t)((uint64_t)un->un_blockcount * 23838 un->un_sys_blocksize); 23839 } 23840 } 23841 mutex_exit(SD_MUTEX(un)); 23842 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 23843 return (rval); 23844 } 23845 23846 23847 /* 23848 * Function: sd_delayed_cv_broadcast 23849 * 23850 * Description: Delayed cv_broadcast to allow for target to recover from media 23851 * insertion. 23852 * 23853 * Arguments: arg - driver soft state (unit) structure 23854 */ 23855 23856 static void 23857 sd_delayed_cv_broadcast(void *arg) 23858 { 23859 struct sd_lun *un = arg; 23860 23861 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 23862 23863 mutex_enter(SD_MUTEX(un)); 23864 un->un_dcvb_timeid = NULL; 23865 cv_broadcast(&un->un_state_cv); 23866 mutex_exit(SD_MUTEX(un)); 23867 } 23868 23869 23870 /* 23871 * Function: sd_media_watch_cb 23872 * 23873 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 23874 * routine processes the TUR sense data and updates the driver 23875 * state if a transition has occurred. The user thread 23876 * (sd_check_media) is then signalled. 
23877 * 23878 * Arguments: arg - the device 'dev_t' is used for context to discriminate 23879 * among multiple watches that share this callback function 23880 * resultp - scsi watch facility result packet containing scsi 23881 * packet, status byte and sense data 23882 * 23883 * Return Code: 0 for success, -1 for failure 23884 */ 23885 23886 static int 23887 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 23888 { 23889 struct sd_lun *un; 23890 struct scsi_status *statusp = resultp->statusp; 23891 uint8_t *sensep = (uint8_t *)resultp->sensep; 23892 enum dkio_state state = DKIO_NONE; 23893 dev_t dev = (dev_t)arg; 23894 uchar_t actual_sense_length; 23895 uint8_t skey, asc, ascq; 23896 23897 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23898 return (-1); 23899 } 23900 actual_sense_length = resultp->actual_sense_length; 23901 23902 mutex_enter(SD_MUTEX(un)); 23903 SD_TRACE(SD_LOG_COMMON, un, 23904 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 23905 *((char *)statusp), (void *)sensep, actual_sense_length); 23906 23907 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 23908 un->un_mediastate = DKIO_DEV_GONE; 23909 cv_broadcast(&un->un_state_cv); 23910 mutex_exit(SD_MUTEX(un)); 23911 23912 return (0); 23913 } 23914 23915 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) { 23916 if (sd_gesn_media_data_valid(resultp->mmc_data)) { 23917 if ((resultp->mmc_data[5] & 23918 SD_GESN_MEDIA_EVENT_STATUS_PRESENT) != 0) { 23919 state = DKIO_INSERTED; 23920 } else { 23921 state = DKIO_EJECTED; 23922 } 23923 if ((resultp->mmc_data[4] & SD_GESN_MEDIA_EVENT_CODE) == 23924 SD_GESN_MEDIA_EVENT_EJECTREQUEST) { 23925 sd_log_eject_request_event(un, KM_NOSLEEP); 23926 } 23927 } 23928 } else if (sensep != NULL) { 23929 /* 23930 * If there was a check condition then sensep points to valid 23931 * sense data. If status was not a check condition but a 23932 * reservation or busy status then the new state is DKIO_NONE. 23933 */ 23934 skey = scsi_sense_key(sensep); 23935 asc = scsi_sense_asc(sensep); 23936 ascq = scsi_sense_ascq(sensep); 23937 23938 SD_INFO(SD_LOG_COMMON, un, 23939 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 23940 skey, asc, ascq); 23941 /* This routine only uses up to 13 bytes of sense data. */ 23942 if (actual_sense_length >= 13) { 23943 if (skey == KEY_UNIT_ATTENTION) { 23944 if (asc == 0x28) { 23945 state = DKIO_INSERTED; 23946 } 23947 } else if (skey == KEY_NOT_READY) { 23948 /* 23949 * Sense data of 02/06/00 means that the 23950 * drive could not read the media (No 23951 * reference position found). In this case 23952 * to prevent a hang on the DKIOCSTATE IOCTL 23953 * we set the media state to DKIO_INSERTED. 23954 */ 23955 if (asc == 0x06 && ascq == 0x00) 23956 state = DKIO_INSERTED; 23957 23958 /* 23959 * if 02/04/02 means that the host 23960 * should send start command. Explicitly 23961 * leave the media state as is 23962 * (inserted) as the media is inserted 23963 * and host has stopped device for PM 23964 * reasons. Upon next true read/write 23965 * to this media will bring the 23966 * device to the right state good for 23967 * media access. 23968 */ 23969 if (asc == 0x3a) { 23970 state = DKIO_EJECTED; 23971 } else { 23972 /* 23973 * If the drive is busy with an 23974 * operation or long write, keep the 23975 * media in an inserted state. 
23976 */ 23977 23978 if ((asc == 0x04) && 23979 ((ascq == 0x02) || 23980 (ascq == 0x07) || 23981 (ascq == 0x08))) { 23982 state = DKIO_INSERTED; 23983 } 23984 } 23985 } else if (skey == KEY_NO_SENSE) { 23986 if ((asc == 0x00) && (ascq == 0x00)) { 23987 /* 23988 * Sense Data 00/00/00 does not provide 23989 * any information about the state of 23990 * the media. Ignore it. 23991 */ 23992 mutex_exit(SD_MUTEX(un)); 23993 return (0); 23994 } 23995 } 23996 } 23997 } else if ((*((char *)statusp) == STATUS_GOOD) && 23998 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 23999 state = DKIO_INSERTED; 24000 } 24001 24002 SD_TRACE(SD_LOG_COMMON, un, 24003 "sd_media_watch_cb: state=%x, specified=%x\n", 24004 state, un->un_specified_mediastate); 24005 24006 /* 24007 * now signal the waiting thread if this is *not* the specified state; 24008 * delay the signal if the state is DKIO_INSERTED to allow the target 24009 * to recover 24010 */ 24011 if (state != un->un_specified_mediastate) { 24012 un->un_mediastate = state; 24013 if (state == DKIO_INSERTED) { 24014 /* 24015 * delay the signal to give the drive a chance 24016 * to do what it apparently needs to do 24017 */ 24018 SD_TRACE(SD_LOG_COMMON, un, 24019 "sd_media_watch_cb: delayed cv_broadcast\n"); 24020 if (un->un_dcvb_timeid == NULL) { 24021 un->un_dcvb_timeid = 24022 timeout(sd_delayed_cv_broadcast, un, 24023 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 24024 } 24025 } else { 24026 SD_TRACE(SD_LOG_COMMON, un, 24027 "sd_media_watch_cb: immediate cv_broadcast\n"); 24028 cv_broadcast(&un->un_state_cv); 24029 } 24030 } 24031 mutex_exit(SD_MUTEX(un)); 24032 return (0); 24033 } 24034 24035 24036 /* 24037 * Function: sd_dkio_get_temp 24038 * 24039 * Description: This routine is the driver entry point for handling ioctl 24040 * requests to get the disk temperature. 24041 * 24042 * Arguments: dev - the device number 24043 * arg - pointer to user provided dk_temperature structure. 24044 * flag - this argument is a pass through to ddi_copyxxx() 24045 * directly from the mode argument of ioctl(). 24046 * 24047 * Return Code: 0 24048 * EFAULT 24049 * ENXIO 24050 * EAGAIN 24051 */ 24052 24053 static int 24054 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 24055 { 24056 struct sd_lun *un = NULL; 24057 struct dk_temperature *dktemp = NULL; 24058 uchar_t *temperature_page; 24059 int rval = 0; 24060 int path_flag = SD_PATH_STANDARD; 24061 sd_ssc_t *ssc; 24062 24063 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24064 return (ENXIO); 24065 } 24066 24067 ssc = sd_ssc_init(un); 24068 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 24069 24070 /* copyin the disk temp argument to get the user flags */ 24071 if (ddi_copyin((void *)arg, dktemp, 24072 sizeof (struct dk_temperature), flag) != 0) { 24073 rval = EFAULT; 24074 goto done; 24075 } 24076 24077 /* Initialize the temperature to invalid. */ 24078 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 24079 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 24080 24081 /* 24082 * Note: Investigate removing the "bypass pm" semantic. 24083 * Can we just bypass PM always? 24084 */ 24085 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 24086 path_flag = SD_PATH_DIRECT; 24087 ASSERT(!mutex_owned(&un->un_pm_mutex)); 24088 mutex_enter(&un->un_pm_mutex); 24089 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 24090 /* 24091 * If DKT_BYPASS_PM is set, and the drive happens to be 24092 * in low power mode, we can not wake it up, Need to 24093 * return EAGAIN. 
24094 */ 24095 mutex_exit(&un->un_pm_mutex); 24096 rval = EAGAIN; 24097 goto done; 24098 } else { 24099 /* 24100 * Indicate to PM the device is busy. This is required 24101 * to avoid a race - i.e. the ioctl is issuing a 24102 * command and the pm framework brings down the device 24103 * to low power mode (possible power cut-off on some 24104 * platforms). 24105 */ 24106 mutex_exit(&un->un_pm_mutex); 24107 if (sd_pm_entry(un) != DDI_SUCCESS) { 24108 rval = EAGAIN; 24109 goto done; 24110 } 24111 } 24112 } 24113 24114 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 24115 24116 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page, 24117 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag); 24118 if (rval != 0) 24119 goto done2; 24120 24121 /* 24122 * For the current temperature verify that the parameter length is 0x02 24123 * and the parameter code is 0x00 24124 */ 24125 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 24126 (temperature_page[5] == 0x00)) { 24127 if (temperature_page[9] == 0xFF) { 24128 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 24129 } else { 24130 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 24131 } 24132 } 24133 24134 /* 24135 * For the reference temperature verify that the parameter 24136 * length is 0x02 and the parameter code is 0x01 24137 */ 24138 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 24139 (temperature_page[11] == 0x01)) { 24140 if (temperature_page[15] == 0xFF) { 24141 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 24142 } else { 24143 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 24144 } 24145 } 24146 24147 /* Do the copyout regardless of the temperature commands status. */ 24148 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 24149 flag) != 0) { 24150 rval = EFAULT; 24151 goto done1; 24152 } 24153 24154 done2: 24155 if (rval != 0) { 24156 if (rval == EIO) 24157 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 24158 else 24159 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24160 } 24161 done1: 24162 if (path_flag == SD_PATH_DIRECT) { 24163 sd_pm_exit(un); 24164 } 24165 24166 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 24167 done: 24168 sd_ssc_fini(ssc); 24169 if (dktemp != NULL) { 24170 kmem_free(dktemp, sizeof (struct dk_temperature)); 24171 } 24172 24173 return (rval); 24174 } 24175 24176 24177 /* 24178 * Function: sd_log_page_supported 24179 * 24180 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 24181 * supported log pages. 24182 * 24183 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 24184 * structure for this target. 24185 * log_page - 24186 * 24187 * Return Code: -1 - on error (log sense is optional and may not be supported). 24188 * 0 - log page not found. 24189 * 1 - log page found. 24190 */ 24191 24192 static int 24193 sd_log_page_supported(sd_ssc_t *ssc, int log_page) 24194 { 24195 uchar_t *log_page_data; 24196 int i; 24197 int match = 0; 24198 int log_size; 24199 int status = 0; 24200 struct sd_lun *un; 24201 24202 ASSERT(ssc != NULL); 24203 un = ssc->ssc_un; 24204 ASSERT(un != NULL); 24205 24206 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 24207 24208 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0, 24209 SD_PATH_DIRECT); 24210 24211 if (status != 0) { 24212 if (status == EIO) { 24213 /* 24214 * Some disks do not support log sense, we 24215 * should ignore this kind of error(sense key is 24216 * 0x5 - illegal request). 
24217 */ 24218 uint8_t *sensep; 24219 int senlen; 24220 24221 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 24222 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 24223 ssc->ssc_uscsi_cmd->uscsi_rqresid); 24224 24225 if (senlen > 0 && 24226 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 24227 sd_ssc_assessment(ssc, 24228 SD_FMT_IGNORE_COMPROMISE); 24229 } else { 24230 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 24231 } 24232 } else { 24233 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24234 } 24235 24236 SD_ERROR(SD_LOG_COMMON, un, 24237 "sd_log_page_supported: failed log page retrieval\n"); 24238 kmem_free(log_page_data, 0xFF); 24239 return (-1); 24240 } 24241 24242 log_size = log_page_data[3]; 24243 24244 /* 24245 * The list of supported log pages start from the fourth byte. Check 24246 * until we run out of log pages or a match is found. 24247 */ 24248 for (i = 4; (i < (log_size + 4)) && !match; i++) { 24249 if (log_page_data[i] == log_page) { 24250 match++; 24251 } 24252 } 24253 kmem_free(log_page_data, 0xFF); 24254 return (match); 24255 } 24256 24257 24258 /* 24259 * Function: sd_mhdioc_failfast 24260 * 24261 * Description: This routine is the driver entry point for handling ioctl 24262 * requests to enable/disable the multihost failfast option. 24263 * (MHIOCENFAILFAST) 24264 * 24265 * Arguments: dev - the device number 24266 * arg - user specified probing interval. 24267 * flag - this argument is a pass through to ddi_copyxxx() 24268 * directly from the mode argument of ioctl(). 24269 * 24270 * Return Code: 0 24271 * EFAULT 24272 * ENXIO 24273 */ 24274 24275 static int 24276 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 24277 { 24278 struct sd_lun *un = NULL; 24279 int mh_time; 24280 int rval = 0; 24281 24282 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24283 return (ENXIO); 24284 } 24285 24286 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 24287 return (EFAULT); 24288 24289 if (mh_time) { 24290 mutex_enter(SD_MUTEX(un)); 24291 un->un_resvd_status |= SD_FAILFAST; 24292 mutex_exit(SD_MUTEX(un)); 24293 /* 24294 * If mh_time is INT_MAX, then this ioctl is being used for 24295 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 24296 */ 24297 if (mh_time != INT_MAX) { 24298 rval = sd_check_mhd(dev, mh_time); 24299 } 24300 } else { 24301 (void) sd_check_mhd(dev, 0); 24302 mutex_enter(SD_MUTEX(un)); 24303 un->un_resvd_status &= ~SD_FAILFAST; 24304 mutex_exit(SD_MUTEX(un)); 24305 } 24306 return (rval); 24307 } 24308 24309 24310 /* 24311 * Function: sd_mhdioc_takeown 24312 * 24313 * Description: This routine is the driver entry point for handling ioctl 24314 * requests to forcefully acquire exclusive access rights to the 24315 * multihost disk (MHIOCTKOWN). 24316 * 24317 * Arguments: dev - the device number 24318 * arg - user provided structure specifying the delay 24319 * parameters in milliseconds 24320 * flag - this argument is a pass through to ddi_copyxxx() 24321 * directly from the mode argument of ioctl(). 
24322 * 24323 * Return Code: 0 24324 * EFAULT 24325 * ENXIO 24326 */ 24327 24328 static int 24329 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 24330 { 24331 struct sd_lun *un = NULL; 24332 struct mhioctkown *tkown = NULL; 24333 int rval = 0; 24334 24335 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24336 return (ENXIO); 24337 } 24338 24339 if (arg != NULL) { 24340 tkown = (struct mhioctkown *) 24341 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 24342 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 24343 if (rval != 0) { 24344 rval = EFAULT; 24345 goto error; 24346 } 24347 } 24348 24349 rval = sd_take_ownership(dev, tkown); 24350 mutex_enter(SD_MUTEX(un)); 24351 if (rval == 0) { 24352 un->un_resvd_status |= SD_RESERVE; 24353 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 24354 sd_reinstate_resv_delay = 24355 tkown->reinstate_resv_delay * 1000; 24356 } else { 24357 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 24358 } 24359 /* 24360 * Give the scsi_watch routine interval set by 24361 * the MHIOCENFAILFAST ioctl precedence here. 24362 */ 24363 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 24364 mutex_exit(SD_MUTEX(un)); 24365 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 24366 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24367 "sd_mhdioc_takeown : %d\n", 24368 sd_reinstate_resv_delay); 24369 } else { 24370 mutex_exit(SD_MUTEX(un)); 24371 } 24372 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 24373 sd_mhd_reset_notify_cb, (caddr_t)un); 24374 } else { 24375 un->un_resvd_status &= ~SD_RESERVE; 24376 mutex_exit(SD_MUTEX(un)); 24377 } 24378 24379 error: 24380 if (tkown != NULL) { 24381 kmem_free(tkown, sizeof (struct mhioctkown)); 24382 } 24383 return (rval); 24384 } 24385 24386 24387 /* 24388 * Function: sd_mhdioc_release 24389 * 24390 * Description: This routine is the driver entry point for handling ioctl 24391 * requests to release exclusive access rights to the multihost 24392 * disk (MHIOCRELEASE). 24393 * 24394 * Arguments: dev - the device number 24395 * 24396 * Return Code: 0 24397 * ENXIO 24398 */ 24399 24400 static int 24401 sd_mhdioc_release(dev_t dev) 24402 { 24403 struct sd_lun *un = NULL; 24404 timeout_id_t resvd_timeid_save; 24405 int resvd_status_save; 24406 int rval = 0; 24407 24408 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24409 return (ENXIO); 24410 } 24411 24412 mutex_enter(SD_MUTEX(un)); 24413 resvd_status_save = un->un_resvd_status; 24414 un->un_resvd_status &= 24415 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 24416 if (un->un_resvd_timeid) { 24417 resvd_timeid_save = un->un_resvd_timeid; 24418 un->un_resvd_timeid = NULL; 24419 mutex_exit(SD_MUTEX(un)); 24420 (void) untimeout(resvd_timeid_save); 24421 } else { 24422 mutex_exit(SD_MUTEX(un)); 24423 } 24424 24425 /* 24426 * destroy any pending timeout thread that may be attempting to 24427 * reinstate reservation on this device. 
24428 */ 24429 sd_rmv_resv_reclaim_req(dev); 24430 24431 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 24432 mutex_enter(SD_MUTEX(un)); 24433 if ((un->un_mhd_token) && 24434 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 24435 mutex_exit(SD_MUTEX(un)); 24436 (void) sd_check_mhd(dev, 0); 24437 } else { 24438 mutex_exit(SD_MUTEX(un)); 24439 } 24440 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 24441 sd_mhd_reset_notify_cb, (caddr_t)un); 24442 } else { 24443 /* 24444 * sd_mhd_watch_cb will restart the resvd recover timeout thread 24445 */ 24446 mutex_enter(SD_MUTEX(un)); 24447 un->un_resvd_status = resvd_status_save; 24448 mutex_exit(SD_MUTEX(un)); 24449 } 24450 return (rval); 24451 } 24452 24453 24454 /* 24455 * Function: sd_mhdioc_register_devid 24456 * 24457 * Description: This routine is the driver entry point for handling ioctl 24458 * requests to register the device id (MHIOCREREGISTERDEVID). 24459 * 24460 * Note: The implementation for this ioctl has been updated to 24461 * be consistent with the original PSARC case (1999/357) 24462 * (4375899, 4241671, 4220005) 24463 * 24464 * Arguments: dev - the device number 24465 * 24466 * Return Code: 0 24467 * ENXIO 24468 */ 24469 24470 static int 24471 sd_mhdioc_register_devid(dev_t dev) 24472 { 24473 struct sd_lun *un = NULL; 24474 int rval = 0; 24475 sd_ssc_t *ssc; 24476 24477 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24478 return (ENXIO); 24479 } 24480 24481 ASSERT(!mutex_owned(SD_MUTEX(un))); 24482 24483 mutex_enter(SD_MUTEX(un)); 24484 24485 /* If a devid already exists, de-register it */ 24486 if (un->un_devid != NULL) { 24487 ddi_devid_unregister(SD_DEVINFO(un)); 24488 /* 24489 * After unregister devid, needs to free devid memory 24490 */ 24491 ddi_devid_free(un->un_devid); 24492 un->un_devid = NULL; 24493 } 24494 24495 /* Check for reservation conflict */ 24496 mutex_exit(SD_MUTEX(un)); 24497 ssc = sd_ssc_init(un); 24498 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 24499 mutex_enter(SD_MUTEX(un)); 24500 24501 switch (rval) { 24502 case 0: 24503 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 24504 break; 24505 case EACCES: 24506 break; 24507 default: 24508 rval = EIO; 24509 } 24510 24511 mutex_exit(SD_MUTEX(un)); 24512 if (rval != 0) { 24513 if (rval == EIO) 24514 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 24515 else 24516 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24517 } 24518 sd_ssc_fini(ssc); 24519 return (rval); 24520 } 24521 24522 24523 /* 24524 * Function: sd_mhdioc_inkeys 24525 * 24526 * Description: This routine is the driver entry point for handling ioctl 24527 * requests to issue the SCSI-3 Persistent In Read Keys command 24528 * to the device (MHIOCGRP_INKEYS). 24529 * 24530 * Arguments: dev - the device number 24531 * arg - user provided in_keys structure 24532 * flag - this argument is a pass through to ddi_copyxxx() 24533 * directly from the mode argument of ioctl(). 
24534 * 24535 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 24536 * ENXIO 24537 * EFAULT 24538 */ 24539 24540 static int 24541 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 24542 { 24543 struct sd_lun *un; 24544 mhioc_inkeys_t inkeys; 24545 int rval = 0; 24546 24547 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24548 return (ENXIO); 24549 } 24550 24551 #ifdef _MULTI_DATAMODEL 24552 switch (ddi_model_convert_from(flag & FMODELS)) { 24553 case DDI_MODEL_ILP32: { 24554 struct mhioc_inkeys32 inkeys32; 24555 24556 if (ddi_copyin(arg, &inkeys32, 24557 sizeof (struct mhioc_inkeys32), flag) != 0) { 24558 return (EFAULT); 24559 } 24560 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 24561 if ((rval = sd_persistent_reservation_in_read_keys(un, 24562 &inkeys, flag)) != 0) { 24563 return (rval); 24564 } 24565 inkeys32.generation = inkeys.generation; 24566 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 24567 flag) != 0) { 24568 return (EFAULT); 24569 } 24570 break; 24571 } 24572 case DDI_MODEL_NONE: 24573 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 24574 flag) != 0) { 24575 return (EFAULT); 24576 } 24577 if ((rval = sd_persistent_reservation_in_read_keys(un, 24578 &inkeys, flag)) != 0) { 24579 return (rval); 24580 } 24581 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 24582 flag) != 0) { 24583 return (EFAULT); 24584 } 24585 break; 24586 } 24587 24588 #else /* ! _MULTI_DATAMODEL */ 24589 24590 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 24591 return (EFAULT); 24592 } 24593 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 24594 if (rval != 0) { 24595 return (rval); 24596 } 24597 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 24598 return (EFAULT); 24599 } 24600 24601 #endif /* _MULTI_DATAMODEL */ 24602 24603 return (rval); 24604 } 24605 24606 24607 /* 24608 * Function: sd_mhdioc_inresv 24609 * 24610 * Description: This routine is the driver entry point for handling ioctl 24611 * requests to issue the SCSI-3 Persistent In Read Reservations 24612 * command to the device (MHIOCGRP_INKEYS). 24613 * 24614 * Arguments: dev - the device number 24615 * arg - user provided in_resv structure 24616 * flag - this argument is a pass through to ddi_copyxxx() 24617 * directly from the mode argument of ioctl(). 
24618 * 24619 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 24620 * ENXIO 24621 * EFAULT 24622 */ 24623 24624 static int 24625 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 24626 { 24627 struct sd_lun *un; 24628 mhioc_inresvs_t inresvs; 24629 int rval = 0; 24630 24631 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24632 return (ENXIO); 24633 } 24634 24635 #ifdef _MULTI_DATAMODEL 24636 24637 switch (ddi_model_convert_from(flag & FMODELS)) { 24638 case DDI_MODEL_ILP32: { 24639 struct mhioc_inresvs32 inresvs32; 24640 24641 if (ddi_copyin(arg, &inresvs32, 24642 sizeof (struct mhioc_inresvs32), flag) != 0) { 24643 return (EFAULT); 24644 } 24645 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 24646 if ((rval = sd_persistent_reservation_in_read_resv(un, 24647 &inresvs, flag)) != 0) { 24648 return (rval); 24649 } 24650 inresvs32.generation = inresvs.generation; 24651 if (ddi_copyout(&inresvs32, arg, 24652 sizeof (struct mhioc_inresvs32), flag) != 0) { 24653 return (EFAULT); 24654 } 24655 break; 24656 } 24657 case DDI_MODEL_NONE: 24658 if (ddi_copyin(arg, &inresvs, 24659 sizeof (mhioc_inresvs_t), flag) != 0) { 24660 return (EFAULT); 24661 } 24662 if ((rval = sd_persistent_reservation_in_read_resv(un, 24663 &inresvs, flag)) != 0) { 24664 return (rval); 24665 } 24666 if (ddi_copyout(&inresvs, arg, 24667 sizeof (mhioc_inresvs_t), flag) != 0) { 24668 return (EFAULT); 24669 } 24670 break; 24671 } 24672 24673 #else /* ! _MULTI_DATAMODEL */ 24674 24675 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 24676 return (EFAULT); 24677 } 24678 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 24679 if (rval != 0) { 24680 return (rval); 24681 } 24682 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 24683 return (EFAULT); 24684 } 24685 24686 #endif /* ! _MULTI_DATAMODEL */ 24687 24688 return (rval); 24689 } 24690 24691 24692 /* 24693 * The following routines support the clustering functionality described below 24694 * and implement lost reservation reclaim functionality. 24695 * 24696 * Clustering 24697 * ---------- 24698 * The clustering code uses two different, independent forms of SCSI 24699 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 24700 * Persistent Group Reservations. For any particular disk, it will use either 24701 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 24702 * 24703 * SCSI-2 24704 * The cluster software takes ownership of a multi-hosted disk by issuing the 24705 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 24706 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 24707 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 24708 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 24709 * driver. The meaning of failfast is that if the driver (on this host) ever 24710 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 24711 * it should immediately panic the host. The motivation for this ioctl is that 24712 * if this host does encounter reservation conflict, the underlying cause is 24713 * that some other host of the cluster has decided that this host is no longer 24714 * in the cluster and has seized control of the disks for itself. Since this 24715 * host is no longer in the cluster, it ought to panic itself. 
The
 * MHIOCENFAILFAST ioctl does two things:
 *	(a) it sets a flag that will cause any returned RESERVATION_CONFLICT
 *	error to panic the host
 *	(b) it sets up a periodic timer to test whether this host still has
 *	"access" (in that no other host has reserved the device): if the
 *	periodic timer gets RESERVATION_CONFLICT, the host is panicked. The
 *	purpose of that periodic timer is to handle scenarios where the host is
 *	otherwise temporarily quiescent, temporarily doing no real i/o.
 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another
 * host, by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve
 * for the device itself.
 *
 * SCSI-3 PGR
 * A direct semantic implementation of the SCSI-3 Persistent Reservation
 * facility is supported through the shared multihost disk ioctls
 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
 * MHIOCGRP_PREEMPTANDABORT).
 *
 * Reservation Reclaim:
 * --------------------
 * To support the lost reservation reclaim operations this driver creates a
 * single thread to handle reinstating reservations on all devices that have
 * lost them. sd_resv_reclaim_requests are logged for all devices that have
 * lost reservations when the scsi watch facility calls back sd_mhd_watch_cb,
 * and the reservation reclaim thread loops through the requests to regain
 * the lost reservations.
 */

/*
 * Function: sd_check_mhd()
 *
 * Description: This function sets up and submits a scsi watch request or
 *		terminates an existing watch request. This routine is used in
 *		support of reservation reclaim.
 *
 * Arguments: dev - the device 'dev_t' is used for context to discriminate
 *			among multiple watches that share the callback function
 *		interval - the number of milliseconds specifying the watch
 *			interval for issuing TEST UNIT READY commands. If
 *			set to 0 the watch should be terminated. If the
 *			interval is set to 0 and if the device is required
 *			to hold reservation while disabling failfast, the
 *			watch is restarted with an interval of
 *			reinstate_resv_delay.
 *
 * Return Code: 0 - Successful submit/terminate of scsi watch request
 *		ENXIO - Indicates an invalid device was specified
 *		EAGAIN - Unable to submit the scsi watch request
 */

static int
sd_check_mhd(dev_t dev, int interval)
{
	struct sd_lun	*un;
	opaque_t	token;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* is this a watch termination request? */
	if (interval == 0) {
		mutex_enter(SD_MUTEX(un));
		/* if there is an existing watch task then terminate it */
		if (un->un_mhd_token) {
			token = un->un_mhd_token;
			un->un_mhd_token = NULL;
			mutex_exit(SD_MUTEX(un));
			(void) scsi_watch_request_terminate(token,
			    SCSI_WATCH_TERMINATE_ALL_WAIT);
			mutex_enter(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Note: If we return here we don't check for the
			 * failfast case. This is the original legacy
			 * implementation but perhaps we should be checking
			 * the failfast case.
24794 */ 24795 return (0); 24796 } 24797 /* 24798 * If the device is required to hold reservation while 24799 * disabling failfast, we need to restart the scsi_watch 24800 * routine with an interval of reinstate_resv_delay. 24801 */ 24802 if (un->un_resvd_status & SD_RESERVE) { 24803 interval = sd_reinstate_resv_delay/1000; 24804 } else { 24805 /* no failfast so bail */ 24806 mutex_exit(SD_MUTEX(un)); 24807 return (0); 24808 } 24809 mutex_exit(SD_MUTEX(un)); 24810 } 24811 24812 /* 24813 * adjust minimum time interval to 1 second, 24814 * and convert from msecs to usecs 24815 */ 24816 if (interval > 0 && interval < 1000) { 24817 interval = 1000; 24818 } 24819 interval *= 1000; 24820 24821 /* 24822 * submit the request to the scsi_watch service 24823 */ 24824 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 24825 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 24826 if (token == NULL) { 24827 return (EAGAIN); 24828 } 24829 24830 /* 24831 * save token for termination later on 24832 */ 24833 mutex_enter(SD_MUTEX(un)); 24834 un->un_mhd_token = token; 24835 mutex_exit(SD_MUTEX(un)); 24836 return (0); 24837 } 24838 24839 24840 /* 24841 * Function: sd_mhd_watch_cb() 24842 * 24843 * Description: This function is the call back function used by the scsi watch 24844 * facility. The scsi watch facility sends the "Test Unit Ready" 24845 * and processes the status. If applicable (i.e. a "Unit Attention" 24846 * status and automatic "Request Sense" not used) the scsi watch 24847 * facility will send a "Request Sense" and retrieve the sense data 24848 * to be passed to this callback function. In either case the 24849 * automatic "Request Sense" or the facility submitting one, this 24850 * callback is passed the status and sense data. 24851 * 24852 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24853 * among multiple watches that share this callback function 24854 * resultp - scsi watch facility result packet containing scsi 24855 * packet, status byte and sense data 24856 * 24857 * Return Code: 0 - continue the watch task 24858 * non-zero - terminate the watch task 24859 */ 24860 24861 static int 24862 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 24863 { 24864 struct sd_lun *un; 24865 struct scsi_status *statusp; 24866 uint8_t *sensep; 24867 struct scsi_pkt *pkt; 24868 uchar_t actual_sense_length; 24869 dev_t dev = (dev_t)arg; 24870 24871 ASSERT(resultp != NULL); 24872 statusp = resultp->statusp; 24873 sensep = (uint8_t *)resultp->sensep; 24874 pkt = resultp->pkt; 24875 actual_sense_length = resultp->actual_sense_length; 24876 24877 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24878 return (ENXIO); 24879 } 24880 24881 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24882 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 24883 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 24884 24885 /* Begin processing of the status and/or sense data */ 24886 if (pkt->pkt_reason != CMD_CMPLT) { 24887 /* Handle the incomplete packet */ 24888 sd_mhd_watch_incomplete(un, pkt); 24889 return (0); 24890 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 24891 if (*((unsigned char *)statusp) 24892 == STATUS_RESERVATION_CONFLICT) { 24893 /* 24894 * Handle a reservation conflict by panicking if 24895 * configured for failfast or by logging the conflict 24896 * and updating the reservation status 24897 */ 24898 mutex_enter(SD_MUTEX(un)); 24899 if ((un->un_resvd_status & SD_FAILFAST) && 24900 (sd_failfast_enable)) { 24901 
sd_panic_for_res_conflict(un); 24902 /*NOTREACHED*/ 24903 } 24904 SD_INFO(SD_LOG_IOCTL_MHD, un, 24905 "sd_mhd_watch_cb: Reservation Conflict\n"); 24906 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 24907 mutex_exit(SD_MUTEX(un)); 24908 } 24909 } 24910 24911 if (sensep != NULL) { 24912 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 24913 mutex_enter(SD_MUTEX(un)); 24914 if ((scsi_sense_asc(sensep) == 24915 SD_SCSI_RESET_SENSE_CODE) && 24916 (un->un_resvd_status & SD_RESERVE)) { 24917 /* 24918 * The additional sense code indicates a power 24919 * on or bus device reset has occurred; update 24920 * the reservation status. 24921 */ 24922 un->un_resvd_status |= 24923 (SD_LOST_RESERVE | SD_WANT_RESERVE); 24924 SD_INFO(SD_LOG_IOCTL_MHD, un, 24925 "sd_mhd_watch_cb: Lost Reservation\n"); 24926 } 24927 } else { 24928 return (0); 24929 } 24930 } else { 24931 mutex_enter(SD_MUTEX(un)); 24932 } 24933 24934 if ((un->un_resvd_status & SD_RESERVE) && 24935 (un->un_resvd_status & SD_LOST_RESERVE)) { 24936 if (un->un_resvd_status & SD_WANT_RESERVE) { 24937 /* 24938 * A reset occurred in between the last probe and this 24939 * one so if a timeout is pending cancel it. 24940 */ 24941 if (un->un_resvd_timeid) { 24942 timeout_id_t temp_id = un->un_resvd_timeid; 24943 un->un_resvd_timeid = NULL; 24944 mutex_exit(SD_MUTEX(un)); 24945 (void) untimeout(temp_id); 24946 mutex_enter(SD_MUTEX(un)); 24947 } 24948 un->un_resvd_status &= ~SD_WANT_RESERVE; 24949 } 24950 if (un->un_resvd_timeid == 0) { 24951 /* Schedule a timeout to handle the lost reservation */ 24952 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 24953 (void *)dev, 24954 drv_usectohz(sd_reinstate_resv_delay)); 24955 } 24956 } 24957 mutex_exit(SD_MUTEX(un)); 24958 return (0); 24959 } 24960 24961 24962 /* 24963 * Function: sd_mhd_watch_incomplete() 24964 * 24965 * Description: This function is used to find out why a scsi pkt sent by the 24966 * scsi watch facility was not completed. Under some scenarios this 24967 * routine will return. Otherwise it will send a bus reset to see 24968 * if the drive is still online. 24969 * 24970 * Arguments: un - driver soft state (unit) structure 24971 * pkt - incomplete scsi pkt 24972 */ 24973 24974 static void 24975 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 24976 { 24977 int be_chatty; 24978 int perr; 24979 24980 ASSERT(pkt != NULL); 24981 ASSERT(un != NULL); 24982 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 24983 perr = (pkt->pkt_statistics & STAT_PERR); 24984 24985 mutex_enter(SD_MUTEX(un)); 24986 if (un->un_state == SD_STATE_DUMPING) { 24987 mutex_exit(SD_MUTEX(un)); 24988 return; 24989 } 24990 24991 switch (pkt->pkt_reason) { 24992 case CMD_UNX_BUS_FREE: 24993 /* 24994 * If we had a parity error that caused the target to drop BSY*, 24995 * don't be chatty about it. 24996 */ 24997 if (perr && be_chatty) { 24998 be_chatty = 0; 24999 } 25000 break; 25001 case CMD_TAG_REJECT: 25002 /* 25003 * The SCSI-2 spec states that a tag reject will be sent by the 25004 * target if tagged queuing is not supported. A tag reject may 25005 * also be sent during certain initialization periods or to 25006 * control internal resources. For the latter case the target 25007 * may also return Queue Full. 25008 * 25009 * If this driver receives a tag reject from a target that is 25010 * going through an init period or controlling internal 25011 * resources tagged queuing will be disabled. 
This is less than
 * optimal behavior, but the driver is unable to determine the target
 * state and assumes tagged queuing is not supported.
 */
		pkt->pkt_flags = 0;
		un->un_tagflags = 0;

		if (un->un_f_opt_queueing == TRUE) {
			un->un_throttle = min(un->un_throttle, 3);
		} else {
			un->un_throttle = 1;
		}
		mutex_exit(SD_MUTEX(un));
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
		mutex_enter(SD_MUTEX(un));
		break;
	case CMD_INCOMPLETE:
		/*
		 * The transport stopped with an abnormal state; fall through
		 * and reset the target and/or bus, unless selection did not
		 * complete (indicated by STATE_GOT_BUS), in which case we
		 * don't want to go through a target/bus reset.
		 */
		if (pkt->pkt_state == STATE_GOT_BUS) {
			break;
		}
		/*FALLTHROUGH*/

	case CMD_TIMEOUT:
	default:
		/*
		 * The lun may still be running the command, so a lun reset
		 * should be attempted. If the lun reset fails or cannot be
		 * issued, then try a target reset. Lastly try a bus reset.
		 */
		if ((pkt->pkt_statistics &
		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
			int reset_retval = 0;
			mutex_exit(SD_MUTEX(un));
			if (un->un_f_allow_bus_device_reset == TRUE) {
				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			}
			mutex_enter(SD_MUTEX(un));
		}
		break;
	}

	/* A device/bus reset has occurred; update the reservation status. */
	if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET))) {
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: Lost Reservation\n");
		}
	}

	/*
	 * The disk has been turned off; update the device state.
	 *
	 * Note: Should we be offlining the disk here?
	 */
	if (pkt->pkt_state == STATE_GOT_BUS) {
		SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
		    "Disk not responding to selection\n");
		if (un->un_state != SD_STATE_OFFLINE) {
			New_state(un, SD_STATE_OFFLINE);
		}
	} else if (be_chatty) {
		/*
		 * suppress messages if they are all the same pkt_reason;
		 * with TQ, many (up to 256) are returned with the same
		 * pkt_reason
		 */
		if (pkt->pkt_reason != un->un_last_pkt_reason) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: "
			    "SCSI transport failed: reason '%s'\n",
			    scsi_rname(pkt->pkt_reason));
		}
	}
	un->un_last_pkt_reason = pkt->pkt_reason;
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_sname()
 *
 * Description: This is a simple little routine to return a string containing
 *		a printable description of a command status byte for use in
 *		logging.
 *
 * Arguments: status - the status byte
 *
 * Return Code: char * - string containing the status description.
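 *		For example, sd_sname(STATUS_CHECK) returns the string
 *		"check condition".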
25120 */ 25121 25122 static char * 25123 sd_sname(uchar_t status) 25124 { 25125 switch (status & STATUS_MASK) { 25126 case STATUS_GOOD: 25127 return ("good status"); 25128 case STATUS_CHECK: 25129 return ("check condition"); 25130 case STATUS_MET: 25131 return ("condition met"); 25132 case STATUS_BUSY: 25133 return ("busy"); 25134 case STATUS_INTERMEDIATE: 25135 return ("intermediate"); 25136 case STATUS_INTERMEDIATE_MET: 25137 return ("intermediate - condition met"); 25138 case STATUS_RESERVATION_CONFLICT: 25139 return ("reservation_conflict"); 25140 case STATUS_TERMINATED: 25141 return ("command terminated"); 25142 case STATUS_QFULL: 25143 return ("queue full"); 25144 default: 25145 return ("<unknown status>"); 25146 } 25147 } 25148 25149 25150 /* 25151 * Function: sd_mhd_resvd_recover() 25152 * 25153 * Description: This function adds a reservation entry to the 25154 * sd_resv_reclaim_request list and signals the reservation 25155 * reclaim thread that there is work pending. If the reservation 25156 * reclaim thread has not been previously created this function 25157 * will kick it off. 25158 * 25159 * Arguments: arg - the device 'dev_t' is used for context to discriminate 25160 * among multiple watches that share this callback function 25161 * 25162 * Context: This routine is called by timeout() and is run in interrupt 25163 * context. It must not sleep or call other functions which may 25164 * sleep. 25165 */ 25166 25167 static void 25168 sd_mhd_resvd_recover(void *arg) 25169 { 25170 dev_t dev = (dev_t)arg; 25171 struct sd_lun *un; 25172 struct sd_thr_request *sd_treq = NULL; 25173 struct sd_thr_request *sd_cur = NULL; 25174 struct sd_thr_request *sd_prev = NULL; 25175 int already_there = 0; 25176 25177 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25178 return; 25179 } 25180 25181 mutex_enter(SD_MUTEX(un)); 25182 un->un_resvd_timeid = NULL; 25183 if (un->un_resvd_status & SD_WANT_RESERVE) { 25184 /* 25185 * There was a reset so don't issue the reserve, allow the 25186 * sd_mhd_watch_cb callback function to notice this and 25187 * reschedule the timeout for reservation. 25188 */ 25189 mutex_exit(SD_MUTEX(un)); 25190 return; 25191 } 25192 mutex_exit(SD_MUTEX(un)); 25193 25194 /* 25195 * Add this device to the sd_resv_reclaim_request list and the 25196 * sd_resv_reclaim_thread should take care of the rest. 25197 * 25198 * Note: We can't sleep in this context so if the memory allocation 25199 * fails allow the sd_mhd_watch_cb callback function to notice this and 25200 * reschedule the timeout for reservation. 
(4378460) 25201 */ 25202 sd_treq = (struct sd_thr_request *) 25203 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 25204 if (sd_treq == NULL) { 25205 return; 25206 } 25207 25208 sd_treq->sd_thr_req_next = NULL; 25209 sd_treq->dev = dev; 25210 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25211 if (sd_tr.srq_thr_req_head == NULL) { 25212 sd_tr.srq_thr_req_head = sd_treq; 25213 } else { 25214 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 25215 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 25216 if (sd_cur->dev == dev) { 25217 /* 25218 * already in Queue so don't log 25219 * another request for the device 25220 */ 25221 already_there = 1; 25222 break; 25223 } 25224 sd_prev = sd_cur; 25225 } 25226 if (!already_there) { 25227 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 25228 "logging request for %lx\n", dev); 25229 sd_prev->sd_thr_req_next = sd_treq; 25230 } else { 25231 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 25232 } 25233 } 25234 25235 /* 25236 * Create a kernel thread to do the reservation reclaim and free up this 25237 * thread. We cannot block this thread while we go away to do the 25238 * reservation reclaim 25239 */ 25240 if (sd_tr.srq_resv_reclaim_thread == NULL) 25241 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 25242 sd_resv_reclaim_thread, NULL, 25243 0, &p0, TS_RUN, v.v_maxsyspri - 2); 25244 25245 /* Tell the reservation reclaim thread that it has work to do */ 25246 cv_signal(&sd_tr.srq_resv_reclaim_cv); 25247 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25248 } 25249 25250 /* 25251 * Function: sd_resv_reclaim_thread() 25252 * 25253 * Description: This function implements the reservation reclaim operations 25254 * 25255 * Arguments: arg - the device 'dev_t' is used for context to discriminate 25256 * among multiple watches that share this callback function 25257 */ 25258 25259 static void 25260 sd_resv_reclaim_thread() 25261 { 25262 struct sd_lun *un; 25263 struct sd_thr_request *sd_mhreq; 25264 25265 /* Wait for work */ 25266 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25267 if (sd_tr.srq_thr_req_head == NULL) { 25268 cv_wait(&sd_tr.srq_resv_reclaim_cv, 25269 &sd_tr.srq_resv_reclaim_mutex); 25270 } 25271 25272 /* Loop while we have work */ 25273 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 25274 un = ddi_get_soft_state(sd_state, 25275 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 25276 if (un == NULL) { 25277 /* 25278 * softstate structure is NULL so just 25279 * dequeue the request and continue 25280 */ 25281 sd_tr.srq_thr_req_head = 25282 sd_tr.srq_thr_cur_req->sd_thr_req_next; 25283 kmem_free(sd_tr.srq_thr_cur_req, 25284 sizeof (struct sd_thr_request)); 25285 continue; 25286 } 25287 25288 /* dequeue the request */ 25289 sd_mhreq = sd_tr.srq_thr_cur_req; 25290 sd_tr.srq_thr_req_head = 25291 sd_tr.srq_thr_cur_req->sd_thr_req_next; 25292 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25293 25294 /* 25295 * Reclaim reservation only if SD_RESERVE is still set. There 25296 * may have been a call to MHIOCRELEASE before we got here. 25297 */ 25298 mutex_enter(SD_MUTEX(un)); 25299 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 25300 /* 25301 * Note: The SD_LOST_RESERVE flag is cleared before 25302 * reclaiming the reservation. 
If this is done after the 25303 * call to sd_reserve_release a reservation loss in the 25304 * window between pkt completion of reserve cmd and 25305 * mutex_enter below may not be recognized 25306 */ 25307 un->un_resvd_status &= ~SD_LOST_RESERVE; 25308 mutex_exit(SD_MUTEX(un)); 25309 25310 if (sd_reserve_release(sd_mhreq->dev, 25311 SD_RESERVE) == 0) { 25312 mutex_enter(SD_MUTEX(un)); 25313 un->un_resvd_status |= SD_RESERVE; 25314 mutex_exit(SD_MUTEX(un)); 25315 SD_INFO(SD_LOG_IOCTL_MHD, un, 25316 "sd_resv_reclaim_thread: " 25317 "Reservation Recovered\n"); 25318 } else { 25319 mutex_enter(SD_MUTEX(un)); 25320 un->un_resvd_status |= SD_LOST_RESERVE; 25321 mutex_exit(SD_MUTEX(un)); 25322 SD_INFO(SD_LOG_IOCTL_MHD, un, 25323 "sd_resv_reclaim_thread: Failed " 25324 "Reservation Recovery\n"); 25325 } 25326 } else { 25327 mutex_exit(SD_MUTEX(un)); 25328 } 25329 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25330 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 25331 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25332 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 25333 /* 25334 * wakeup the destroy thread if anyone is waiting on 25335 * us to complete. 25336 */ 25337 cv_signal(&sd_tr.srq_inprocess_cv); 25338 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25339 "sd_resv_reclaim_thread: cv_signalling current request \n"); 25340 } 25341 25342 /* 25343 * cleanup the sd_tr structure now that this thread will not exist 25344 */ 25345 ASSERT(sd_tr.srq_thr_req_head == NULL); 25346 ASSERT(sd_tr.srq_thr_cur_req == NULL); 25347 sd_tr.srq_resv_reclaim_thread = NULL; 25348 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25349 thread_exit(); 25350 } 25351 25352 25353 /* 25354 * Function: sd_rmv_resv_reclaim_req() 25355 * 25356 * Description: This function removes any pending reservation reclaim requests 25357 * for the specified device. 25358 * 25359 * Arguments: dev - the device 'dev_t' 25360 */ 25361 25362 static void 25363 sd_rmv_resv_reclaim_req(dev_t dev) 25364 { 25365 struct sd_thr_request *sd_mhreq; 25366 struct sd_thr_request *sd_prev; 25367 25368 /* Remove a reservation reclaim request from the list */ 25369 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25370 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 25371 /* 25372 * We are attempting to reinstate reservation for 25373 * this device. We wait for sd_reserve_release() 25374 * to return before we return. 25375 */ 25376 cv_wait(&sd_tr.srq_inprocess_cv, 25377 &sd_tr.srq_resv_reclaim_mutex); 25378 } else { 25379 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 25380 if (sd_mhreq && sd_mhreq->dev == dev) { 25381 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 25382 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25383 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25384 return; 25385 } 25386 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 25387 if (sd_mhreq && sd_mhreq->dev == dev) { 25388 break; 25389 } 25390 sd_prev = sd_mhreq; 25391 } 25392 if (sd_mhreq != NULL) { 25393 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 25394 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25395 } 25396 } 25397 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25398 } 25399 25400 25401 /* 25402 * Function: sd_mhd_reset_notify_cb() 25403 * 25404 * Description: This is a call back function for scsi_reset_notify. This 25405 * function updates the softstate reserved status and logs the 25406 * reset. The driver scsi watch facility callback function 25407 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 25408 * will reclaim the reservation. 
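 *		The callback is registered with the transport via
 *		scsi_reset_notify(SCSI_RESET_NOTIFY) when MHIOCTKOWN
 *		succeeds, and is cancelled with SCSI_RESET_CANCEL when
 *		MHIOCRELEASE gives up the reservation.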
25409 * 25410 * Arguments: arg - driver soft state (unit) structure 25411 */ 25412 25413 static void 25414 sd_mhd_reset_notify_cb(caddr_t arg) 25415 { 25416 struct sd_lun *un = (struct sd_lun *)arg; 25417 25418 mutex_enter(SD_MUTEX(un)); 25419 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 25420 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 25421 SD_INFO(SD_LOG_IOCTL_MHD, un, 25422 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 25423 } 25424 mutex_exit(SD_MUTEX(un)); 25425 } 25426 25427 25428 /* 25429 * Function: sd_take_ownership() 25430 * 25431 * Description: This routine implements an algorithm to achieve a stable 25432 * reservation on disks which don't implement priority reserve, 25433 * and makes sure that other host lose re-reservation attempts. 25434 * This algorithm contains of a loop that keeps issuing the RESERVE 25435 * for some period of time (min_ownership_delay, default 6 seconds) 25436 * During that loop, it looks to see if there has been a bus device 25437 * reset or bus reset (both of which cause an existing reservation 25438 * to be lost). If the reservation is lost issue RESERVE until a 25439 * period of min_ownership_delay with no resets has gone by, or 25440 * until max_ownership_delay has expired. This loop ensures that 25441 * the host really did manage to reserve the device, in spite of 25442 * resets. The looping for min_ownership_delay (default six 25443 * seconds) is important to early generation clustering products, 25444 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 25445 * MHIOCENFAILFAST periodic timer of two seconds. By having 25446 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 25447 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 25448 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 25449 * have already noticed, via the MHIOCENFAILFAST polling, that it 25450 * no longer "owns" the disk and will have panicked itself. Thus, 25451 * the host issuing the MHIOCTKOWN is assured (with timing 25452 * dependencies) that by the time it actually starts to use the 25453 * disk for real work, the old owner is no longer accessing it. 25454 * 25455 * min_ownership_delay is the minimum amount of time for which the 25456 * disk must be reserved continuously devoid of resets before the 25457 * MHIOCTKOWN ioctl will return success. 25458 * 25459 * max_ownership_delay indicates the amount of time by which the 25460 * take ownership should succeed or timeout with an error. 25461 * 25462 * Arguments: dev - the device 'dev_t' 25463 * *p - struct containing timing info. 25464 * 25465 * Return Code: 0 for success or error code 25466 */ 25467 25468 static int 25469 sd_take_ownership(dev_t dev, struct mhioctkown *p) 25470 { 25471 struct sd_lun *un; 25472 int rval; 25473 int err; 25474 int reservation_count = 0; 25475 int min_ownership_delay = 6000000; /* in usec */ 25476 int max_ownership_delay = 30000000; /* in usec */ 25477 clock_t start_time; /* starting time of this algorithm */ 25478 clock_t end_time; /* time limit for giving up */ 25479 clock_t ownership_time; /* time limit for stable ownership */ 25480 clock_t current_time; 25481 clock_t previous_current_time; 25482 25483 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25484 return (ENXIO); 25485 } 25486 25487 /* 25488 * Attempt a device reservation. A priority reservation is requested. 
25489 */ 25490 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 25491 != SD_SUCCESS) { 25492 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25493 "sd_take_ownership: return(1)=%d\n", rval); 25494 return (rval); 25495 } 25496 25497 /* Update the softstate reserved status to indicate the reservation */ 25498 mutex_enter(SD_MUTEX(un)); 25499 un->un_resvd_status |= SD_RESERVE; 25500 un->un_resvd_status &= 25501 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 25502 mutex_exit(SD_MUTEX(un)); 25503 25504 if (p != NULL) { 25505 if (p->min_ownership_delay != 0) { 25506 min_ownership_delay = p->min_ownership_delay * 1000; 25507 } 25508 if (p->max_ownership_delay != 0) { 25509 max_ownership_delay = p->max_ownership_delay * 1000; 25510 } 25511 } 25512 SD_INFO(SD_LOG_IOCTL_MHD, un, 25513 "sd_take_ownership: min, max delays: %d, %d\n", 25514 min_ownership_delay, max_ownership_delay); 25515 25516 start_time = ddi_get_lbolt(); 25517 current_time = start_time; 25518 ownership_time = current_time + drv_usectohz(min_ownership_delay); 25519 end_time = start_time + drv_usectohz(max_ownership_delay); 25520 25521 while (current_time - end_time < 0) { 25522 delay(drv_usectohz(500000)); 25523 25524 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 25525 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 25526 mutex_enter(SD_MUTEX(un)); 25527 rval = (un->un_resvd_status & 25528 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 25529 mutex_exit(SD_MUTEX(un)); 25530 break; 25531 } 25532 } 25533 previous_current_time = current_time; 25534 current_time = ddi_get_lbolt(); 25535 mutex_enter(SD_MUTEX(un)); 25536 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 25537 ownership_time = ddi_get_lbolt() + 25538 drv_usectohz(min_ownership_delay); 25539 reservation_count = 0; 25540 } else { 25541 reservation_count++; 25542 } 25543 un->un_resvd_status |= SD_RESERVE; 25544 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 25545 mutex_exit(SD_MUTEX(un)); 25546 25547 SD_INFO(SD_LOG_IOCTL_MHD, un, 25548 "sd_take_ownership: ticks for loop iteration=%ld, " 25549 "reservation=%s\n", (current_time - previous_current_time), 25550 reservation_count ? 
"ok" : "reclaimed"); 25551 25552 if (current_time - ownership_time >= 0 && 25553 reservation_count >= 4) { 25554 rval = 0; /* Achieved a stable ownership */ 25555 break; 25556 } 25557 if (current_time - end_time >= 0) { 25558 rval = EACCES; /* No ownership in max possible time */ 25559 break; 25560 } 25561 } 25562 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25563 "sd_take_ownership: return(2)=%d\n", rval); 25564 return (rval); 25565 } 25566 25567 25568 /* 25569 * Function: sd_reserve_release() 25570 * 25571 * Description: This function builds and sends scsi RESERVE, RELEASE, and 25572 * PRIORITY RESERVE commands based on a user specified command type 25573 * 25574 * Arguments: dev - the device 'dev_t' 25575 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 25576 * SD_RESERVE, SD_RELEASE 25577 * 25578 * Return Code: 0 or Error Code 25579 */ 25580 25581 static int 25582 sd_reserve_release(dev_t dev, int cmd) 25583 { 25584 struct uscsi_cmd *com = NULL; 25585 struct sd_lun *un = NULL; 25586 char cdb[CDB_GROUP0]; 25587 int rval; 25588 25589 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 25590 (cmd == SD_PRIORITY_RESERVE)); 25591 25592 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25593 return (ENXIO); 25594 } 25595 25596 /* instantiate and initialize the command and cdb */ 25597 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25598 bzero(cdb, CDB_GROUP0); 25599 com->uscsi_flags = USCSI_SILENT; 25600 com->uscsi_timeout = un->un_reserve_release_time; 25601 com->uscsi_cdblen = CDB_GROUP0; 25602 com->uscsi_cdb = cdb; 25603 if (cmd == SD_RELEASE) { 25604 cdb[0] = SCMD_RELEASE; 25605 } else { 25606 cdb[0] = SCMD_RESERVE; 25607 } 25608 25609 /* Send the command. */ 25610 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25611 SD_PATH_STANDARD); 25612 25613 /* 25614 * "break" a reservation that is held by another host, by issuing a 25615 * reset if priority reserve is desired, and we could not get the 25616 * device. 25617 */ 25618 if ((cmd == SD_PRIORITY_RESERVE) && 25619 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25620 /* 25621 * First try to reset the LUN. If we cannot, then try a target 25622 * reset, followed by a bus reset if the target reset fails. 25623 */ 25624 int reset_retval = 0; 25625 if (un->un_f_lun_reset_enabled == TRUE) { 25626 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 25627 } 25628 if (reset_retval == 0) { 25629 /* The LUN reset either failed or was not issued */ 25630 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25631 } 25632 if ((reset_retval == 0) && 25633 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 25634 rval = EIO; 25635 kmem_free(com, sizeof (*com)); 25636 return (rval); 25637 } 25638 25639 bzero(com, sizeof (struct uscsi_cmd)); 25640 com->uscsi_flags = USCSI_SILENT; 25641 com->uscsi_cdb = cdb; 25642 com->uscsi_cdblen = CDB_GROUP0; 25643 com->uscsi_timeout = 5; 25644 25645 /* 25646 * Reissue the last reserve command, this time without request 25647 * sense. Assume that it is just a regular reserve command. 25648 */ 25649 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25650 SD_PATH_STANDARD); 25651 } 25652 25653 /* Return an error if still getting a reservation conflict. 


/*
 * Function: sd_reserve_release()
 *
 * Description: This function builds and sends SCSI RESERVE, RELEASE, and
 *		PRIORITY RESERVE commands based on a user-specified command
 *		type.
 *
 *   Arguments: dev - the device 'dev_t'
 *		cmd - user-specified command type; one of SD_PRIORITY_RESERVE,
 *		      SD_RESERVE, SD_RELEASE
 *
 * Return Code: 0 or Error Code
 */

static int
sd_reserve_release(dev_t dev, int cmd)
{
	struct uscsi_cmd	*com = NULL;
	struct sd_lun		*un = NULL;
	char			cdb[CDB_GROUP0];
	int			rval;

	ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) ||
	    (cmd == SD_PRIORITY_RESERVE));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* instantiate and initialize the command and cdb */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP0);
	com->uscsi_flags   = USCSI_SILENT;
	com->uscsi_timeout = un->un_reserve_release_time;
	com->uscsi_cdblen  = CDB_GROUP0;
	com->uscsi_cdb	   = cdb;
	if (cmd == SD_RELEASE) {
		cdb[0] = SCMD_RELEASE;
	} else {
		cdb[0] = SCMD_RESERVE;
	}

	/* Send the command. */
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);

	/*
	 * "break" a reservation that is held by another host, by issuing a
	 * reset if priority reserve is desired, and we could not get the
	 * device.
	 */
	if ((cmd == SD_PRIORITY_RESERVE) &&
	    (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
		/*
		 * First try to reset the LUN. If we cannot, then try a target
		 * reset, followed by a bus reset if the target reset fails.
		 */
		int reset_retval = 0;
		if (un->un_f_lun_reset_enabled == TRUE) {
			reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
		}
		if (reset_retval == 0) {
			/* The LUN reset either failed or was not issued */
			reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
		}
		if ((reset_retval == 0) &&
		    (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) {
			rval = EIO;
			kmem_free(com, sizeof (*com));
			return (rval);
		}

		bzero(com, sizeof (struct uscsi_cmd));
		com->uscsi_flags   = USCSI_SILENT;
		com->uscsi_cdb	   = cdb;
		com->uscsi_cdblen  = CDB_GROUP0;
		com->uscsi_timeout = 5;

		/*
		 * Reissue the last reserve command, this time without request
		 * sense.  Assume that it is just a regular reserve command.
		 */
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
	}

	/* Return an error if still getting a reservation conflict. */
	if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
		rval = EACCES;
	}

	kmem_free(com, sizeof (*com));
	return (rval);
}
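
/*
 * Illustrative note (an addition, not original sd commentary): SCMD_RESERVE
 * and SCMD_RELEASE are the six-byte Group 0 RESERVE(6)/RELEASE(6) opcodes
 * (0x16 and 0x17), which is why only cdb[0] is set above; the remaining
 * bytes of a CDB_GROUP0 command stay zero for a whole-LUN reservation.
 */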


#define	SD_NDUMP_RETRIES	12
/*
 *	System Crash Dump routine
 */

static int
sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	int		partition;
	int		i;
	int		err;
	struct sd_lun	*un;
	struct scsi_pkt *wr_pktp;
	struct buf	*wr_bp;
	struct buf	wr_buf;
	daddr_t		tgt_byte_offset; /* rmw - byte offset for target */
	daddr_t		tgt_blkno;	/* rmw - blkno for target */
	size_t		tgt_byte_count; /* rmw - # of bytes to xfer */
	size_t		tgt_nblk; /* rmw - # of tgt blks to xfer */
	size_t		io_start_offset;
	int		doing_rmw = FALSE;
	int		rval;
	ssize_t		dma_resid;
	daddr_t		oblkno;
	diskaddr_t	nblks = 0;
	diskaddr_t	start_block;

	instance = SDUNIT(dev);
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    !SD_IS_VALID_LABEL(un) || ISCD(un)) {
		return (ENXIO);
	}

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))

	SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");

	partition = SDPART(dev);
	SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);

	if (!(NOT_DEVBSIZE(un))) {
		int secmask = 0;
		int blknomask = 0;

		blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
		secmask = un->un_tgt_blocksize - 1;

		if (blkno & blknomask) {
			SD_TRACE(SD_LOG_DUMP, un,
			    "sddump: dump start block not modulo %d\n",
			    un->un_tgt_blocksize);
			return (EINVAL);
		}

		if ((nblk * DEV_BSIZE) & secmask) {
			SD_TRACE(SD_LOG_DUMP, un,
			    "sddump: dump length not modulo %d\n",
			    un->un_tgt_blocksize);
			return (EINVAL);
		}

	}

	/* Validate the blocks to dump against the partition size. */

	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
	    &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);

	if (NOT_DEVBSIZE(un)) {
		if ((blkno + nblk) > nblks) {
			SD_TRACE(SD_LOG_DUMP, un,
			    "sddump: dump range larger than partition: "
			    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
			    blkno, nblk, nblks);
			return (EINVAL);
		}
	} else {
		if (((blkno / (un->un_tgt_blocksize / DEV_BSIZE)) +
		    (nblk / (un->un_tgt_blocksize / DEV_BSIZE))) > nblks) {
			SD_TRACE(SD_LOG_DUMP, un,
			    "sddump: dump range larger than partition: "
			    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
			    blkno, nblk, nblks);
			return (EINVAL);
		}
	}

	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		struct scsi_pkt *start_pktp;

		mutex_exit(&un->un_pm_mutex);

		/*
		 * use pm framework to power on HBA 1st
		 */
		(void) pm_raise_power(SD_DEVINFO(un), 0,
		    SD_PM_STATE_ACTIVE(un));

		/*
		 * Dump no longer uses sdpower to power on a device; it's
		 * in-line here so it can be done in polled mode.
		 */

		SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");

		start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
		    CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);

		if (start_pktp == NULL) {
			/* We were not given a SCSI packet, fail. */
			return (EIO);
		}
		bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
		start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
		start_pktp->pkt_cdbp[4] = SD_TARGET_START;
		start_pktp->pkt_flags = FLAG_NOINTR;

		mutex_enter(SD_MUTEX(un));
		SD_FILL_SCSI1_LUN(un, start_pktp);
		mutex_exit(SD_MUTEX(un));
		/*
		 * Scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD.
		 */
		if (sd_scsi_poll(un, start_pktp) != 0) {
			scsi_destroy_pkt(start_pktp);
			return (EIO);
		}
		scsi_destroy_pkt(start_pktp);
		(void) sd_pm_state_change(un, SD_PM_STATE_ACTIVE(un),
		    SD_PM_STATE_CHANGE);
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	mutex_enter(SD_MUTEX(un));
	un->un_throttle = 0;

	/*
	 * The first time through, reset the specific target device.
	 * However, when cpr calls sddump we know that sd is in a
	 * good state, so no bus reset is required.
	 * Clear sense data via Request Sense cmd.
	 * In sddump we don't care about allow_bus_device_reset anymore.
	 */

	if ((un->un_state != SD_STATE_SUSPENDED) &&
	    (un->un_state != SD_STATE_DUMPING)) {

		New_state(un, SD_STATE_DUMPING);

		if (un->un_f_is_fibre == FALSE) {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Attempt a bus reset for parallel scsi.
			 *
			 * Note: A bus reset is required because on some host
			 * systems (i.e. E420R) a bus device reset is
			 * insufficient to reset the state of the target.
			 *
			 * Note: Don't issue the reset for fibre-channel,
			 * because this tends to hang the bus (loop) for
			 * too long while everyone is logging out and in
			 * and the deadman timer for dumping will fire
			 * before the dump is complete.
			 */
			if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
				mutex_enter(SD_MUTEX(un));
				Restore_state(un);
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}

			/* Delay to give the device some recovery time. */
			drv_usecwait(10000);

			if (sd_send_polled_RQS(un) == SD_FAILURE) {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: sd_send_polled_RQS failed\n");
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * Convert the partition-relative block number to a
	 * disk physical block number.
	 */
	if (NOT_DEVBSIZE(un)) {
		blkno += start_block;
	} else {
		blkno = blkno / (un->un_tgt_blocksize / DEV_BSIZE);
		blkno += start_block;
	}

	SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);


	/*
	 * Check if the device has a non-512 block size.
	 */
	wr_bp = NULL;
	if (NOT_DEVBSIZE(un)) {
		tgt_byte_offset = blkno * un->un_sys_blocksize;
		tgt_byte_count = nblk * un->un_sys_blocksize;
		if ((tgt_byte_offset % un->un_tgt_blocksize) ||
		    (tgt_byte_count % un->un_tgt_blocksize)) {
			doing_rmw = TRUE;
			/*
			 * Calculate the block number and number of blocks
			 * in terms of the media block size.
			 */
			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
			tgt_nblk =
			    ((tgt_byte_offset + tgt_byte_count +
			    (un->un_tgt_blocksize - 1)) /
			    un->un_tgt_blocksize) - tgt_blkno;

			/*
			 * Invoke the routine which is going to do the read
			 * part of read-modify-write.
			 * Note that this routine returns a pointer to
			 * a valid bp in wr_bp.
			 */
			err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk,
			    &wr_bp);
			if (err) {
				mutex_exit(SD_MUTEX(un));
				return (err);
			}
			/*
			 * Offset is being calculated as -
			 * (original block # * system block size) -
			 * (new block # * target block size)
			 */
			io_start_offset =
			    ((uint64_t)(blkno * un->un_sys_blocksize)) -
			    ((uint64_t)(tgt_blkno * un->un_tgt_blocksize));

			ASSERT((io_start_offset >= 0) &&
			    (io_start_offset < un->un_tgt_blocksize));
			/*
			 * Do the modify portion of read modify write.
			 */
			bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset],
			    (size_t)nblk * un->un_sys_blocksize);
		} else {
			doing_rmw = FALSE;
			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
			tgt_nblk = tgt_byte_count / un->un_tgt_blocksize;
		}

		/* Convert blkno and nblk to target blocks */
		blkno = tgt_blkno;
		nblk = tgt_nblk;
	} else {
		wr_bp = &wr_buf;
		bzero(wr_bp, sizeof (struct buf));
		wr_bp->b_flags		= B_BUSY;
		wr_bp->b_un.b_addr	= addr;
		wr_bp->b_bcount		= nblk << DEV_BSHIFT;
		wr_bp->b_resid		= 0;
	}

	mutex_exit(SD_MUTEX(un));

	/*
	 * Obtain a SCSI packet for the write command.
	 * It should be safe to call the allocator here without
	 * worrying about being locked for DVMA mapping because
	 * the address we're passed is already a DVMA mapping.
	 *
	 * We are also not going to worry about semaphore ownership
	 * in the dump buffer. Dumping is single threaded at present.
	 */

	wr_pktp = NULL;

	dma_resid = wr_bp->b_bcount;
	oblkno = blkno;

	if (!(NOT_DEVBSIZE(un))) {
		nblk = nblk / (un->un_tgt_blocksize / DEV_BSIZE);
	}

	while (dma_resid != 0) {

		for (i = 0; i < SD_NDUMP_RETRIES; i++) {
			wr_bp->b_flags &= ~B_ERROR;

			if (un->un_partial_dma_supported == 1) {
				blkno = oblkno +
				    ((wr_bp->b_bcount - dma_resid) /
				    un->un_tgt_blocksize);
				nblk = dma_resid / un->un_tgt_blocksize;

				if (wr_pktp) {
					/*
					 * Partial DMA transfers after initial
					 * transfer
					 */
					rval = sd_setup_next_rw_pkt(un,
					    wr_pktp, wr_bp, blkno, nblk);
				} else {
					/* Initial transfer */
					rval = sd_setup_rw_pkt(un, &wr_pktp,
					    wr_bp, un->un_pkt_flags, NULL_FUNC,
					    NULL, blkno, nblk);
				}
			} else {
				rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp,
				    0, NULL_FUNC, NULL, blkno, nblk);
			}

			if (rval == 0) {
				/* We were given a SCSI packet, continue. */
				break;
			}

			if (i == 0) {
				if (wr_bp->b_flags & B_ERROR) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN,
					    "no resources for dumping; "
					    "error code: 0x%x, retrying",
					    geterror(wr_bp));
				} else {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN,
					    "no resources for dumping; "
					    "retrying");
				}
			} else if (i != (SD_NDUMP_RETRIES - 1)) {
				if (wr_bp->b_flags & B_ERROR) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_CONT,
					    "no resources for dumping; error "
					    "code: 0x%x, retrying\n",
					    geterror(wr_bp));
				}
			} else {
				if (wr_bp->b_flags & B_ERROR) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_CONT,
					    "no resources for dumping; "
					    "error code: 0x%x, retries failed, "
					    "giving up.\n", geterror(wr_bp));
				} else {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_CONT,
					    "no resources for dumping; "
					    "retries failed, giving up.\n");
				}
				mutex_enter(SD_MUTEX(un));
				Restore_state(un);
				if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) {
					mutex_exit(SD_MUTEX(un));
					scsi_free_consistent_buf(wr_bp);
				} else {
					mutex_exit(SD_MUTEX(un));
				}
				return (EIO);
			}
			drv_usecwait(10000);
		}

		if (un->un_partial_dma_supported == 1) {
			/*
			 * save the resid from PARTIAL_DMA
			 */
			dma_resid = wr_pktp->pkt_resid;
			if (dma_resid != 0)
				nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid);
			wr_pktp->pkt_resid = 0;
		} else {
			dma_resid = 0;
		}

		/* SunBug 1222170 */
		wr_pktp->pkt_flags = FLAG_NOINTR;

		err = EIO;
		for (i = 0; i < SD_NDUMP_RETRIES; i++) {

			/*
			 * Scsi_poll returns 0 (success) if the command
			 * completes and the status block is STATUS_GOOD.
			 * We should only check errors if this condition is
			 * not true.  Even then we should send our own request
			 * sense packet only if we have a check condition and
			 * auto request sense has not been performed by the
			 * hba.
			 */
			SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n");

			if ((sd_scsi_poll(un, wr_pktp) == 0) &&
			    (wr_pktp->pkt_resid == 0)) {
				err = SD_SUCCESS;
				break;
			}

			/*
			 * Check CMD_DEV_GONE 1st, give up if device is gone.
			 */
			if (wr_pktp->pkt_reason == CMD_DEV_GONE) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "Error while dumping state..."
				    "Device is gone\n");
				break;
			}

			if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: write failed with CHECK, "
				    "try # %d\n", i);
				if (((wr_pktp->pkt_state &
				    STATE_ARQ_DONE) == 0)) {
					(void) sd_send_polled_RQS(un);
				}

				continue;
			}

			if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) {
				int reset_retval = 0;

				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: write failed with BUSY, "
				    "try # %d\n", i);

				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval = scsi_reset(
					    SD_ADDRESS(un), RESET_LUN);
				}
				if (reset_retval == 0) {
					(void) scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
				(void) sd_send_polled_RQS(un);

			} else {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: write failed with 0x%x, "
				    "try # %d\n",
				    SD_GET_PKT_STATUS(wr_pktp), i);
				mutex_enter(SD_MUTEX(un));
				sd_reset_target(un, wr_pktp);
				mutex_exit(SD_MUTEX(un));
			}

			/*
			 * If we are not getting anywhere with lun/target
			 * resets, let's reset the bus.
			 */
			if (i == SD_NDUMP_RETRIES/2) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
				(void) sd_send_polled_RQS(un);
			}
		}
	}

	scsi_destroy_pkt(wr_pktp);
	mutex_enter(SD_MUTEX(un));
	if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) {
		mutex_exit(SD_MUTEX(un));
		scsi_free_consistent_buf(wr_bp);
	} else {
		mutex_exit(SD_MUTEX(un));
	}
	SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err);
	return (err);
}
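
/*
 * Illustrative example (an addition, not original sd commentary) of the
 * read-modify-write math above: with un_sys_blocksize = 512,
 * un_tgt_blocksize = 2048, blkno = 5 and nblk = 1, tgt_byte_offset is
 * 2560 and tgt_byte_count is 512, so the request is misaligned;
 * tgt_blkno becomes 1, tgt_nblk becomes 1, and io_start_offset =
 * 2560 - 2048 = 512, i.e. the 512-byte dump buffer is copied 512 bytes
 * into the 2 KB staging block that sddump_do_read_of_rmw() read back
 * from the media.
 */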

/*
 * Function: sd_scsi_poll()
 *
 * Description: This is a wrapper for the scsi_poll call.
 *
 *   Arguments: sd_lun - The unit structure
 *              scsi_pkt - The scsi packet being sent to the device.
 *
 * Return Code: 0 - Command completed successfully with good status
 *             -1 - Command failed.  This could indicate a check condition
 *                  or other status value requiring recovery action.
 *
 * NOTE: This code is only called from sddump().
 */

static int
sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp)
{
	int status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(pktp != NULL);

	status = SD_SUCCESS;

	if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) {
		pktp->pkt_flags |= un->un_tagflags;
		pktp->pkt_flags &= ~FLAG_NODISCON;
	}

	status = sd_ddi_scsi_poll(pktp);
	/*
	 * Scsi_poll returns 0 (success) if the command completes and the
	 * status block is STATUS_GOOD.  We should only check errors if this
	 * condition is not true.  Even then we should send our own request
	 * sense packet only if we have a check condition and auto
	 * request sense has not been performed by the hba.
	 * Don't get RQS data if pkt_reason is CMD_DEV_GONE.
	 */
	if ((status != SD_SUCCESS) &&
	    (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) &&
	    (pktp->pkt_state & STATE_ARQ_DONE) == 0 &&
	    (pktp->pkt_reason != CMD_DEV_GONE))
		(void) sd_send_polled_RQS(un);

	return (status);
}
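
/*
 * Illustrative note (an addition, not original sd commentary): when the
 * HBA reports the "tagged-qing" capability, the code above ORs in the
 * unit's tag flags and clears FLAG_NODISCON so the polled dump command
 * follows the same queuing discipline as normal I/O on that target.
 */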

/*
 * Function: sd_send_polled_RQS()
 *
 * Description: This sends the request sense command to a device.
 *
 *   Arguments: sd_lun - The unit structure
 *
 * Return Code: 0 - Command completed successfully with good status
 *             -1 - Command failed.
 *
 */

static int
sd_send_polled_RQS(struct sd_lun *un)
{
	int	ret_val;
	struct	scsi_pkt	*rqs_pktp;
	struct	buf		*rqs_bp;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	ret_val = SD_SUCCESS;

	rqs_pktp = un->un_rqs_pktp;
	rqs_bp	 = un->un_rqs_bp;

	mutex_enter(SD_MUTEX(un));

	if (un->un_sense_isbusy) {
		ret_val = SD_FAILURE;
		mutex_exit(SD_MUTEX(un));
		return (ret_val);
	}

	/*
	 * If the request sense buffer (and packet) is not in use,
	 * let's set the un_sense_isbusy and send our packet
	 */
	un->un_sense_isbusy = 1;
	rqs_pktp->pkt_resid = 0;
	rqs_pktp->pkt_reason = 0;
	rqs_pktp->pkt_flags |= FLAG_NOINTR;
	bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH);

	mutex_exit(SD_MUTEX(un));

	SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at"
	    " 0x%p\n", rqs_bp->b_un.b_addr);

	/*
	 * Can't send this to sd_scsi_poll, we wrap ourselves around the
	 * axle - it has a call into us!
	 */
	if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) {
		SD_INFO(SD_LOG_COMMON, un,
		    "sd_send_polled_RQS: RQS failed\n");
	}

	SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:",
	    (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX);

	mutex_enter(SD_MUTEX(un));
	un->un_sense_isbusy = 0;
	mutex_exit(SD_MUTEX(un));

	return (ret_val);
}
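
/*
 * Illustrative note (an addition, not original sd commentary):
 * un_rqs_pktp and un_rqs_bp form a single pre-allocated request-sense
 * packet per unit, guarded by un_sense_isbusy.  That is presumably why
 * the routine returns SD_FAILURE rather than blocking when the buffer
 * is already in use: it is called from polled dump context, where
 * sleeping on the resource would not be safe.
 */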

/*
 * Defines needed for localized version of the scsi_poll routine.
 */
#define	CSEC		10000			/* usecs */
#define	SEC_TO_CSEC	(1000000/CSEC)

/*
 * Function: sd_ddi_scsi_poll()
 *
 * Description: Localized version of the scsi_poll routine.  The purpose is to
 *		send a scsi_pkt to a device as a polled command.  This version
 *		is to ensure more robust handling of transport errors.
 *		Specifically this routine cures the not-ready-to-ready
 *		transition for power-up and reset of Sonoma arrays; this can
 *		take up to 45 seconds for power-on and 20 seconds for reset
 *		of a Sonoma LUN.
 *
 *   Arguments: scsi_pkt - The scsi_pkt being sent to a device
 *
 * Return Code: 0 - Command completed successfully with good status
 *             -1 - Command failed.
 *
 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can
 * be fixed (removing this code), we need to determine how to handle the
 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump().
 *
 * NOTE: This code is only called from sddump().
 */
static int
sd_ddi_scsi_poll(struct scsi_pkt *pkt)
{
	int			rval = -1;
	int			savef;
	long			savet;
	void			(*savec)();
	int			timeout;
	int			busy_count;
	int			poll_delay;
	int			rc;
	uint8_t			*sensep;
	struct scsi_arq_status	*arqstat;
	extern int		do_polled_io;

	ASSERT(pkt->pkt_scbp);

	/*
	 * save old flags..
	 */
	savef = pkt->pkt_flags;
	savec = pkt->pkt_comp;
	savet = pkt->pkt_time;

	pkt->pkt_flags |= FLAG_NOINTR;

	/*
	 * XXX there is nothing in the SCSA spec that states that we should not
	 * do a callback for polled cmds; however, removing this will break sd
	 * and probably other target drivers
	 */
	pkt->pkt_comp = NULL;

	/*
	 * we don't like a polled command without timeout.
	 * 60 seconds seems long enough.
	 */
	if (pkt->pkt_time == 0)
		pkt->pkt_time = SCSI_POLL_TIMEOUT;

	/*
	 * Send polled cmd.
	 *
	 * We do some error recovery for various errors.  Tran_busy,
	 * queue full, and non-dispatched commands are retried every 10 msec.
	 * as they are typically transient failures.  Busy status and Not
	 * Ready are retried every second as this status takes a while to
	 * change.
	 */
	timeout = pkt->pkt_time * SEC_TO_CSEC;

	for (busy_count = 0; busy_count < timeout; busy_count++) {
		/*
		 * Initialize pkt status variables.
		 */
		*pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0;

		if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) {
			if (rc != TRAN_BUSY) {
				/* Transport failed - give up. */
				break;
			} else {
				/* Transport busy - try again. */
				poll_delay = 1 * CSEC;		/* 10 msec. */
			}
		} else {
			/*
			 * Transport accepted - check pkt status.
			 */
			rc = (*pkt->pkt_scbp) & STATUS_MASK;
			if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_CHECK) &&
			    (pkt->pkt_state & STATE_ARQ_DONE)) {
				arqstat =
				    (struct scsi_arq_status *)(pkt->pkt_scbp);
				sensep = (uint8_t *)&arqstat->sts_sensedata;
			} else {
				sensep = NULL;
			}

			if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_GOOD)) {
				/* No error - we're done */
				rval = 0;
				break;

			} else if (pkt->pkt_reason == CMD_DEV_GONE) {
				/* Lost connection - give up */
				break;

			} else if ((pkt->pkt_reason == CMD_INCOMPLETE) &&
			    (pkt->pkt_state == 0)) {
				/* Pkt not dispatched - try again. */
				poll_delay = 1 * CSEC;		/* 10 msec. */

			} else if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_QFULL)) {
				/* Queue full - try again. */
				poll_delay = 1 * CSEC;		/* 10 msec. */

			} else if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_BUSY)) {
				/* Busy - try again. */
				poll_delay = 100 * CSEC;	/* 1 sec. */
				busy_count += (SEC_TO_CSEC - 1);

			} else if ((sensep != NULL) &&
			    (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) {
				/*
				 * Unit Attention - try again.
				 * Pretend it took 1 sec.
				 * NOTE: 'continue' avoids poll_delay
				 */
				busy_count += (SEC_TO_CSEC - 1);
				continue;

			} else if ((sensep != NULL) &&
			    (scsi_sense_key(sensep) == KEY_NOT_READY) &&
			    (scsi_sense_asc(sensep) == 0x04) &&
			    (scsi_sense_ascq(sensep) == 0x01)) {
				/*
				 * Not ready -> ready - try again.
				 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY
				 * ...same as STATUS_BUSY
				 */
				poll_delay = 100 * CSEC;	/* 1 sec. */
				busy_count += (SEC_TO_CSEC - 1);

			} else {
				/* BAD status - give up. */
				break;
			}
		}

		if (((curthread->t_flag & T_INTR_THREAD) == 0) &&
		    !do_polled_io) {
			delay(drv_usectohz(poll_delay));
		} else {
			/* we busy wait during cpr_dump or interrupt threads */
			drv_usecwait(poll_delay);
		}
	}

	pkt->pkt_flags = savef;
	pkt->pkt_comp = savec;
	pkt->pkt_time = savet;

	/* return on error */
	if (rval)
		return (rval);

	/*
	 * This is not a performance critical code path.
	 *
	 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync()
	 * issues associated with looking at DMA memory prior to
	 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return.
	 */
	scsi_sync_pkt(pkt);
	return (0);
}
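
/*
 * Illustrative note (an addition, not original sd commentary): the
 * busy_count loop above counts in CSEC (10 msec) units, so a pkt_time of
 * 60 seconds yields timeout = 60 * SEC_TO_CSEC = 6000 iterations.
 * Retries that sleep for a full second add SEC_TO_CSEC - 1 = 99 extra
 * counts, so together with the loop's own increment one slow retry
 * consumes one second of the budget rather than 10 msec.
 */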


/*
 * Function: sd_persistent_reservation_in_read_keys
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		multi-host persistent reservation requests (MHIOCGRP_INKEYS)
 *		by sending the SCSI-3 PRIN commands to the device.
 *		Processes the read keys command response by copying the
 *		reservation key information into the user provided buffer.
 *		Support for the 32/64 bit _MULTI_DATAMODEL is implemented.
 *
 *   Arguments: un   -  Pointer to soft state struct for the target.
 *		usrp -	user provided pointer to multihost Persistent In Read
 *			Keys structure (mhioc_inkeys_t)
 *		flag -	this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: 0   - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_send_scsi_cmd()
 *
 *     Context: Can sleep. Does not return until command is completed.
 */

static int
sd_persistent_reservation_in_read_keys(struct sd_lun *un,
    mhioc_inkeys_t *usrp, int flag)
{
#ifdef _MULTI_DATAMODEL
	struct mhioc_key_list32	li32;
#endif
	sd_prin_readkeys_t	*in;
	mhioc_inkeys_t		*ptr;
	mhioc_key_list_t	li;
	uchar_t			*data_bufp;
	int			data_len;
	int			rval = 0;
	size_t			copysz;
	sd_ssc_t		*ssc;

	if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) {
		return (EINVAL);
	}
	bzero(&li, sizeof (mhioc_key_list_t));

	ssc = sd_ssc_init(un);

	/*
	 * Get the listsize from user
	 */
#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		copysz = sizeof (struct mhioc_key_list32);
		if (ddi_copyin(ptr->li, &li32, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyin: mhioc_key_list32_t\n");
			rval = EFAULT;
			goto done;
		}
		li.listsize = li32.listsize;
		li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list;
		break;

	case DDI_MODEL_NONE:
		copysz = sizeof (mhioc_key_list_t);
		if (ddi_copyin(ptr->li, &li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyin: mhioc_key_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */
	copysz = sizeof (mhioc_key_list_t);
	if (ddi_copyin(ptr->li, &li, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_keys: "
		    "failed ddi_copyin: mhioc_key_list_t\n");
		rval = EFAULT;
		goto done;
	}
#endif

	data_len  = li.listsize * MHIOC_RESV_KEY_SIZE;
	data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t));
	data_bufp = kmem_zalloc(data_len, KM_SLEEP);

	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS,
	    data_len, data_bufp);
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto done;
	}
	in = (sd_prin_readkeys_t *)data_bufp;
	ptr->generation = BE_32(in->generation);
	li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE;

	/*
	 * Return the min(listsize, listlen) keys
	 */
#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		li32.listlen = li.listlen;
		if (ddi_copyout(&li32, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyout: mhioc_key_list32_t\n");
			rval = EFAULT;
			goto done;
		}
		break;

	case DDI_MODEL_NONE:
		if (ddi_copyout(&li, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyout: mhioc_key_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyout(&li, ptr->li, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_keys: "
		    "failed ddi_copyout: mhioc_key_list_t\n");
		rval = EFAULT;
		goto done;
	}

#endif /* _MULTI_DATAMODEL */

	copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE,
	    li.listsize * MHIOC_RESV_KEY_SIZE);
	if (ddi_copyout(&in->keylist, li.list, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_keys: "
		    "failed ddi_copyout: keylist\n");
		rval = EFAULT;
	}
done:
	sd_ssc_fini(ssc);
	kmem_free(data_bufp, data_len);
	return (rval);
}
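
/*
 * Usage sketch (hypothetical, for illustration only; not original sd
 * commentary): a caller typically issues MHIOCGRP_INKEYS twice, first
 * with li.listsize = 0 to learn li.listlen (the number of registered
 * keys the device reported), then again with a list buffer large enough
 * for listlen keys.  Only min(listsize, listlen) keys are copied out by
 * the routine above.
 */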


/*
 * Function: sd_persistent_reservation_in_read_resv
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		multi-host persistent reservation requests (MHIOCGRP_INRESV)
 *		by sending the SCSI-3 PRIN commands to the device.
 *		Process the read persistent reservations command response by
 *		copying the reservation information into the user provided
 *		buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented.
 *
 *   Arguments: un   -  Pointer to soft state struct for the target.
 *		usrp -	user provided pointer to multihost Persistent In Read
 *			Reservations structure (mhioc_inresvs_t)
 *		flag -	this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: 0   - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_send_scsi_cmd()
 *
 *     Context: Can sleep. Does not return until command is completed.
 */

static int
sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag)
{
#ifdef _MULTI_DATAMODEL
	struct mhioc_resv_desc_list32 resvlist32;
#endif
	sd_prin_readresv_t	*in;
	mhioc_inresvs_t		*ptr;
	sd_readresv_desc_t	*readresv_ptr;
	mhioc_resv_desc_list_t	resvlist;
	mhioc_resv_desc_t	resvdesc;
	uchar_t			*data_bufp = NULL;
	int			data_len;
	int			rval = 0;
	int			i;
	size_t			copysz;
	mhioc_resv_desc_t	*bufp;
	sd_ssc_t		*ssc;

	if ((ptr = usrp) == NULL) {
		return (EINVAL);
	}

	ssc = sd_ssc_init(un);

	/*
	 * Get the listsize from user
	 */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		copysz = sizeof (struct mhioc_resv_desc_list32);
		if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		resvlist.listsize = resvlist32.listsize;
		resvlist.list =
		    (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list;
		break;

	case DDI_MODEL_NONE:
		copysz = sizeof (mhioc_resv_desc_list_t);
		if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	copysz = sizeof (mhioc_resv_desc_list_t);
	if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_resv: "
		    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
		rval = EFAULT;
		goto done;
	}
#endif /* ! _MULTI_DATAMODEL */

	data_len  = resvlist.listsize * SCSI3_RESV_DESC_LEN;
	data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t));
	data_bufp = kmem_zalloc(data_len, KM_SLEEP);

	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV,
	    data_len, data_bufp);
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto done;
	}
	in = (sd_prin_readresv_t *)data_bufp;
	ptr->generation = BE_32(in->generation);
	resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN;

	/*
	 * Return the min(listsize, listlen) keys
	 */
#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		resvlist32.listlen = resvlist.listlen;
		if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;

	case DDI_MODEL_NONE:
		if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_resv: "
		    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
		rval = EFAULT;
		goto done;
	}

#endif /* ! _MULTI_DATAMODEL */

	readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc;
	bufp = resvlist.list;
	copysz = sizeof (mhioc_resv_desc_t);
	for (i = 0; i < min(resvlist.listlen, resvlist.listsize);
	    i++, readresv_ptr++, bufp++) {

		bcopy(&readresv_ptr->resvkey, &resvdesc.key,
		    MHIOC_RESV_KEY_SIZE);
		resvdesc.type  = readresv_ptr->type;
		resvdesc.scope = readresv_ptr->scope;
		resvdesc.scope_specific_addr =
		    BE_32(readresv_ptr->scope_specific_addr);

		if (ddi_copyout(&resvdesc, bufp, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: resvlist\n");
			rval = EFAULT;
			goto done;
		}
	}
done:
	sd_ssc_fini(ssc);
	/* only if data_bufp is allocated, we need to free it */
	if (data_bufp) {
		kmem_free(data_bufp, data_len);
	}
	return (rval);
}
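
/*
 * Illustrative note (an addition, not original sd commentary): the PRIN
 * READ RESERVATION payload is big-endian on the wire, so the generation
 * and scope_specific_addr fields are byte-swapped with BE_32() above,
 * while the 8-byte reservation key is copied through unchanged as an
 * opaque byte array.
 */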


/*
 * Function: sr_change_blkmode()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		block mode ioctl requests. Support for returning and changing
 *		the current block size in use by the device is implemented.
 *		The LBA size is changed via a MODE SELECT Block Descriptor.
 *
 *		This routine issues a mode sense with an allocation length of
 *		12 bytes for the mode page header and a single block
 *		descriptor.
 *
 *   Arguments: dev - the device 'dev_t'
 *		cmd - the request type; one of CDROMGBLKMODE (get) or
 *		      CDROMSBLKMODE (set)
 *		data - current block size or requested block size
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		       directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EINVAL if invalid arguments are provided
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EIO if invalid mode sense block descriptor length
 *
 */

static int
sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct mode_header	*sense_mhp, *select_mhp;
	struct block_descriptor	*sense_desc, *select_desc;
	int			current_bsize;
	int			rval = EINVAL;
	uchar_t			*sense = NULL;
	uchar_t			*select = NULL;
	sd_ssc_t		*ssc;

	ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * The block length is changed via the Mode Select block descriptor;
	 * the "Read/Write Error Recovery" mode page (0x1) contents are not
	 * actually required as part of this routine. Therefore the mode
	 * sense allocation length is specified to be the length of a mode
	 * page header and a block descriptor.
	 */
	sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
	    BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (rval);
	}

	/* Check the block descriptor len to handle only 1 block descriptor */
	sense_mhp = (struct mode_header *)sense;
	if ((sense_mhp->bdesc_length == 0) ||
	    (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense returned invalid block"
		    " descriptor length\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (EIO);
	}
	sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
	current_bsize = ((sense_desc->blksize_hi << 16) |
	    (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);

	/* Process command */
	switch (cmd) {
	case CDROMGBLKMODE:
		/* Return the block size obtained during the mode sense */
		if (ddi_copyout(&current_bsize, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSBLKMODE:
		/* Validate the requested block size */
		switch (data) {
		case CDROM_BLK_512:
		case CDROM_BLK_1024:
		case CDROM_BLK_2048:
		case CDROM_BLK_2056:
		case CDROM_BLK_2336:
		case CDROM_BLK_2340:
		case CDROM_BLK_2352:
		case CDROM_BLK_2368:
		case CDROM_BLK_2448:
		case CDROM_BLK_2646:
		case CDROM_BLK_2647:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: "
			    "Block Size '%ld' Not Supported\n", data);
			kmem_free(sense, BUFLEN_CHG_BLK_MODE);
			return (EINVAL);
		}

		/*
		 * The current block size matches the requested block size so
		 * there is no need to send the mode select to change the size
		 */
		if (current_bsize == data) {
			break;
		}

		/* Build the select data for the requested block size */
		select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_desc =
		    (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
		/*
		 * The LBA size is changed via the block descriptor, so the
		 * descriptor is built according to the user data
		 */
		select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
		select_desc->blksize_hi  = (char)(((data) & 0x00ff0000) >> 16);
		select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
		select_desc->blksize_lo  = (char)((data) & 0x000000ff);

		/* Send the mode select for the requested block size */
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
		    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
		    SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: Mode Select Failed\n");
			/*
			 * The mode select failed for the requested block size,
			 * so reset the data for the original block size and
			 * send it to the target. The error is indicated by the
			 * return value for the failed mode select.
			 */
			select_desc->blksize_hi  = sense_desc->blksize_hi;
			select_desc->blksize_mid = sense_desc->blksize_mid;
			select_desc->blksize_lo  = sense_desc->blksize_lo;
			ssc = sd_ssc_init(un);
			(void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
			    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
			    SD_PATH_STANDARD);
			sd_ssc_fini(ssc);
		} else {
			ASSERT(!mutex_owned(SD_MUTEX(un)));
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, (uint32_t)data, 0);
			mutex_exit(SD_MUTEX(un));
		}
		break;
	default:
		/* should not reach here, but check anyway */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		break;
	}

	if (select) {
		kmem_free(select, BUFLEN_CHG_BLK_MODE);
	}
	if (sense) {
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
	}
	return (rval);
}
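
/*
 * Illustrative example (an addition, not original sd commentary): for
 * CDROM_BLK_2048 the block descriptor built above carries the 24-bit
 * value 0x000800, i.e. blksize_hi = 0x00, blksize_mid = 0x08 and
 * blksize_lo = 0x00.
 */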


/*
 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines
 * implement driver support for getting and setting the CD speed. The command
 * set used will be based on the device type. If the device has not been
 * identified as MMC the Toshiba vendor specific mode page will be used. If
 * the device is MMC but does not support the Real Time Streaming feature
 * the SET CD SPEED command will be used to set speed and mode page 0x2A will
 * be used to read the speed.
 */

/*
 * Function: sr_change_speed()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		drive speed ioctl requests for devices supporting the Toshiba
 *		vendor specific drive speed mode page. Support for returning
 *		and changing the current drive speed in use by the device is
 *		implemented.
 *
 *   Arguments: dev - the device 'dev_t'
 *		cmd - the request type; one of CDROMGDRVSPEED (get) or
 *		      CDROMSDRVSPEED (set)
 *		data - current drive speed or requested drive speed
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		       directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EINVAL if invalid arguments are provided
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EIO if invalid mode sense block descriptor length
 */

static int
sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct mode_header	*sense_mhp, *select_mhp;
	struct mode_speed	*sense_page, *select_page;
	int			current_speed;
	int			rval = EINVAL;
	int			bd_len;
	uchar_t			*sense = NULL;
	uchar_t			*select = NULL;
	sd_ssc_t		*ssc;

	ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * Note: The drive speed is being modified here according to a Toshiba
	 * vendor specific mode page (0x31).
	 */
	sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
	    BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
		return (rval);
	}
	sense_mhp = (struct mode_header *)sense;

	/* Check the block descriptor len to handle only 1 block descriptor */
	bd_len = sense_mhp->bdesc_length;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Mode Sense returned invalid block "
		    "descriptor length\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
		return (EIO);
	}

	sense_page = (struct mode_speed *)
	    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
	current_speed = sense_page->speed;

	/* Process command */
	switch (cmd) {
	case CDROMGDRVSPEED:
		/* Return the drive speed obtained during the mode sense */
		if (current_speed == 0x2) {
			current_speed = CDROM_TWELVE_SPEED;
		}
		if (ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0) {
			rval = EFAULT;
		}
		break;
	case CDROMSDRVSPEED:
		/* Validate the requested drive speed */
		switch ((uchar_t)data) {
		case CDROM_TWELVE_SPEED:
			data = 0x2;
			/*FALLTHROUGH*/
		case CDROM_NORMAL_SPEED:
		case CDROM_DOUBLE_SPEED:
		case CDROM_QUAD_SPEED:
		case CDROM_MAXIMUM_SPEED:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_speed: "
			    "Drive Speed '%d' Not Supported\n", (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
			return (EINVAL);
		}

		/*
		 * The current drive speed matches the requested drive speed so
		 * there is no need to send the mode select to change the speed
		 */
		if (current_speed == data) {
			break;
		}

		/* Build the select data for the requested drive speed */
		select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_mhp->bdesc_length = 0;
		select_page =
		    (struct mode_speed *)(select + MODE_HEADER_LENGTH);
		select_page->mode_page.code = CDROM_MODE_SPEED;
		select_page->mode_page.length = 2;
		select_page->speed = (uchar_t)data;

		/* Send the mode select for the requested drive speed */
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
		    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
		    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (rval != 0) {
			/*
			 * The mode select failed for the requested drive
			 * speed, so reset the data for the original drive
			 * speed and send it to the target. The error is
			 * indicated by the return value for the failed mode
			 * select.
			 */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_speed: Mode Select Failed\n");
			select_page->speed = sense_page->speed;
			ssc = sd_ssc_init(un);
			(void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
			    select,
			    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
			    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
			sd_ssc_fini(ssc);
		}
		break;
	default:
		/* should not reach here, but check anyway */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		break;
	}

	if (select) {
		kmem_free(select, BUFLEN_MODE_CDROM_SPEED);
	}
	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
	}

	return (rval);
}
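
/*
 * Illustrative note (an addition, not original sd commentary): the
 * Toshiba speed page uses small speed codes rather than multiples of 1x;
 * the code above maps the raw value 0x2 to CDROM_TWELVE_SPEED on the way
 * out and back to 0x2 on the way in, so a CDROMGDRVSPEED/CDROMSDRVSPEED
 * round trip is symmetric.
 */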


/*
 * Function: sr_atapi_change_speed()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		drive speed ioctl requests for MMC devices that do not support
 *		the Real Time Streaming feature (0x107).
 *
 *		Note: This routine will use the SET SPEED command which may not
 *		be supported by all devices.
 *
 *   Arguments: dev - the device 'dev_t'
 *		cmd - the request type; one of CDROMGDRVSPEED (get) or
 *		      CDROMSDRVSPEED (set)
 *		data - current drive speed or requested drive speed
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		       directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EINVAL if invalid arguments are provided
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EIO if invalid mode sense block descriptor length
 */

static int
sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun			*un;
	struct uscsi_cmd		*com = NULL;
	struct mode_header_grp2		*sense_mhp;
	uchar_t				*sense_page;
	uchar_t				*sense = NULL;
	char				cdb[CDB_GROUP5];
	int				bd_len;
	int				current_speed = 0;
	int				max_speed = 0;
	int				rval;
	sd_ssc_t			*ssc;

	ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (rval);
	}

	/* Check the block descriptor len to handle only 1 block descriptor */
	sense_mhp = (struct mode_header_grp2 *)sense;
	bd_len = (sense_mhp->bdesc_length_hi << 8) |
	    sense_mhp->bdesc_length_lo;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense returned invalid "
		    "block descriptor length\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (EIO);
	}

	/* Calculate the current and maximum drive speeds */
	sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
	current_speed = (sense_page[14] << 8) | sense_page[15];
	max_speed = (sense_page[8] << 8) | sense_page[9];

	/* Process the command */
	switch (cmd) {
	case CDROMGDRVSPEED:
		current_speed /= SD_SPEED_1X;
		if (ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSDRVSPEED:
		/* Convert the speed code to KB/sec */
		switch ((uchar_t)data) {
		case CDROM_NORMAL_SPEED:
			current_speed = SD_SPEED_1X;
			break;
		case CDROM_DOUBLE_SPEED:
			current_speed = 2 * SD_SPEED_1X;
			break;
		case CDROM_QUAD_SPEED:
			current_speed = 4 * SD_SPEED_1X;
			break;
		case CDROM_TWELVE_SPEED:
			current_speed = 12 * SD_SPEED_1X;
			break;
		case CDROM_MAXIMUM_SPEED:
			current_speed = 0xffff;
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_atapi_change_speed: invalid drive speed %d\n",
			    (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
			return (EINVAL);
		}

		/* Check the request against the drive's max speed. */
		if (current_speed != 0xffff) {
			if (current_speed > max_speed) {
				kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
				return (EINVAL);
			}
		}

		/*
		 * Build and send the SET SPEED command
		 *
		 * Note: The SET SPEED (0xBB) command used in this routine is
		 * obsolete per the SCSI MMC spec but still supported in the
		 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
		 * therefore the command is still implemented in this routine.
		 */
		bzero(cdb, sizeof (cdb));
		cdb[0] = (char)SCMD_SET_CDROM_SPEED;
		cdb[2] = (uchar_t)(current_speed >> 8);
		cdb[3] = (uchar_t)current_speed;
		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
		com->uscsi_cdb	   = (caddr_t)cdb;
		com->uscsi_cdblen  = CDB_GROUP5;
		com->uscsi_bufaddr = NULL;
		com->uscsi_buflen  = 0;
		com->uscsi_flags   = USCSI_DIAGNOSE|USCSI_SILENT;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0,
		    SD_PATH_STANDARD);
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Command '%x' Not Supported\n",
		    cmd);
		rval = EINVAL;
	}

	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
	}
	if (com) {
		kmem_free(com, sizeof (*com));
	}
	return (rval);
}
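
/*
 * Illustrative note (an addition, not original sd commentary): SET CD
 * SPEED takes its argument in KB/sec, so the switch above converts the
 * CDROM_*_SPEED codes to multiples of SD_SPEED_1X (the 1x CD data rate,
 * roughly 176 KB/sec), with 0xffff meaning "maximum speed" per MMC.
 */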


/*
 * Function: sr_pause_resume()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		pause/resume ioctl requests. This only affects the audio play
 *		operation.
 *
 *   Arguments: dev - the device 'dev_t'
 *		cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
 *		      for setting the resume bit of the cdb.
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EINVAL if invalid mode specified
 *
 */

static int
sr_pause_resume(dev_t dev, int cmd)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	char			cdb[CDB_GROUP1];
	int			rval;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_PAUSE_RESUME;
	switch (cmd) {
	case CDROMRESUME:
		cdb[8] = 1;
		break;
	case CDROMPAUSE:
		cdb[8] = 0;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:"
		    " Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		goto done;
	}

	com->uscsi_cdb    = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_flags  = USCSI_DIAGNOSE|USCSI_SILENT;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);

done:
	kmem_free(com, sizeof (*com));
	return (rval);
}
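
/*
 * Illustrative note (an addition, not original sd commentary): cdb[8]
 * above is the Resume bit of the PAUSE/RESUME command (opcode 0x4B):
 * 1 resumes audio play, 0 pauses it.
 */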


/*
 * Function: sr_play_msf()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to output the audio signals at the specified
 *		starting address and continue the audio play until the
 *		specified ending address (CDROMPLAYMSF). The address is in
 *		Minute Second Frame (MSF) format.
 *
 *   Arguments: dev	- the device 'dev_t'
 *		data	- pointer to user provided audio msf structure,
 *			  specifying start/end addresses.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_play_msf(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_msf	msf_struct;
	struct cdrom_msf	*msf = &msf_struct;
	char			cdb[CDB_GROUP1];
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) {
		return (EFAULT);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_PLAYAUDIO_MSF;
	if (un->un_f_cfg_playmsf_bcd == TRUE) {
		cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0);
		cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0);
		cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0);
		cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1);
		cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1);
		cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1);
	} else {
		cdb[3] = msf->cdmsf_min0;
		cdb[4] = msf->cdmsf_sec0;
		cdb[5] = msf->cdmsf_frame0;
		cdb[6] = msf->cdmsf_min1;
		cdb[7] = msf->cdmsf_sec1;
		cdb[8] = msf->cdmsf_frame1;
	}
	com->uscsi_cdb    = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_flags  = USCSI_DIAGNOSE|USCSI_SILENT;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	kmem_free(com, sizeof (*com));
	return (rval);
}
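
/*
 * Illustrative example (an addition, not original sd commentary): some
 * drives expect the MSF address in binary-coded decimal, hence the
 * un_f_cfg_playmsf_bcd branch above; BYTE_TO_BCD(59), for example,
 * encodes decimal 59 as the byte 0x59.
 */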


/*
 * Function: sr_play_trkind()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to output the audio signals at the specified
 *		starting address and continue the audio play until the
 *		specified ending address (CDROMPLAYTRKIND). The address is in
 *		Track Index format.
 *
 *   Arguments: dev	- the device 'dev_t'
 *		data	- pointer to user provided audio track/index structure,
 *			  specifying start/end addresses.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_play_trkind(dev_t dev, caddr_t data, int flag)
{
	struct cdrom_ti		ti_struct;
	struct cdrom_ti		*ti = &ti_struct;
	struct uscsi_cmd	*com = NULL;
	char			cdb[CDB_GROUP1];
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) {
		return (EFAULT);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_PLAYAUDIO_TI;
	cdb[4] = ti->cdti_trk0;
	cdb[5] = ti->cdti_ind0;
	cdb[7] = ti->cdti_trk1;
	cdb[8] = ti->cdti_ind1;
	com->uscsi_cdb    = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_flags  = USCSI_DIAGNOSE|USCSI_SILENT;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	kmem_free(com, sizeof (*com));
	return (rval);
}
_MULTI_DATAMODEL */
27532 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
27533 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27534 "sr_read_all_subcodes: ddi_copyin Failed\n");
27535 kmem_free(subcode, sizeof (struct cdrom_subcode));
27536 return (EFAULT);
27537 }
27538 #endif /* _MULTI_DATAMODEL */
27539 
27540 /*
27541 * Since MMC-2 expects max 3 bytes for length, check if the
27542 * length input is greater than 3 bytes
27543 */
27544 if ((subcode->cdsc_length & 0xFF000000) != 0) {
27545 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27546 "sr_read_all_subcodes: "
27547 "cdrom transfer length too large: %d (limit %d)\n",
27548 subcode->cdsc_length, 0xFFFFFF);
27549 kmem_free(subcode, sizeof (struct cdrom_subcode));
27550 return (EINVAL);
27551 }
27552 
27553 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
27554 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27555 bzero(cdb, CDB_GROUP5);
27556 
27557 if (un->un_f_mmc_cap == TRUE) {
27558 cdb[0] = (char)SCMD_READ_CD;
27559 cdb[2] = (char)0xff;
27560 cdb[3] = (char)0xff;
27561 cdb[4] = (char)0xff;
27562 cdb[5] = (char)0xff;
27563 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
27564 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
27565 cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
27566 cdb[10] = 1;
27567 } else {
27568 /*
27569 * Note: A vendor specific command (0xDF) is being used here to
27570 * request a read of all subcodes.
27571 */
27572 cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
27573 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
27574 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
27575 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
27576 cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
27577 }
27578 com->uscsi_cdb = cdb;
27579 com->uscsi_cdblen = CDB_GROUP5;
27580 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
27581 com->uscsi_buflen = buflen;
27582 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27583 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
27584 SD_PATH_STANDARD);
27585 kmem_free(subcode, sizeof (struct cdrom_subcode));
27586 kmem_free(com, sizeof (*com));
27587 return (rval);
27588 }
27589 
27590 
27591 /*
27592 * Function: sr_read_subchannel()
27593 *
27594 * Description: This routine is the driver entry point for handling CD-ROM
27595 * ioctl requests to return the Q sub-channel data of the CD
27596 * current position block. (CDROMSUBCHNL) The data includes the
27597 * track number, index number, absolute CD-ROM address (LBA or MSF
27598 * format per the user), track relative CD-ROM address (LBA or MSF
27599 * format per the user), control data and audio status.
27600 *
27601 * Arguments: dev - the device 'dev_t'
27602 * data - pointer to user provided cdrom sub-channel structure
27603 * flag - this argument is a pass through to ddi_copyxxx()
27604 * directly from the mode argument of ioctl().
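 *
 *		Illustrative userland sketch (an assumption for documentation,
 *		not driver code; 'fd' is an assumed open descriptor for the
 *		raw CD device):
 *
 *			struct cdrom_subchnl sc;
 *
 *			sc.cdsc_format = CDROM_MSF;	(request MSF addresses)
 *			if (ioctl(fd, CDROMSUBCHNL, &sc) == 0)
 *				(void) printf("track %d index %d\n",
 *				    sc.cdsc_trk, sc.cdsc_ind);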
27605 * 27606 * Return Code: the code returned by sd_send_scsi_cmd() 27607 * EFAULT if ddi_copyxxx() fails 27608 * ENXIO if fail ddi_get_soft_state 27609 * EINVAL if data pointer is NULL 27610 */ 27611 27612 static int 27613 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 27614 { 27615 struct sd_lun *un; 27616 struct uscsi_cmd *com; 27617 struct cdrom_subchnl subchanel; 27618 struct cdrom_subchnl *subchnl = &subchanel; 27619 char cdb[CDB_GROUP1]; 27620 caddr_t buffer; 27621 int rval; 27622 27623 if (data == NULL) { 27624 return (EINVAL); 27625 } 27626 27627 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27628 (un->un_state == SD_STATE_OFFLINE)) { 27629 return (ENXIO); 27630 } 27631 27632 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 27633 return (EFAULT); 27634 } 27635 27636 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 27637 bzero(cdb, CDB_GROUP1); 27638 cdb[0] = SCMD_READ_SUBCHANNEL; 27639 /* Set the MSF bit based on the user requested address format */ 27640 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 27641 /* 27642 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 27643 * returned 27644 */ 27645 cdb[2] = 0x40; 27646 /* 27647 * Set byte 3 to specify the return data format. A value of 0x01 27648 * indicates that the CD-ROM current position should be returned. 27649 */ 27650 cdb[3] = 0x01; 27651 cdb[8] = 0x10; 27652 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27653 com->uscsi_cdb = cdb; 27654 com->uscsi_cdblen = CDB_GROUP1; 27655 com->uscsi_bufaddr = buffer; 27656 com->uscsi_buflen = 16; 27657 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27658 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27659 SD_PATH_STANDARD); 27660 if (rval != 0) { 27661 kmem_free(buffer, 16); 27662 kmem_free(com, sizeof (*com)); 27663 return (rval); 27664 } 27665 27666 /* Process the returned Q sub-channel data */ 27667 subchnl->cdsc_audiostatus = buffer[1]; 27668 subchnl->cdsc_adr = (buffer[5] & 0xF0); 27669 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 27670 subchnl->cdsc_trk = buffer[6]; 27671 subchnl->cdsc_ind = buffer[7]; 27672 if (subchnl->cdsc_format & CDROM_LBA) { 27673 subchnl->cdsc_absaddr.lba = 27674 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27675 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27676 subchnl->cdsc_reladdr.lba = 27677 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 27678 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 27679 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 27680 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 27681 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 27682 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 27683 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 27684 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 27685 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 27686 } else { 27687 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 27688 subchnl->cdsc_absaddr.msf.second = buffer[10]; 27689 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 27690 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 27691 subchnl->cdsc_reladdr.msf.second = buffer[14]; 27692 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 27693 } 27694 kmem_free(buffer, 16); 27695 kmem_free(com, sizeof (*com)); 27696 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 27697 != 0) { 27698 return (EFAULT); 27699 } 27700 return (rval); 27701 } 27702 27703 27704 /* 27705 * Function: sr_read_tocentry() 27706 * 
27707 * Description: This routine is the driver entry point for handling CD-ROM
27708 * ioctl requests to read from the Table of Contents (TOC)
27709 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL
27710 * fields, the starting address (LBA or MSF format per the user)
27711 * and the data mode if the user specified track is a data track.
27712 *
27713 * Note: The READ HEADER (0x44) command used in this routine is
27714 * obsolete per the SCSI MMC spec but still supported in the
27715 * MT FUJI vendor spec. Most equipment adheres to MT FUJI,
27716 * therefore the command is still implemented in this routine.
27717 *
27718 * Arguments: dev - the device 'dev_t'
27719 * data - pointer to user provided toc entry structure,
27720 * specifying the track # and the address format
27721 * (LBA or MSF).
27722 * flag - this argument is a pass through to ddi_copyxxx()
27723 * directly from the mode argument of ioctl().
27724 *
27725 * Return Code: the code returned by sd_send_scsi_cmd()
27726 * EFAULT if ddi_copyxxx() fails
27727 * ENXIO if fail ddi_get_soft_state
27728 * EINVAL if data pointer is NULL
27729 */
27730 
27731 static int
27732 sr_read_tocentry(dev_t dev, caddr_t data, int flag)
27733 {
27734 struct sd_lun *un = NULL;
27735 struct uscsi_cmd *com;
27736 struct cdrom_tocentry toc_entry;
27737 struct cdrom_tocentry *entry = &toc_entry;
27738 caddr_t buffer;
27739 int rval;
27740 char cdb[CDB_GROUP1];
27741 
27742 if (data == NULL) {
27743 return (EINVAL);
27744 }
27745 
27746 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27747 (un->un_state == SD_STATE_OFFLINE)) {
27748 return (ENXIO);
27749 }
27750 
27751 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
27752 return (EFAULT);
27753 }
27754 
27755 /* Validate the requested track and address format */
27756 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
27757 return (EINVAL);
27758 }
27759 
27760 if (entry->cdte_track == 0) {
27761 return (EINVAL);
27762 }
27763 
27764 buffer = kmem_zalloc((size_t)12, KM_SLEEP);
27765 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27766 bzero(cdb, CDB_GROUP1);
27767 
27768 cdb[0] = SCMD_READ_TOC;
27769 /* Set the MSF bit based on the user requested address format */
27770 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
27771 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
27772 cdb[6] = BYTE_TO_BCD(entry->cdte_track);
27773 } else {
27774 cdb[6] = entry->cdte_track;
27775 }
27776 
27777 /*
27778 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
27779 * (4 byte TOC response header + 8 byte track descriptor)
27780 */
27781 cdb[8] = 12;
27782 com->uscsi_cdb = cdb;
27783 com->uscsi_cdblen = CDB_GROUP1;
27784 com->uscsi_bufaddr = buffer;
27785 com->uscsi_buflen = 0x0C;
27786 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
27787 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27788 SD_PATH_STANDARD);
27789 if (rval != 0) {
27790 kmem_free(buffer, 12);
27791 kmem_free(com, sizeof (*com));
27792 return (rval);
27793 }
27794 
27795 /* Process the toc entry */
27796 entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
27797 entry->cdte_ctrl = (buffer[5] & 0x0F);
27798 if (entry->cdte_format & CDROM_LBA) {
27799 entry->cdte_addr.lba =
27800 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
27801 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
27802 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
27803 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
27804 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
27805 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
27806 /*
27807 * Send a READ TOC command using the LBA address format to get
27808 * the LBA for the track requested so it can be used in the
27809 * READ HEADER request
27810 *
27811 * Note: The MSF bit of the READ HEADER command specifies the
27812 * output format. The block address specified in that command
27813 * must be in LBA format.
27814 */
27815 cdb[1] = 0;
27816 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27817 SD_PATH_STANDARD);
27818 if (rval != 0) {
27819 kmem_free(buffer, 12);
27820 kmem_free(com, sizeof (*com));
27821 return (rval);
27822 }
27823 } else {
27824 entry->cdte_addr.msf.minute = buffer[9];
27825 entry->cdte_addr.msf.second = buffer[10];
27826 entry->cdte_addr.msf.frame = buffer[11];
27827 /*
27828 * Send a READ TOC command using the LBA address format to get
27829 * the LBA for the track requested so it can be used in the
27830 * READ HEADER request
27831 *
27832 * Note: The MSF bit of the READ HEADER command specifies the
27833 * output format. The block address specified in that command
27834 * must be in LBA format.
27835 */
27836 cdb[1] = 0;
27837 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27838 SD_PATH_STANDARD);
27839 if (rval != 0) {
27840 kmem_free(buffer, 12);
27841 kmem_free(com, sizeof (*com));
27842 return (rval);
27843 }
27844 }
27845 
27846 /*
27847 * Build and send the READ HEADER command to determine the data mode of
27848 * the user specified track.
27849 */
27850 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
27851 (entry->cdte_track != CDROM_LEADOUT)) {
27852 bzero(cdb, CDB_GROUP1);
27853 cdb[0] = SCMD_READ_HEADER;
27854 cdb[2] = buffer[8];
27855 cdb[3] = buffer[9];
27856 cdb[4] = buffer[10];
27857 cdb[5] = buffer[11];
27858 cdb[8] = 0x08;
27859 com->uscsi_buflen = 0x08;
27860 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27861 SD_PATH_STANDARD);
27862 if (rval == 0) {
27863 entry->cdte_datamode = buffer[0];
27864 } else {
27865 /*
27866 * The READ HEADER command failed; since it is
27867 * obsoleted in one spec, it's better to return
27868 * -1 for an invalid track so that we can still
27869 * receive the rest of the TOC data.
27870 */
27871 entry->cdte_datamode = (uchar_t)-1;
27872 }
27873 } else {
27874 entry->cdte_datamode = (uchar_t)-1;
27875 }
27876 
27877 kmem_free(buffer, 12);
27878 kmem_free(com, sizeof (*com));
27879 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
27880 return (EFAULT);
27881 
27882 return (rval);
27883 }
27884 
27885 
27886 /*
27887 * Function: sr_read_tochdr()
27888 *
27889 * Description: This routine is the driver entry point for handling CD-ROM
27890 * ioctl requests to read the Table of Contents (TOC) header
27891 * (CDROMREADTOCHDR). The TOC header consists of the disk starting
27892 * and ending track numbers.
27893 *
27894 * Arguments: dev - the device 'dev_t'
27895 * data - pointer to user provided toc header structure,
27896 * specifying the starting and ending track numbers.
27897 * flag - this argument is a pass through to ddi_copyxxx()
27898 * directly from the mode argument of ioctl().
27899 *
27900 * Return Code: the code returned by sd_send_scsi_cmd()
27901 * EFAULT if ddi_copyxxx() fails
27902 * ENXIO if fail ddi_get_soft_state
27903 * EINVAL if data pointer is NULL
27904 */
27905 
27906 static int
27907 sr_read_tochdr(dev_t dev, caddr_t data, int flag)
27908 {
27909 struct sd_lun *un;
27910 struct uscsi_cmd *com;
27911 struct cdrom_tochdr toc_header;
27912 struct cdrom_tochdr *hdr = &toc_header;
27913 char cdb[CDB_GROUP1];
27914 int rval;
27915 caddr_t buffer;
27916 
27917 if (data == NULL) {
27918 return (EINVAL);
27919 }
27920 
27921 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27922 (un->un_state == SD_STATE_OFFLINE)) {
27923 return (ENXIO);
27924 }
27925 
27926 buffer = kmem_zalloc(4, KM_SLEEP);
27927 bzero(cdb, CDB_GROUP1);
27928 cdb[0] = SCMD_READ_TOC;
27929 /*
27930 * Specifying a track number of 0x00 in the READ TOC command indicates
27931 * that the TOC header should be returned
27932 */
27933 cdb[6] = 0x00;
27934 /*
27935 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
27936 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
27937 */
27938 cdb[8] = 0x04;
27939 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27940 com->uscsi_cdb = cdb;
27941 com->uscsi_cdblen = CDB_GROUP1;
27942 com->uscsi_bufaddr = buffer;
27943 com->uscsi_buflen = 0x04;
27944 com->uscsi_timeout = 300;
27945 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27946 
27947 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27948 SD_PATH_STANDARD);
27949 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
27950 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
27951 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
27952 } else {
27953 hdr->cdth_trk0 = buffer[2];
27954 hdr->cdth_trk1 = buffer[3];
27955 }
27956 kmem_free(buffer, 4);
27957 kmem_free(com, sizeof (*com));
27958 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
27959 return (EFAULT);
27960 }
27961 return (rval);
27962 }
27963 
27964 
27965 /*
27966 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
27967 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
27968 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
27969 * digital audio and extended architecture digital audio. These modes are
27970 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
27971 * MMC specs.
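 *
 * For reference, the per-block user data sizes implied by these formats
 * (and used in the buffer length computations below) are: mode 1 = 2048
 * bytes, mode 2 = 2336 bytes, and CD-DA = 2352 bytes, plus 96 bytes of
 * raw subcode per block when subcode data is also requested.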
27972 * 27973 * In addition to support for the various data formats these routines also 27974 * include support for devices that implement only the direct access READ 27975 * commands (0x08, 0x28), devices that implement the READ_CD commands 27976 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 27977 * READ CDXA commands (0xD8, 0xDB) 27978 */ 27979 27980 /* 27981 * Function: sr_read_mode1() 27982 * 27983 * Description: This routine is the driver entry point for handling CD-ROM 27984 * ioctl read mode1 requests (CDROMREADMODE1). 27985 * 27986 * Arguments: dev - the device 'dev_t' 27987 * data - pointer to user provided cd read structure specifying 27988 * the lba buffer address and length. 27989 * flag - this argument is a pass through to ddi_copyxxx() 27990 * directly from the mode argument of ioctl(). 27991 * 27992 * Return Code: the code returned by sd_send_scsi_cmd() 27993 * EFAULT if ddi_copyxxx() fails 27994 * ENXIO if fail ddi_get_soft_state 27995 * EINVAL if data pointer is NULL 27996 */ 27997 27998 static int 27999 sr_read_mode1(dev_t dev, caddr_t data, int flag) 28000 { 28001 struct sd_lun *un; 28002 struct cdrom_read mode1_struct; 28003 struct cdrom_read *mode1 = &mode1_struct; 28004 int rval; 28005 sd_ssc_t *ssc; 28006 28007 #ifdef _MULTI_DATAMODEL 28008 /* To support ILP32 applications in an LP64 world */ 28009 struct cdrom_read32 cdrom_read32; 28010 struct cdrom_read32 *cdrd32 = &cdrom_read32; 28011 #endif /* _MULTI_DATAMODEL */ 28012 28013 if (data == NULL) { 28014 return (EINVAL); 28015 } 28016 28017 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28018 (un->un_state == SD_STATE_OFFLINE)) { 28019 return (ENXIO); 28020 } 28021 28022 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28023 "sd_read_mode1: entry: un:0x%p\n", un); 28024 28025 #ifdef _MULTI_DATAMODEL 28026 switch (ddi_model_convert_from(flag & FMODELS)) { 28027 case DDI_MODEL_ILP32: 28028 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 28029 return (EFAULT); 28030 } 28031 /* Convert the ILP32 uscsi data from the application to LP64 */ 28032 cdrom_read32tocdrom_read(cdrd32, mode1); 28033 break; 28034 case DDI_MODEL_NONE: 28035 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 28036 return (EFAULT); 28037 } 28038 } 28039 #else /* ! _MULTI_DATAMODEL */ 28040 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 28041 return (EFAULT); 28042 } 28043 #endif /* _MULTI_DATAMODEL */ 28044 28045 ssc = sd_ssc_init(un); 28046 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr, 28047 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 28048 sd_ssc_fini(ssc); 28049 28050 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28051 "sd_read_mode1: exit: un:0x%p\n", un); 28052 28053 return (rval); 28054 } 28055 28056 28057 /* 28058 * Function: sr_read_cd_mode2() 28059 * 28060 * Description: This routine is the driver entry point for handling CD-ROM 28061 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 28062 * support the READ CD (0xBE) command or the 1st generation 28063 * READ CD (0xD4) command. 28064 * 28065 * Arguments: dev - the device 'dev_t' 28066 * data - pointer to user provided cd read structure specifying 28067 * the lba buffer address and length. 28068 * flag - this argument is a pass through to ddi_copyxxx() 28069 * directly from the mode argument of ioctl(). 
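 *
 *		Illustrative userland sketch (an assumption for documentation;
 *		'fd' is an assumed open descriptor and the buffer must hold
 *		whole 2336-byte mode 2 sectors):
 *
 *			struct cdrom_read cr;
 *			char buf[2336];
 *
 *			cr.cdread_lba = 16;
 *			cr.cdread_bufaddr = buf;
 *			cr.cdread_buflen = sizeof (buf);
 *			(void) ioctl(fd, CDROMREADMODE2, &cr);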
28070 *
28071 * Return Code: the code returned by sd_send_scsi_cmd()
28072 * EFAULT if ddi_copyxxx() fails
28073 * ENXIO if fail ddi_get_soft_state
28074 * EINVAL if data pointer is NULL
28075 */
28076 
28077 static int
28078 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
28079 {
28080 struct sd_lun *un;
28081 struct uscsi_cmd *com;
28082 struct cdrom_read mode2_struct;
28083 struct cdrom_read *mode2 = &mode2_struct;
28084 uchar_t cdb[CDB_GROUP5];
28085 int nblocks;
28086 int rval;
28087 #ifdef _MULTI_DATAMODEL
28088 /* To support ILP32 applications in an LP64 world */
28089 struct cdrom_read32 cdrom_read32;
28090 struct cdrom_read32 *cdrd32 = &cdrom_read32;
28091 #endif /* _MULTI_DATAMODEL */
28092 
28093 if (data == NULL) {
28094 return (EINVAL);
28095 }
28096 
28097 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28098 (un->un_state == SD_STATE_OFFLINE)) {
28099 return (ENXIO);
28100 }
28101 
28102 #ifdef _MULTI_DATAMODEL
28103 switch (ddi_model_convert_from(flag & FMODELS)) {
28104 case DDI_MODEL_ILP32:
28105 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
28106 return (EFAULT);
28107 }
28108 /* Convert the ILP32 uscsi data from the application to LP64 */
28109 cdrom_read32tocdrom_read(cdrd32, mode2);
28110 break;
28111 case DDI_MODEL_NONE:
28112 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
28113 return (EFAULT);
28114 }
28115 break;
28116 }
28117 
28118 #else /* ! _MULTI_DATAMODEL */
28119 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
28120 return (EFAULT);
28121 }
28122 #endif /* _MULTI_DATAMODEL */
28123 
28124 bzero(cdb, sizeof (cdb));
28125 if (un->un_f_cfg_read_cd_xd4 == TRUE) {
28126 /* Read command supported by 1st generation ATAPI drives */
28127 cdb[0] = SCMD_READ_CDD4;
28128 } else {
28129 /* Universal CD Access Command */
28130 cdb[0] = SCMD_READ_CD;
28131 }
28132 
28133 /*
28134 * Set the expected sector type to: 2336 byte, Mode 2 Yellow Book
28135 */
28136 cdb[1] = CDROM_SECTOR_TYPE_MODE2;
28137 
28138 /* set the start address */
28139 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0xFF);
28140 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0xFF);
28141 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
28142 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);
28143 
28144 /* set the transfer length */
28145 nblocks = mode2->cdread_buflen / 2336;
28146 cdb[6] = (uchar_t)(nblocks >> 16);
28147 cdb[7] = (uchar_t)(nblocks >> 8);
28148 cdb[8] = (uchar_t)nblocks;
28149 
28150 /* set the filter bits */
28151 cdb[9] = CDROM_READ_CD_USERDATA;
28152 
28153 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28154 com->uscsi_cdb = (caddr_t)cdb;
28155 com->uscsi_cdblen = sizeof (cdb);
28156 com->uscsi_bufaddr = mode2->cdread_bufaddr;
28157 com->uscsi_buflen = mode2->cdread_buflen;
28158 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28159 
28160 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
28161 SD_PATH_STANDARD);
28162 kmem_free(com, sizeof (*com));
28163 return (rval);
28164 }
28165 
28166 
28167 /*
28168 * Function: sr_read_mode2()
28169 *
28170 * Description: This routine is the driver entry point for handling CD-ROM
28171 * ioctl read mode2 requests (CDROMREADMODE2) for devices that
28172 * do not support the READ CD (0xBE) command.
28173 *
28174 * Arguments: dev - the device 'dev_t'
28175 * data - pointer to user provided cd read structure specifying
28176 * the lba buffer address and length.
28177 * flag - this argument is a pass through to ddi_copyxxx()
28178 * directly from the mode argument of ioctl().
28179 * 28180 * Return Code: the code returned by sd_send_scsi_cmd() 28181 * EFAULT if ddi_copyxxx() fails 28182 * ENXIO if fail ddi_get_soft_state 28183 * EINVAL if data pointer is NULL 28184 * EIO if fail to reset block size 28185 * EAGAIN if commands are in progress in the driver 28186 */ 28187 28188 static int 28189 sr_read_mode2(dev_t dev, caddr_t data, int flag) 28190 { 28191 struct sd_lun *un; 28192 struct cdrom_read mode2_struct; 28193 struct cdrom_read *mode2 = &mode2_struct; 28194 int rval; 28195 uint32_t restore_blksize; 28196 struct uscsi_cmd *com; 28197 uchar_t cdb[CDB_GROUP0]; 28198 int nblocks; 28199 28200 #ifdef _MULTI_DATAMODEL 28201 /* To support ILP32 applications in an LP64 world */ 28202 struct cdrom_read32 cdrom_read32; 28203 struct cdrom_read32 *cdrd32 = &cdrom_read32; 28204 #endif /* _MULTI_DATAMODEL */ 28205 28206 if (data == NULL) { 28207 return (EINVAL); 28208 } 28209 28210 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28211 (un->un_state == SD_STATE_OFFLINE)) { 28212 return (ENXIO); 28213 } 28214 28215 /* 28216 * Because this routine will update the device and driver block size 28217 * being used we want to make sure there are no commands in progress. 28218 * If commands are in progress the user will have to try again. 28219 * 28220 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 28221 * in sdioctl to protect commands from sdioctl through to the top of 28222 * sd_uscsi_strategy. See sdioctl for details. 28223 */ 28224 mutex_enter(SD_MUTEX(un)); 28225 if (un->un_ncmds_in_driver != 1) { 28226 mutex_exit(SD_MUTEX(un)); 28227 return (EAGAIN); 28228 } 28229 mutex_exit(SD_MUTEX(un)); 28230 28231 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28232 "sd_read_mode2: entry: un:0x%p\n", un); 28233 28234 #ifdef _MULTI_DATAMODEL 28235 switch (ddi_model_convert_from(flag & FMODELS)) { 28236 case DDI_MODEL_ILP32: 28237 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 28238 return (EFAULT); 28239 } 28240 /* Convert the ILP32 uscsi data from the application to LP64 */ 28241 cdrom_read32tocdrom_read(cdrd32, mode2); 28242 break; 28243 case DDI_MODEL_NONE: 28244 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 28245 return (EFAULT); 28246 } 28247 break; 28248 } 28249 #else /* ! 
_MULTI_DATAMODEL */
28250 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
28251 return (EFAULT);
28252 }
28253 #endif /* _MULTI_DATAMODEL */
28254 
28255 /* Store the current target block size for restoration later */
28256 restore_blksize = un->un_tgt_blocksize;
28257 
28258 /* Change the device and soft state target block size to 2336 */
28259 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
28260 rval = EIO;
28261 goto done;
28262 }
28263 
28264 
28265 bzero(cdb, sizeof (cdb));
28266 
28267 /* set READ operation */
28268 cdb[0] = SCMD_READ;
28269 
28270 /* adjust lba for 2kbyte blocks from 512 byte blocks */
28271 mode2->cdread_lba >>= 2;
28272 
28273 /* set the start address */
28274 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0x1F);
28275 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
28276 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);
28277 
28278 /* set the transfer length */
28279 nblocks = mode2->cdread_buflen / 2336;
28280 cdb[4] = (uchar_t)nblocks & 0xFF;
28281 
28282 /* build command */
28283 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28284 com->uscsi_cdb = (caddr_t)cdb;
28285 com->uscsi_cdblen = sizeof (cdb);
28286 com->uscsi_bufaddr = mode2->cdread_bufaddr;
28287 com->uscsi_buflen = mode2->cdread_buflen;
28288 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28289 
28290 /*
28291 * Issue SCSI command with user space address for read buffer.
28292 *
28293 * This sends the command through the main channel in the driver.
28294 *
28295 * Since this is accessed via an IOCTL call, we go through the
28296 * standard path, so that if the device was powered down, then
28297 * it would be 'awakened' to handle the command.
28298 */
28299 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
28300 SD_PATH_STANDARD);
28301 
28302 kmem_free(com, sizeof (*com));
28303 
28304 /* Restore the device and soft state target block size */
28305 if (sr_sector_mode(dev, restore_blksize) != 0) {
28306 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28307 "can't do switch back to mode 1\n");
28308 /*
28309 * If sd_send_scsi_cmd() succeeded we still need to report
28310 * an error because we failed to reset the block size
28311 */
28312 if (rval == 0) {
28313 rval = EIO;
28314 }
28315 }
28316 
28317 done:
28318 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
28319 "sd_read_mode2: exit: un:0x%p\n", un);
28320 
28321 return (rval);
28322 }
28323 
28324 
28325 /*
28326 * Function: sr_sector_mode()
28327 *
28328 * Description: This utility function is used by sr_read_mode2 to set the
28329 * target block size based on the user specified size. This is
28330 * a legacy implementation based upon a vendor specific mode page.
28331 *
28332 * Arguments: dev - the device 'dev_t'
28333 * blksize - the target block size to set, either 2336
28334 * (SD_MODE2_BLKSIZE) or 512.
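 *
 *		For reference, the 20-byte MODE SELECT parameter list built
 *		below is laid out as follows (this mirrors the code; the page
 *		contents themselves are vendor specific):
 *
 *		    byte 3		block descriptor length (0x08)
 *		    bytes 10-11		requested block size (2336 or 512)
 *		    byte 12		page code (0x01)
 *		    byte 13		page length (0x06)
 *		    bytes 14-15		copied from the MODE SENSE data; bit 0
 *					of byte 14 is set when selecting 2336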
28335 *
28336 * Return Code: the code returned by sd_send_scsi_MODE_SENSE() or
28337 * sd_send_scsi_MODE_SELECT()
28338 * ENXIO if fail ddi_get_soft_state
28340 */
28341 
28342 static int
28343 sr_sector_mode(dev_t dev, uint32_t blksize)
28344 {
28345 struct sd_lun *un;
28346 uchar_t *sense;
28347 uchar_t *select;
28348 int rval;
28349 sd_ssc_t *ssc;
28350 
28351 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28352 (un->un_state == SD_STATE_OFFLINE)) {
28353 return (ENXIO);
28354 }
28355 
28356 sense = kmem_zalloc(20, KM_SLEEP);
28357 
28358 /* Note: This is a vendor specific mode page (0x81) */
28359 ssc = sd_ssc_init(un);
28360 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81,
28361 SD_PATH_STANDARD);
28362 sd_ssc_fini(ssc);
28363 if (rval != 0) {
28364 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
28365 "sr_sector_mode: Mode Sense failed\n");
28366 kmem_free(sense, 20);
28367 return (rval);
28368 }
28369 select = kmem_zalloc(20, KM_SLEEP);
28370 select[3] = 0x08;
28371 select[10] = ((blksize >> 8) & 0xff);
28372 select[11] = (blksize & 0xff);
28373 select[12] = 0x01;
28374 select[13] = 0x06;
28375 select[14] = sense[14];
28376 select[15] = sense[15];
28377 if (blksize == SD_MODE2_BLKSIZE) {
28378 select[14] |= 0x01;
28379 }
28380 
28381 ssc = sd_ssc_init(un);
28382 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20,
28383 SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
28384 sd_ssc_fini(ssc);
28385 if (rval != 0) {
28386 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
28387 "sr_sector_mode: Mode Select failed\n");
28388 } else {
28389 /*
28390 * Only update the softstate block size if we successfully
28391 * changed the device block mode.
28392 */
28393 mutex_enter(SD_MUTEX(un));
28394 sd_update_block_info(un, blksize, 0);
28395 mutex_exit(SD_MUTEX(un));
28396 }
28397 kmem_free(sense, 20);
28398 kmem_free(select, 20);
28399 return (rval);
28400 }
28401 
28402 
28403 /*
28404 * Function: sr_read_cdda()
28405 *
28406 * Description: This routine is the driver entry point for handling CD-ROM
28407 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If
28408 * the target supports CDDA these requests are handled via a vendor
28409 * specific command (0xD8). If the target does not support CDDA
28410 * these requests are handled via the READ CD command (0xBE).
28411 *
28412 * Arguments: dev - the device 'dev_t'
28413 * data - pointer to user provided CD-DA structure specifying
28414 * the track starting address, transfer length, and
28415 * subcode options.
28416 * flag - this argument is a pass through to ddi_copyxxx()
28417 * directly from the mode argument of ioctl().
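 *
 *		Illustrative userland sketch (an assumption for documentation;
 *		'fd' is an assumed open descriptor; reads one 2352-byte CD-DA
 *		frame with no subcode; field types per <sys/cdio.h>):
 *
 *			struct cdrom_cdda cdda;
 *			char buf[2352];
 *
 *			cdda.cdda_addr = 0;
 *			cdda.cdda_length = 1;
 *			cdda.cdda_data = buf;
 *			cdda.cdda_subcode = CDROM_DA_NO_SUBCODE;
 *			(void) ioctl(fd, CDROMCDDA, &cdda);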
28418 * 28419 * Return Code: the code returned by sd_send_scsi_cmd() 28420 * EFAULT if ddi_copyxxx() fails 28421 * ENXIO if fail ddi_get_soft_state 28422 * EINVAL if invalid arguments are provided 28423 * ENOTTY 28424 */ 28425 28426 static int 28427 sr_read_cdda(dev_t dev, caddr_t data, int flag) 28428 { 28429 struct sd_lun *un; 28430 struct uscsi_cmd *com; 28431 struct cdrom_cdda *cdda; 28432 int rval; 28433 size_t buflen; 28434 char cdb[CDB_GROUP5]; 28435 28436 #ifdef _MULTI_DATAMODEL 28437 /* To support ILP32 applications in an LP64 world */ 28438 struct cdrom_cdda32 cdrom_cdda32; 28439 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 28440 #endif /* _MULTI_DATAMODEL */ 28441 28442 if (data == NULL) { 28443 return (EINVAL); 28444 } 28445 28446 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28447 return (ENXIO); 28448 } 28449 28450 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 28451 28452 #ifdef _MULTI_DATAMODEL 28453 switch (ddi_model_convert_from(flag & FMODELS)) { 28454 case DDI_MODEL_ILP32: 28455 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 28456 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28457 "sr_read_cdda: ddi_copyin Failed\n"); 28458 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28459 return (EFAULT); 28460 } 28461 /* Convert the ILP32 uscsi data from the application to LP64 */ 28462 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 28463 break; 28464 case DDI_MODEL_NONE: 28465 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28466 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28467 "sr_read_cdda: ddi_copyin Failed\n"); 28468 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28469 return (EFAULT); 28470 } 28471 break; 28472 } 28473 #else /* ! _MULTI_DATAMODEL */ 28474 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28475 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28476 "sr_read_cdda: ddi_copyin Failed\n"); 28477 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28478 return (EFAULT); 28479 } 28480 #endif /* _MULTI_DATAMODEL */ 28481 28482 /* 28483 * Since MMC-2 expects max 3 bytes for length, check if the 28484 * length input is greater than 3 bytes 28485 */ 28486 if ((cdda->cdda_length & 0xFF000000) != 0) { 28487 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 28488 "cdrom transfer length too large: %d (limit %d)\n", 28489 cdda->cdda_length, 0xFFFFFF); 28490 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28491 return (EINVAL); 28492 } 28493 28494 switch (cdda->cdda_subcode) { 28495 case CDROM_DA_NO_SUBCODE: 28496 buflen = CDROM_BLK_2352 * cdda->cdda_length; 28497 break; 28498 case CDROM_DA_SUBQ: 28499 buflen = CDROM_BLK_2368 * cdda->cdda_length; 28500 break; 28501 case CDROM_DA_ALL_SUBCODE: 28502 buflen = CDROM_BLK_2448 * cdda->cdda_length; 28503 break; 28504 case CDROM_DA_SUBCODE_ONLY: 28505 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 28506 break; 28507 default: 28508 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28509 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 28510 cdda->cdda_subcode); 28511 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28512 return (EINVAL); 28513 } 28514 28515 /* Build and send the command */ 28516 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28517 bzero(cdb, CDB_GROUP5); 28518 28519 if (un->un_f_cfg_cdda == TRUE) { 28520 cdb[0] = (char)SCMD_READ_CD; 28521 cdb[1] = 0x04; 28522 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28523 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28524 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28525 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28526 
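/* Bytes 6-8 carry the 24-bit transfer length in blocks (READ CD, 0xBE) */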
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28527 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28528 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 28529 cdb[9] = 0x10; 28530 switch (cdda->cdda_subcode) { 28531 case CDROM_DA_NO_SUBCODE : 28532 cdb[10] = 0x0; 28533 break; 28534 case CDROM_DA_SUBQ : 28535 cdb[10] = 0x2; 28536 break; 28537 case CDROM_DA_ALL_SUBCODE : 28538 cdb[10] = 0x1; 28539 break; 28540 case CDROM_DA_SUBCODE_ONLY : 28541 /* FALLTHROUGH */ 28542 default : 28543 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28544 kmem_free(com, sizeof (*com)); 28545 return (ENOTTY); 28546 } 28547 } else { 28548 cdb[0] = (char)SCMD_READ_CDDA; 28549 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28550 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28551 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28552 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28553 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 28554 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28555 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28556 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 28557 cdb[10] = cdda->cdda_subcode; 28558 } 28559 28560 com->uscsi_cdb = cdb; 28561 com->uscsi_cdblen = CDB_GROUP5; 28562 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 28563 com->uscsi_buflen = buflen; 28564 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28565 28566 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28567 SD_PATH_STANDARD); 28568 28569 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28570 kmem_free(com, sizeof (*com)); 28571 return (rval); 28572 } 28573 28574 28575 /* 28576 * Function: sr_read_cdxa() 28577 * 28578 * Description: This routine is the driver entry point for handling CD-ROM 28579 * ioctl requests to return CD-XA (Extended Architecture) data. 28580 * (CDROMCDXA). 28581 * 28582 * Arguments: dev - the device 'dev_t' 28583 * data - pointer to user provided CD-XA structure specifying 28584 * the data starting address, transfer length, and format 28585 * flag - this argument is a pass through to ddi_copyxxx() 28586 * directly from the mode argument of ioctl(). 28587 * 28588 * Return Code: the code returned by sd_send_scsi_cmd() 28589 * EFAULT if ddi_copyxxx() fails 28590 * ENXIO if fail ddi_get_soft_state 28591 * EINVAL if data pointer is NULL 28592 */ 28593 28594 static int 28595 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 28596 { 28597 struct sd_lun *un; 28598 struct uscsi_cmd *com; 28599 struct cdrom_cdxa *cdxa; 28600 int rval; 28601 size_t buflen; 28602 char cdb[CDB_GROUP5]; 28603 uchar_t read_flags; 28604 28605 #ifdef _MULTI_DATAMODEL 28606 /* To support ILP32 applications in an LP64 world */ 28607 struct cdrom_cdxa32 cdrom_cdxa32; 28608 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 28609 #endif /* _MULTI_DATAMODEL */ 28610 28611 if (data == NULL) { 28612 return (EINVAL); 28613 } 28614 28615 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28616 return (ENXIO); 28617 } 28618 28619 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 28620 28621 #ifdef _MULTI_DATAMODEL 28622 switch (ddi_model_convert_from(flag & FMODELS)) { 28623 case DDI_MODEL_ILP32: 28624 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 28625 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28626 return (EFAULT); 28627 } 28628 /* 28629 * Convert the ILP32 uscsi data from the 28630 * application to LP64 for internal use. 
28631 */
28632 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
28633 break;
28634 case DDI_MODEL_NONE:
28635 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
28636 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28637 return (EFAULT);
28638 }
28639 break;
28640 }
28641 #else /* ! _MULTI_DATAMODEL */
28642 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
28643 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28644 return (EFAULT);
28645 }
28646 #endif /* _MULTI_DATAMODEL */
28647 
28648 /*
28649 * Since MMC-2 expects max 3 bytes for length, check if the
28650 * length input is greater than 3 bytes
28651 */
28652 if ((cdxa->cdxa_length & 0xFF000000) != 0) {
28653 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
28654 "cdrom transfer length too large: %d (limit %d)\n",
28655 cdxa->cdxa_length, 0xFFFFFF);
28656 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28657 return (EINVAL);
28658 }
28659 
28660 switch (cdxa->cdxa_format) {
28661 case CDROM_XA_DATA:
28662 buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
28663 read_flags = 0x10;
28664 break;
28665 case CDROM_XA_SECTOR_DATA:
28666 buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
28667 read_flags = 0xf8;
28668 break;
28669 case CDROM_XA_DATA_W_ERROR:
28670 buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
28671 read_flags = 0xfc;
28672 break;
28673 default:
28674 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28675 "sr_read_cdxa: Format '0x%x' Not Supported\n",
28676 cdxa->cdxa_format);
28677 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28678 return (EINVAL);
28679 }
28680 
28681 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28682 bzero(cdb, CDB_GROUP5);
28683 if (un->un_f_mmc_cap == TRUE) {
28684 cdb[0] = (char)SCMD_READ_CD;
28685 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
28686 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
28687 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
28688 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
28689 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
28690 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
28691 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
28692 cdb[9] = (char)read_flags;
28693 } else {
28694 /*
28695 * Note: A vendor specific command (0xDB) is being used here to
28696 * request the CD-XA data.
28697 */ 28698 cdb[0] = (char)SCMD_READ_CDXA; 28699 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 28700 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 28701 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 28702 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 28703 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 28704 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 28705 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 28706 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 28707 cdb[10] = cdxa->cdxa_format; 28708 } 28709 com->uscsi_cdb = cdb; 28710 com->uscsi_cdblen = CDB_GROUP5; 28711 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 28712 com->uscsi_buflen = buflen; 28713 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28714 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28715 SD_PATH_STANDARD); 28716 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28717 kmem_free(com, sizeof (*com)); 28718 return (rval); 28719 } 28720 28721 28722 /* 28723 * Function: sr_eject() 28724 * 28725 * Description: This routine is the driver entry point for handling CD-ROM 28726 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 28727 * 28728 * Arguments: dev - the device 'dev_t' 28729 * 28730 * Return Code: the code returned by sd_send_scsi_cmd() 28731 */ 28732 28733 static int 28734 sr_eject(dev_t dev) 28735 { 28736 struct sd_lun *un; 28737 int rval; 28738 sd_ssc_t *ssc; 28739 28740 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28741 (un->un_state == SD_STATE_OFFLINE)) { 28742 return (ENXIO); 28743 } 28744 28745 /* 28746 * To prevent race conditions with the eject 28747 * command, keep track of an eject command as 28748 * it progresses. If we are already handling 28749 * an eject command in the driver for the given 28750 * unit and another request to eject is received 28751 * immediately return EAGAIN so we don't lose 28752 * the command if the current eject command fails. 28753 */ 28754 mutex_enter(SD_MUTEX(un)); 28755 if (un->un_f_ejecting == TRUE) { 28756 mutex_exit(SD_MUTEX(un)); 28757 return (EAGAIN); 28758 } 28759 un->un_f_ejecting = TRUE; 28760 mutex_exit(SD_MUTEX(un)); 28761 28762 ssc = sd_ssc_init(un); 28763 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 28764 SD_PATH_STANDARD); 28765 sd_ssc_fini(ssc); 28766 28767 if (rval != 0) { 28768 mutex_enter(SD_MUTEX(un)); 28769 un->un_f_ejecting = FALSE; 28770 mutex_exit(SD_MUTEX(un)); 28771 return (rval); 28772 } 28773 28774 ssc = sd_ssc_init(un); 28775 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 28776 SD_TARGET_EJECT, SD_PATH_STANDARD); 28777 sd_ssc_fini(ssc); 28778 28779 if (rval == 0) { 28780 mutex_enter(SD_MUTEX(un)); 28781 sr_ejected(un); 28782 un->un_mediastate = DKIO_EJECTED; 28783 un->un_f_ejecting = FALSE; 28784 cv_broadcast(&un->un_state_cv); 28785 mutex_exit(SD_MUTEX(un)); 28786 } else { 28787 mutex_enter(SD_MUTEX(un)); 28788 un->un_f_ejecting = FALSE; 28789 mutex_exit(SD_MUTEX(un)); 28790 } 28791 return (rval); 28792 } 28793 28794 28795 /* 28796 * Function: sr_ejected() 28797 * 28798 * Description: This routine updates the soft state structure to invalidate the 28799 * geometry information after the media has been ejected or a 28800 * media eject has been detected. 
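 * Note: this routine is entered with SD_MUTEX held; the mutex is
 * dropped and reacquired around the cmlb_invalidate() call.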
28801 *
28802 * Arguments: un - driver soft state (unit) structure
28803 */
28804 
28805 static void
28806 sr_ejected(struct sd_lun *un)
28807 {
28808 struct sd_errstats *stp;
28809 
28810 ASSERT(un != NULL);
28811 ASSERT(mutex_owned(SD_MUTEX(un)));
28812 
28813 un->un_f_blockcount_is_valid = FALSE;
28814 un->un_f_tgt_blocksize_is_valid = FALSE;
28815 mutex_exit(SD_MUTEX(un));
28816 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
28817 mutex_enter(SD_MUTEX(un));
28818 
28819 if (un->un_errstats != NULL) {
28820 stp = (struct sd_errstats *)un->un_errstats->ks_data;
28821 stp->sd_capacity.value.ui64 = 0;
28822 }
28823 }
28824 
28825 
28826 /*
28827 * Function: sr_check_wp()
28828 *
28829 * Description: This routine checks the write protection of a removable
28830 * media disk and hotpluggable devices via the write protect bit of
28831 * the Mode Page Header device specific field. Some devices choke
28832 * on unsupported mode pages. To work around this issue, this
28833 * routine uses the 0x3f mode page (request for all pages) for
28834 * all device types.
28835 *
28836 * Arguments: dev - the device 'dev_t'
28837 *
28838 * Return Code: int indicating if the device is write protected (1) or not (0)
28839 *
28840 * Context: Kernel thread.
28841 *
28842 */
28843 
28844 static int
28845 sr_check_wp(dev_t dev)
28846 {
28847 struct sd_lun *un;
28848 uchar_t device_specific;
28849 uchar_t *sense;
28850 int hdrlen;
28851 int rval = FALSE;
28852 int status;
28853 sd_ssc_t *ssc;
28854 
28855 /*
28856 * Note: The return codes for this routine should be reworked to
28857 * properly handle the case of a NULL softstate.
28858 */
28859 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
28860 return (FALSE);
28861 }
28862 
28863 if (un->un_f_cfg_is_atapi == TRUE) {
28864 /*
28865 * The mode page contents are not required; set the allocation
28866 * length for the mode page header only
28867 */
28868 hdrlen = MODE_HEADER_LENGTH_GRP2;
28869 sense = kmem_zalloc(hdrlen, KM_SLEEP);
28870 ssc = sd_ssc_init(un);
28871 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen,
28872 MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
28873 sd_ssc_fini(ssc);
28874 if (status != 0)
28875 goto err_exit;
28876 device_specific =
28877 ((struct mode_header_grp2 *)sense)->device_specific;
28878 } else {
28879 hdrlen = MODE_HEADER_LENGTH;
28880 sense = kmem_zalloc(hdrlen, KM_SLEEP);
28881 ssc = sd_ssc_init(un);
28882 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen,
28883 MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
28884 sd_ssc_fini(ssc);
28885 if (status != 0)
28886 goto err_exit;
28887 device_specific =
28888 ((struct mode_header *)sense)->device_specific;
28889 }
28890 
28891 
28892 /*
28893 * Test the write protect bit of the device specific field. If the
28894 * mode sense above failed, not all devices understand this query;
28895 * we then took the err_exit path with rval still FALSE (writable).
28896 */
28897 if (device_specific & WRITE_PROTECT) {
28898 rval = TRUE;
28899 }
28900 
28901 err_exit:
28902 kmem_free(sense, hdrlen);
28903 return (rval);
28904 }
28905 
28906 /*
28907 * Function: sr_volume_ctrl()
28908 *
28909 * Description: This routine is the driver entry point for handling CD-ROM
28910 * audio output volume ioctl requests. (CDROMVOLCTRL)
28911 *
28912 * Arguments: dev - the device 'dev_t'
28913 * data - pointer to user audio volume control structure
28914 * flag - this argument is a pass through to ddi_copyxxx()
28915 * directly from the mode argument of ioctl().
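 *
 *		Illustrative userland sketch (an assumption for documentation;
 *		'fd' is an assumed open descriptor; sets both supported
 *		channels to full volume):
 *
 *			struct cdrom_volctrl v;
 *
 *			v.channel0 = 0xff;
 *			v.channel1 = 0xff;
 *			(void) ioctl(fd, CDROMVOLCTRL, &v);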
28916 * 28917 * Return Code: the code returned by sd_send_scsi_cmd() 28918 * EFAULT if ddi_copyxxx() fails 28919 * ENXIO if fail ddi_get_soft_state 28920 * EINVAL if data pointer is NULL 28921 * 28922 */ 28923 28924 static int 28925 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 28926 { 28927 struct sd_lun *un; 28928 struct cdrom_volctrl volume; 28929 struct cdrom_volctrl *vol = &volume; 28930 uchar_t *sense_page; 28931 uchar_t *select_page; 28932 uchar_t *sense; 28933 uchar_t *select; 28934 int sense_buflen; 28935 int select_buflen; 28936 int rval; 28937 sd_ssc_t *ssc; 28938 28939 if (data == NULL) { 28940 return (EINVAL); 28941 } 28942 28943 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28944 (un->un_state == SD_STATE_OFFLINE)) { 28945 return (ENXIO); 28946 } 28947 28948 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 28949 return (EFAULT); 28950 } 28951 28952 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 28953 struct mode_header_grp2 *sense_mhp; 28954 struct mode_header_grp2 *select_mhp; 28955 int bd_len; 28956 28957 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 28958 select_buflen = MODE_HEADER_LENGTH_GRP2 + 28959 MODEPAGE_AUDIO_CTRL_LEN; 28960 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 28961 select = kmem_zalloc(select_buflen, KM_SLEEP); 28962 ssc = sd_ssc_init(un); 28963 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 28964 sense_buflen, MODEPAGE_AUDIO_CTRL, 28965 SD_PATH_STANDARD); 28966 sd_ssc_fini(ssc); 28967 28968 if (rval != 0) { 28969 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28970 "sr_volume_ctrl: Mode Sense Failed\n"); 28971 kmem_free(sense, sense_buflen); 28972 kmem_free(select, select_buflen); 28973 return (rval); 28974 } 28975 sense_mhp = (struct mode_header_grp2 *)sense; 28976 select_mhp = (struct mode_header_grp2 *)select; 28977 bd_len = (sense_mhp->bdesc_length_hi << 8) | 28978 sense_mhp->bdesc_length_lo; 28979 if (bd_len > MODE_BLK_DESC_LENGTH) { 28980 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28981 "sr_volume_ctrl: Mode Sense returned invalid " 28982 "block descriptor length\n"); 28983 kmem_free(sense, sense_buflen); 28984 kmem_free(select, select_buflen); 28985 return (EIO); 28986 } 28987 sense_page = (uchar_t *) 28988 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 28989 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 28990 select_mhp->length_msb = 0; 28991 select_mhp->length_lsb = 0; 28992 select_mhp->bdesc_length_hi = 0; 28993 select_mhp->bdesc_length_lo = 0; 28994 } else { 28995 struct mode_header *sense_mhp, *select_mhp; 28996 28997 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28998 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28999 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 29000 select = kmem_zalloc(select_buflen, KM_SLEEP); 29001 ssc = sd_ssc_init(un); 29002 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 29003 sense_buflen, MODEPAGE_AUDIO_CTRL, 29004 SD_PATH_STANDARD); 29005 sd_ssc_fini(ssc); 29006 29007 if (rval != 0) { 29008 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29009 "sr_volume_ctrl: Mode Sense Failed\n"); 29010 kmem_free(sense, sense_buflen); 29011 kmem_free(select, select_buflen); 29012 return (rval); 29013 } 29014 sense_mhp = (struct mode_header *)sense; 29015 select_mhp = (struct mode_header *)select; 29016 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 29017 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29018 "sr_volume_ctrl: Mode Sense returned invalid " 29019 "block descriptor length\n"); 29020 
kmem_free(sense, sense_buflen); 29021 kmem_free(select, select_buflen); 29022 return (EIO); 29023 } 29024 sense_page = (uchar_t *) 29025 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 29026 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 29027 select_mhp->length = 0; 29028 select_mhp->bdesc_length = 0; 29029 } 29030 /* 29031 * Note: An audio control data structure could be created and overlayed 29032 * on the following in place of the array indexing method implemented. 29033 */ 29034 29035 /* Build the select data for the user volume data */ 29036 select_page[0] = MODEPAGE_AUDIO_CTRL; 29037 select_page[1] = 0xE; 29038 /* Set the immediate bit */ 29039 select_page[2] = 0x04; 29040 /* Zero out reserved fields */ 29041 select_page[3] = 0x00; 29042 select_page[4] = 0x00; 29043 /* Return sense data for fields not to be modified */ 29044 select_page[5] = sense_page[5]; 29045 select_page[6] = sense_page[6]; 29046 select_page[7] = sense_page[7]; 29047 /* Set the user specified volume levels for channel 0 and 1 */ 29048 select_page[8] = 0x01; 29049 select_page[9] = vol->channel0; 29050 select_page[10] = 0x02; 29051 select_page[11] = vol->channel1; 29052 /* Channel 2 and 3 are currently unsupported so return the sense data */ 29053 select_page[12] = sense_page[12]; 29054 select_page[13] = sense_page[13]; 29055 select_page[14] = sense_page[14]; 29056 select_page[15] = sense_page[15]; 29057 29058 ssc = sd_ssc_init(un); 29059 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 29060 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select, 29061 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 29062 } else { 29063 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 29064 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 29065 } 29066 sd_ssc_fini(ssc); 29067 29068 kmem_free(sense, sense_buflen); 29069 kmem_free(select, select_buflen); 29070 return (rval); 29071 } 29072 29073 29074 /* 29075 * Function: sr_read_sony_session_offset() 29076 * 29077 * Description: This routine is the driver entry point for handling CD-ROM 29078 * ioctl requests for session offset information. (CDROMREADOFFSET) 29079 * The address of the first track in the last session of a 29080 * multi-session CD-ROM is returned 29081 * 29082 * Note: This routine uses a vendor specific key value in the 29083 * command control field without implementing any vendor check here 29084 * or in the ioctl routine. 29085 * 29086 * Arguments: dev - the device 'dev_t' 29087 * data - pointer to an int to hold the requested address 29088 * flag - this argument is a pass through to ddi_copyxxx() 29089 * directly from the mode argument of ioctl(). 29090 * 29091 * Return Code: the code returned by sd_send_scsi_cmd() 29092 * EFAULT if ddi_copyxxx() fails 29093 * ENXIO if fail ddi_get_soft_state 29094 * EINVAL if data pointer is NULL 29095 */ 29096 29097 static int 29098 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 29099 { 29100 struct sd_lun *un; 29101 struct uscsi_cmd *com; 29102 caddr_t buffer; 29103 char cdb[CDB_GROUP1]; 29104 int session_offset = 0; 29105 int rval; 29106 29107 if (data == NULL) { 29108 return (EINVAL); 29109 } 29110 29111 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 29112 (un->un_state == SD_STATE_OFFLINE)) { 29113 return (ENXIO); 29114 } 29115 29116 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 29117 bzero(cdb, CDB_GROUP1); 29118 cdb[0] = SCMD_READ_TOC; 29119 /* 29120 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 
29121 * (4 byte TOC response header + 8 byte response data)
29122 */
29123 cdb[8] = SONY_SESSION_OFFSET_LEN;
29124 /* Byte 9 is the control byte. A vendor specific value is used */
29125 cdb[9] = SONY_SESSION_OFFSET_KEY;
29126 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
29127 com->uscsi_cdb = cdb;
29128 com->uscsi_cdblen = CDB_GROUP1;
29129 com->uscsi_bufaddr = buffer;
29130 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
29131 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
29132 
29133 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
29134 SD_PATH_STANDARD);
29135 if (rval != 0) {
29136 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
29137 kmem_free(com, sizeof (*com));
29138 return (rval);
29139 }
29140 if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
29141 session_offset =
29142 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
29143 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
29144 /*
29145 * The drive returns the offset in units of the current lbasize
29146 * blocks. Convert it to 2K blocks before returning to the user.
29147 */
29148 if (un->un_tgt_blocksize == CDROM_BLK_512) {
29149 session_offset >>= 2;
29150 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
29151 session_offset >>= 1;
29152 }
29153 }
29154 
29155 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
29156 rval = EFAULT;
29157 }
29158 
29159 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
29160 kmem_free(com, sizeof (*com));
29161 return (rval);
29162 }
29163 
29164 
29165 /*
29166 * Function: sd_wm_cache_constructor()
29167 *
29168 * Description: Cache Constructor for the wmap cache for the read/modify/write
29169 * devices.
29170 *
29171 * Arguments: wm - A pointer to the sd_w_map to be initialized.
29172 * un - sd_lun structure for the device.
29173 * flag - the km flags passed to constructor
29174 *
29175 * Return Code: 0 on success.
29176 * -1 on failure.
29177 */
29178 
29179 /*ARGSUSED*/
29180 static int
29181 sd_wm_cache_constructor(void *wm, void *un, int flags)
29182 {
29183 bzero(wm, sizeof (struct sd_w_map));
29184 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
29185 return (0);
29186 }
29187 
29188 
29189 /*
29190 * Function: sd_wm_cache_destructor()
29191 *
29192 * Description: Cache destructor for the wmap cache for the read/modify/write
29193 * devices.
29194 *
29195 * Arguments: wm - A pointer to the sd_w_map to be destroyed.
29196 * un - sd_lun structure for the device.
29197 */
29198 /*ARGSUSED*/
29199 static void
29200 sd_wm_cache_destructor(void *wm, void *un)
29201 {
29202 cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
29203 }
29204 
29205 
29206 /*
29207 * Function: sd_range_lock()
29208 *
29209 * Description: Lock the specified range of blocks to ensure that a
29210 * read-modify-write is atomic and that no other I/O writes
29211 * to the same location. The range is specified in terms
29212 * of start and end blocks. Block numbers are the actual
29213 * media block numbers, not system block numbers.
29214 *
29215 * Arguments: un - sd_lun structure for the device.
29216 * startb - The starting block number
29217 * endb - The end block number
29218 * typ - type of i/o - simple/read_modify_write
29219 *
29220 * Return Code: wm - pointer to the wmap structure.
29221 *
29222 * Context: This routine can sleep.
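 *
 *		State machine sketch (as implemented below):
 *
 *		    SD_WM_CHK_LIST --(no conflicting I/O)--> SD_WM_LOCK_RANGE
 *		    SD_WM_CHK_LIST --(overlap found)-------> SD_WM_WAIT_MAP
 *		    SD_WM_LOCK_RANGE --(wmap obtained)-----> SD_WM_DONE
 *		    SD_WM_LOCK_RANGE --(slept in alloc)----> SD_WM_CHK_LIST
 *		    SD_WM_WAIT_MAP --(woken up)------------> SD_WM_CHK_LIST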
29223 */
29224 
29225 static struct sd_w_map *
29226 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
29227 {
29228 struct sd_w_map *wmp = NULL;
29229 struct sd_w_map *sl_wmp = NULL;
29230 struct sd_w_map *tmp_wmp;
29231 wm_state state = SD_WM_CHK_LIST;
29232 
29233 
29234 ASSERT(un != NULL);
29235 ASSERT(!mutex_owned(SD_MUTEX(un)));
29236 
29237 mutex_enter(SD_MUTEX(un));
29238 
29239 while (state != SD_WM_DONE) {
29240 
29241 switch (state) {
29242 case SD_WM_CHK_LIST:
29243 /*
29244 * This is the starting state. Check the wmap list
29245 * to see if the range is currently available.
29246 */
29247 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
29248 /*
29249 * If this is a simple write and no rmw
29250 * i/o is pending then try to lock the
29251 * range as the range should be available.
29252 */
29253 state = SD_WM_LOCK_RANGE;
29254 } else {
29255 tmp_wmp = sd_get_range(un, startb, endb);
29256 if (tmp_wmp != NULL) {
29257 if ((wmp != NULL) && ONLIST(un, wmp)) {
29258 /*
29259 * Should not keep onlist wmps
29260 * while waiting; this macro
29261 * also sets wmp = NULL.
29262 */
29263 FREE_ONLIST_WMAP(un, wmp);
29264 }
29265 /*
29266 * sl_wmp is the wmap on which the
29267 * wait is done. Since tmp_wmp points
29268 * to the in-use wmap, set sl_wmp to
29269 * tmp_wmp and change the state to sleep.
29270 */
29271 sl_wmp = tmp_wmp;
29272 state = SD_WM_WAIT_MAP;
29273 } else {
29274 state = SD_WM_LOCK_RANGE;
29275 }
29276 
29277 }
29278 break;
29279 
29280 case SD_WM_LOCK_RANGE:
29281 ASSERT(un->un_wm_cache);
29282 /*
29283 * The range needs to be locked; try to get a wmap.
29284 * First attempt it with KM_NOSLEEP: we want to avoid a
29285 * sleep if possible, as we will have to release the sd
29286 * mutex if we have to sleep.
29287 */
29288 if (wmp == NULL)
29289 wmp = kmem_cache_alloc(un->un_wm_cache,
29290 KM_NOSLEEP);
29291 if (wmp == NULL) {
29292 mutex_exit(SD_MUTEX(un));
29293 _NOTE(DATA_READABLE_WITHOUT_LOCK
29294 (sd_lun::un_wm_cache))
29295 wmp = kmem_cache_alloc(un->un_wm_cache,
29296 KM_SLEEP);
29297 mutex_enter(SD_MUTEX(un));
29298 /*
29299 * We released the mutex, so recheck and go
29300 * back to the check list state.
29301 */
29302 state = SD_WM_CHK_LIST;
29303 } else {
29304 /*
29305 * We exit out of the state machine since we
29306 * have the wmap. Do the housekeeping first:
29307 * place the wmap on the wmap list if it is not
29308 * on it already and then set the state to done.
29309 */
29310 wmp->wm_start = startb;
29311 wmp->wm_end = endb;
29312 wmp->wm_flags = typ | SD_WM_BUSY;
29313 if (typ & SD_WTYPE_RMW) {
29314 un->un_rmw_count++;
29315 }
29316 /*
29317 * If not already on the list then link it.
29318 */
29319 if (!ONLIST(un, wmp)) {
29320 wmp->wm_next = un->un_wm;
29321 wmp->wm_prev = NULL;
29322 if (wmp->wm_next)
29323 wmp->wm_next->wm_prev = wmp;
29324 un->un_wm = wmp;
29325 }
29326 state = SD_WM_DONE;
29327 }
29328 break;
29329 
29330 case SD_WM_WAIT_MAP:
29331 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
29332 /*
29333 * Wait is done on sl_wmp, which is set in the
29334 * check_list state.
29335 */
29336 sl_wmp->wm_wanted_count++;
29337 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
29338 sl_wmp->wm_wanted_count--;
29339 /*
29340 * We can reuse the memory from the completed sl_wmp
29341 * lock range for our new lock, but only if no one is
29342 * waiting for it.
29343 			 */
29344 			ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
29345 			if (sl_wmp->wm_wanted_count == 0) {
29346 				if (wmp != NULL)
29347 					CHK_N_FREEWMP(un, wmp);
29348 				wmp = sl_wmp;
29349 			}
29350 			sl_wmp = NULL;
29351 			/*
29352 			 * After waking up, we need to recheck the
29353 			 * availability of the range.
29354 			 */
29355 			state = SD_WM_CHK_LIST;
29356 			break;
29357 
29358 		default:
29359 			panic("sd_range_lock: "
29360 			    "Unknown state %d in sd_range_lock", state);
29361 			/*NOTREACHED*/
29362 		} /* switch(state) */
29363 
29364 	} /* while(state != SD_WM_DONE) */
29365 
29366 	mutex_exit(SD_MUTEX(un));
29367 
29368 	ASSERT(wmp != NULL);
29369 
29370 	return (wmp);
29371 }
29372 
29373 
29374 /*
29375  * Function: sd_get_range()
29376  *
29377  * Description: Find whether there is any I/O overlapping this one.
29378  *		Returns the write-map of the first such I/O, NULL otherwise.
29379  *
29380  * Arguments: un - sd_lun structure for the device.
29381  *		startb - The starting block number
29382  *		endb - The end block number
29383  *
29384  * Return Code: wm - pointer to the wmap structure.
29385  */
29386 
29387 static struct sd_w_map *
29388 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
29389 {
29390 	struct sd_w_map *wmp;
29391 
29392 	ASSERT(un != NULL);
29393 
29394 	for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
29395 		if (!(wmp->wm_flags & SD_WM_BUSY)) {
29396 			continue;
29397 		}
29398 		if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
29399 			break;
29400 		}
29401 		if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
29402 			break;
29403 		}
29404 	}
29405 
29406 	return (wmp);
29407 }
29408 
29409 
29410 /*
29411  * Function: sd_free_inlist_wmap()
29412  *
29413  * Description: Unlink and free a write map struct.
29414  *
29415  * Arguments: un - sd_lun structure for the device.
29416  *		wmp - sd_w_map which needs to be unlinked.
29417  */
29418 
29419 static void
29420 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
29421 {
29422 	ASSERT(un != NULL);
29423 
29424 	if (un->un_wm == wmp) {
29425 		un->un_wm = wmp->wm_next;
29426 	} else {
29427 		wmp->wm_prev->wm_next = wmp->wm_next;
29428 	}
29429 
29430 	if (wmp->wm_next) {
29431 		wmp->wm_next->wm_prev = wmp->wm_prev;
29432 	}
29433 
29434 	wmp->wm_next = wmp->wm_prev = NULL;
29435 
29436 	kmem_cache_free(un->un_wm_cache, wmp);
29437 }
29438 
29439 
29440 /*
29441  * Function: sd_range_unlock()
29442  *
29443  * Description: Unlock the range locked by wm.
29444  *		Free the write map if nobody else is waiting on it.
29445  *
29446  * Arguments: un - sd_lun structure for the device.
29447  *		wm - sd_w_map which needs to be unlocked.
29448  */
29449 
29450 static void
29451 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
29452 {
29453 	ASSERT(un != NULL);
29454 	ASSERT(wm != NULL);
29455 	ASSERT(!mutex_owned(SD_MUTEX(un)));
29456 
29457 	mutex_enter(SD_MUTEX(un));
29458 
29459 	if (wm->wm_flags & SD_WTYPE_RMW) {
29460 		un->un_rmw_count--;
29461 	}
29462 
29463 	if (wm->wm_wanted_count) {
29464 		wm->wm_flags = 0;
29465 		/*
29466 		 * Broadcast that the wmap is available now.
29467 		 */
29468 		cv_broadcast(&wm->wm_avail);
29469 	} else {
29470 		/*
29471 		 * If no one is waiting on the map, it should be freed.
29472 		 */
29473 		sd_free_inlist_wmap(un, wm);
29474 	}
29475 
29476 	mutex_exit(SD_MUTEX(un));
29477 }
29478 
29479 
29480 /*
29481  * Function: sd_read_modify_write_task
29482  *
29483  * Description: Called from a taskq thread to initiate the write phase of
29484  *		a read-modify-write request. This is used for targets where
29485  *		un->un_sys_blocksize != un->un_tgt_blocksize.
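 *
 *		A sketch of the assumed dispatch pattern (not the verbatim
 *		call site): the read-completion side, which runs in
 *		interrupt context, hands the write phase to a taskq
 *		thread roughly as
 *
 *			(void) taskq_dispatch(sd_tq,
 *			    sd_read_modify_write_task, bp, TQ_NOSLEEP);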
29486  *
29487  * Arguments: arg - a pointer to the buf(9S) struct for the write command.
29488  *
29489  * Context: Called in taskq thread context.
29490  */
29491 
29492 static void
29493 sd_read_modify_write_task(void *arg)
29494 {
29495 	struct sd_mapblocksize_info *bsp;
29496 	struct buf *bp;
29497 	struct sd_xbuf *xp;
29498 	struct sd_lun *un;
29499 
29500 	bp = arg;	/* The bp is given in arg */
29501 	ASSERT(bp != NULL);
29502 
29503 	/* Get the pointer to the layer-private data struct */
29504 	xp = SD_GET_XBUF(bp);
29505 	ASSERT(xp != NULL);
29506 	bsp = xp->xb_private;
29507 	ASSERT(bsp != NULL);
29508 
29509 	un = SD_GET_UN(bp);
29510 	ASSERT(un != NULL);
29511 	ASSERT(!mutex_owned(SD_MUTEX(un)));
29512 
29513 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29514 	    "sd_read_modify_write_task: entry: buf:0x%p\n", bp);
29515 
29516 	/*
29517 	 * This is the write phase of a read-modify-write request. It is
29518 	 * called in the context of a taskq thread, in response to the read
29519 	 * portion of the rmw request completing in interrupt context. The
29520 	 * write request must be sent from here down the iostart
29521 	 * chain as if it were being sent from sd_mapblocksize_iostart(), so
29522 	 * we use the layer index saved in the layer-private data area.
29523 	 */
29524 	SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);
29525 
29526 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29527 	    "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
29528 }
29529 
29530 
29531 /*
29532  * Function: sddump_do_read_of_rmw()
29533  *
29534  * Description: This routine is called from sddump. If sddump is called
29535  *		with an I/O that is not aligned on a device blocksize
29536  *		boundary, then the write has to be converted to a
29537  *		read-modify-write. Do the read part here in order to keep
29538  *		sddump simple. Note that the sd_mutex is held across the
29539  *		call to this routine.
29540  *
29541  * Arguments: un	- sd_lun
29542  *		blkno - block number in terms of media block size.
29543  *		nblk - number of blocks.
29544  *		bpp - pointer to pointer to the buf structure. On return
29545  *		from this function, *bpp points to the valid buffer
29546  *		to which the write has to be done.
29547  *
29548  * Return Code: 0 for success or errno-type return code
29549  */
29550 
29551 static int
29552 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
29553     struct buf **bpp)
29554 {
29555 	int err;
29556 	int i;
29557 	int rval;
29558 	struct buf *bp;
29559 	struct scsi_pkt *pkt = NULL;
29560 	uint32_t target_blocksize;
29561 
29562 	ASSERT(un != NULL);
29563 	ASSERT(mutex_owned(SD_MUTEX(un)));
29564 
29565 	target_blocksize = un->un_tgt_blocksize;
29566 
29567 	mutex_exit(SD_MUTEX(un));
29568 
29569 	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
29570 	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
29571 	if (bp == NULL) {
29572 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29573 		    "no resources for dumping; giving up");
29574 		err = ENOMEM;
29575 		goto done;
29576 	}
29577 
29578 	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
29579 	    blkno, nblk);
29580 	if (rval != 0) {
29581 		scsi_free_consistent_buf(bp);
29582 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29583 		    "no resources for dumping; giving up");
29584 		err = ENOMEM;
29585 		goto done;
29586 	}
29587 
29588 	pkt->pkt_flags |= FLAG_NOINTR;
29589 
29590 	err = EIO;
29591 	for (i = 0; i < SD_NDUMP_RETRIES; i++) {
29592 
29593 		/*
29594 		 * Scsi_poll returns 0 (success) if the command completes and
29595 		 * the status block is STATUS_GOOD.
We should only check 29596 * errors if this condition is not true. Even then we should 29597 * send our own request sense packet only if we have a check 29598 * condition and auto request sense has not been performed by 29599 * the hba. 29600 */ 29601 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 29602 29603 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 29604 err = 0; 29605 break; 29606 } 29607 29608 /* 29609 * Check CMD_DEV_GONE 1st, give up if device is gone, 29610 * no need to read RQS data. 29611 */ 29612 if (pkt->pkt_reason == CMD_DEV_GONE) { 29613 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29614 "Error while dumping state with rmw..." 29615 "Device is gone\n"); 29616 break; 29617 } 29618 29619 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 29620 SD_INFO(SD_LOG_DUMP, un, 29621 "sddump: read failed with CHECK, try # %d\n", i); 29622 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 29623 (void) sd_send_polled_RQS(un); 29624 } 29625 29626 continue; 29627 } 29628 29629 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 29630 int reset_retval = 0; 29631 29632 SD_INFO(SD_LOG_DUMP, un, 29633 "sddump: read failed with BUSY, try # %d\n", i); 29634 29635 if (un->un_f_lun_reset_enabled == TRUE) { 29636 reset_retval = scsi_reset(SD_ADDRESS(un), 29637 RESET_LUN); 29638 } 29639 if (reset_retval == 0) { 29640 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 29641 } 29642 (void) sd_send_polled_RQS(un); 29643 29644 } else { 29645 SD_INFO(SD_LOG_DUMP, un, 29646 "sddump: read failed with 0x%x, try # %d\n", 29647 SD_GET_PKT_STATUS(pkt), i); 29648 mutex_enter(SD_MUTEX(un)); 29649 sd_reset_target(un, pkt); 29650 mutex_exit(SD_MUTEX(un)); 29651 } 29652 29653 /* 29654 * If we are not getting anywhere with lun/target resets, 29655 * let's reset the bus. 29656 */ 29657 if (i > SD_NDUMP_RETRIES/2) { 29658 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 29659 (void) sd_send_polled_RQS(un); 29660 } 29661 29662 } 29663 scsi_destroy_pkt(pkt); 29664 29665 if (err != 0) { 29666 scsi_free_consistent_buf(bp); 29667 *bpp = NULL; 29668 } else { 29669 *bpp = bp; 29670 } 29671 29672 done: 29673 mutex_enter(SD_MUTEX(un)); 29674 return (err); 29675 } 29676 29677 29678 /* 29679 * Function: sd_failfast_flushq 29680 * 29681 * Description: Take all bp's on the wait queue that have B_FAILFAST set 29682 * in b_flags and move them onto the failfast queue, then kick 29683 * off a thread to return all bp's on the failfast queue to 29684 * their owners with an error set. 29685 * 29686 * Arguments: un - pointer to the soft state struct for the instance. 29687 * 29688 * Context: may execute in interrupt context. 29689 */ 29690 29691 static void 29692 sd_failfast_flushq(struct sd_lun *un) 29693 { 29694 struct buf *bp; 29695 struct buf *next_waitq_bp; 29696 struct buf *prev_waitq_bp = NULL; 29697 29698 ASSERT(un != NULL); 29699 ASSERT(mutex_owned(SD_MUTEX(un))); 29700 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 29701 ASSERT(un->un_failfast_bp == NULL); 29702 29703 SD_TRACE(SD_LOG_IO_FAILFAST, un, 29704 "sd_failfast_flushq: entry: un:0x%p\n", un); 29705 29706 /* 29707 * Check if we should flush all bufs when entering failfast state, or 29708 * just those with B_FAILFAST set. 29709 */ 29710 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 29711 /* 29712 * Move *all* bp's on the wait queue to the failfast flush 29713 * queue, including those that do NOT have B_FAILFAST set. 
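 *
 * The splice below is O(1): the entire av_forw-linked wait queue is
 * appended to the tail of the failfast queue. Schematically:
 *
 *	before:	failfast: F1->F2			waitq: W1->W2->W3
 *	after:	failfast: F1->F2->W1->W2->W3		waitq: (empty)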
29714 */ 29715 if (un->un_failfast_headp == NULL) { 29716 ASSERT(un->un_failfast_tailp == NULL); 29717 un->un_failfast_headp = un->un_waitq_headp; 29718 } else { 29719 ASSERT(un->un_failfast_tailp != NULL); 29720 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 29721 } 29722 29723 un->un_failfast_tailp = un->un_waitq_tailp; 29724 29725 /* update kstat for each bp moved out of the waitq */ 29726 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 29727 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 29728 } 29729 29730 /* empty the waitq */ 29731 un->un_waitq_headp = un->un_waitq_tailp = NULL; 29732 29733 } else { 29734 /* 29735 * Go thru the wait queue, pick off all entries with 29736 * B_FAILFAST set, and move these onto the failfast queue. 29737 */ 29738 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 29739 /* 29740 * Save the pointer to the next bp on the wait queue, 29741 * so we get to it on the next iteration of this loop. 29742 */ 29743 next_waitq_bp = bp->av_forw; 29744 29745 /* 29746 * If this bp from the wait queue does NOT have 29747 * B_FAILFAST set, just move on to the next element 29748 * in the wait queue. Note, this is the only place 29749 * where it is correct to set prev_waitq_bp. 29750 */ 29751 if ((bp->b_flags & B_FAILFAST) == 0) { 29752 prev_waitq_bp = bp; 29753 continue; 29754 } 29755 29756 /* 29757 * Remove the bp from the wait queue. 29758 */ 29759 if (bp == un->un_waitq_headp) { 29760 /* The bp is the first element of the waitq. */ 29761 un->un_waitq_headp = next_waitq_bp; 29762 if (un->un_waitq_headp == NULL) { 29763 /* The wait queue is now empty */ 29764 un->un_waitq_tailp = NULL; 29765 } 29766 } else { 29767 /* 29768 * The bp is either somewhere in the middle 29769 * or at the end of the wait queue. 29770 */ 29771 ASSERT(un->un_waitq_headp != NULL); 29772 ASSERT(prev_waitq_bp != NULL); 29773 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 29774 == 0); 29775 if (bp == un->un_waitq_tailp) { 29776 /* bp is the last entry on the waitq. */ 29777 ASSERT(next_waitq_bp == NULL); 29778 un->un_waitq_tailp = prev_waitq_bp; 29779 } 29780 prev_waitq_bp->av_forw = next_waitq_bp; 29781 } 29782 bp->av_forw = NULL; 29783 29784 /* 29785 * update kstat since the bp is moved out of 29786 * the waitq 29787 */ 29788 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 29789 29790 /* 29791 * Now put the bp onto the failfast queue. 29792 */ 29793 if (un->un_failfast_headp == NULL) { 29794 /* failfast queue is currently empty */ 29795 ASSERT(un->un_failfast_tailp == NULL); 29796 un->un_failfast_headp = 29797 un->un_failfast_tailp = bp; 29798 } else { 29799 /* Add the bp to the end of the failfast q */ 29800 ASSERT(un->un_failfast_tailp != NULL); 29801 ASSERT(un->un_failfast_tailp->b_flags & 29802 B_FAILFAST); 29803 un->un_failfast_tailp->av_forw = bp; 29804 un->un_failfast_tailp = bp; 29805 } 29806 } 29807 } 29808 29809 /* 29810 * Now return all bp's on the failfast queue to their owners. 29811 */ 29812 while ((bp = un->un_failfast_headp) != NULL) { 29813 29814 un->un_failfast_headp = bp->av_forw; 29815 if (un->un_failfast_headp == NULL) { 29816 un->un_failfast_tailp = NULL; 29817 } 29818 29819 /* 29820 * We want to return the bp with a failure error code, but 29821 * we do not want a call to sd_start_cmds() to occur here, 29822 * so use sd_return_failed_command_no_restart() instead of 29823 * sd_return_failed_command(). 29824 */ 29825 sd_return_failed_command_no_restart(un, bp, EIO); 29826 } 29827 29828 /* Flush the xbuf queues if required. 
 */
29829 	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
29830 		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
29831 	}
29832 
29833 	SD_TRACE(SD_LOG_IO_FAILFAST, un,
29834 	    "sd_failfast_flushq: exit: un:0x%p\n", un);
29835 }
29836 
29837 
29838 /*
29839  * Function: sd_failfast_flushq_callback
29840  *
29841  * Description: Return TRUE if the given bp meets the criteria for failfast
29842  *		flushing. Used with ddi_xbuf_flushq(9F).
29843  *
29844  * Arguments: bp - ptr to buf struct to be examined.
29845  *
29846  * Context: Any
29847  */
29848 
29849 static int
29850 sd_failfast_flushq_callback(struct buf *bp)
29851 {
29852 	/*
29853 	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
29854 	 * state is entered; OR (2) the given bp has B_FAILFAST set.
29855 	 */
29856 	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
29857 	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
29858 }
29859 
29860 
29861 
29862 /*
29863  * Function: sd_setup_next_xfer
29864  *
29865  * Description: Prepare the next I/O operation using DMA_PARTIAL
29866  *
29867  */
29868 
29869 static int
29870 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
29871     struct scsi_pkt *pkt, struct sd_xbuf *xp)
29872 {
29873 	ssize_t	num_blks_not_xfered;
29874 	daddr_t	strt_blk_num;
29875 	ssize_t	bytes_not_xfered;
29876 	int	rval;
29877 
29878 	ASSERT(pkt->pkt_resid == 0);
29879 
29880 	/*
29881 	 * Calculate next block number and amount to be transferred.
29882 	 *
29883 	 * How much data has NOT been transferred to the HBA yet.
29884 	 */
29885 	bytes_not_xfered = xp->xb_dma_resid;
29886 
29887 	/*
29888 	 * Figure out how many blocks have NOT been transferred to the HBA yet.
29889 	 */
29890 	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);
29891 
29892 	/*
29893 	 * Set the starting block number to the end of what WAS transferred.
29894 	 */
29895 	strt_blk_num = xp->xb_blkno +
29896 	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);
29897 
29898 	/*
29899 	 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
29900 	 * will call scsi_init_pkt with NULL_FUNC so we do not have to release
29901 	 * the disk mutex here.
29902 	 */
29903 	rval = sd_setup_next_rw_pkt(un, pkt, bp,
29904 	    strt_blk_num, num_blks_not_xfered);
29905 
29906 	if (rval == 0) {
29907 
29908 		/*
29909 		 * Success.
29910 		 *
29911 		 * Adjust things if there are still more blocks to be
29912 		 * transferred.
29913 		 */
29914 		xp->xb_dma_resid = pkt->pkt_resid;
29915 		pkt->pkt_resid = 0;
29916 
29917 		return (1);
29918 	}
29919 
29920 	/*
29921 	 * There's really only one possible error return from
29922 	 * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt
29923 	 * returns NULL.
29924 	 */
29925 	ASSERT(rval == SD_PKT_ALLOC_FAILURE);
29926 
29927 	bp->b_resid = bp->b_bcount;
29928 	bp->b_flags |= B_ERROR;
29929 
29930 	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29931 	    "Error setting up next portion of DMA transfer\n");
29932 
29933 	return (0);
29934 }
29935 
29936 /*
29937  * Function: sd_panic_for_res_conflict
29938  *
29939  * Description: Call panic with a string formatted with "Reservation Conflict"
29940  *		and a human readable identifier indicating the SD instance
29941  *		that experienced the reservation conflict.
29942  *
29943  * Arguments: un - pointer to the soft state struct for the instance.
29944  *
29945  * Context: may execute in interrupt context.
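 *
 * The resulting panic message has the form (the device path shown is a
 * made-up example):
 *
 *	Reservation Conflict
 *	Disk: /pci@0,0/pci1000,30@10/disk@1,0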
29946 */ 29947 29948 #define SD_RESV_CONFLICT_FMT_LEN 40 29949 void 29950 sd_panic_for_res_conflict(struct sd_lun *un) 29951 { 29952 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN]; 29953 char path_str[MAXPATHLEN]; 29954 29955 (void) snprintf(panic_str, sizeof (panic_str), 29956 "Reservation Conflict\nDisk: %s", 29957 ddi_pathname(SD_DEVINFO(un), path_str)); 29958 29959 panic(panic_str); 29960 } 29961 29962 /* 29963 * Note: The following sd_faultinjection_ioctl( ) routines implement 29964 * driver support for handling fault injection for error analysis 29965 * causing faults in multiple layers of the driver. 29966 * 29967 */ 29968 29969 #ifdef SD_FAULT_INJECTION 29970 static uint_t sd_fault_injection_on = 0; 29971 29972 /* 29973 * Function: sd_faultinjection_ioctl() 29974 * 29975 * Description: This routine is the driver entry point for handling 29976 * faultinjection ioctls to inject errors into the 29977 * layer model 29978 * 29979 * Arguments: cmd - the ioctl cmd received 29980 * arg - the arguments from user and returns 29981 */ 29982 29983 static void 29984 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { 29985 29986 uint_t i = 0; 29987 uint_t rval; 29988 29989 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 29990 29991 mutex_enter(SD_MUTEX(un)); 29992 29993 switch (cmd) { 29994 case SDIOCRUN: 29995 /* Allow pushed faults to be injected */ 29996 SD_INFO(SD_LOG_SDTEST, un, 29997 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 29998 29999 sd_fault_injection_on = 1; 30000 30001 SD_INFO(SD_LOG_IOERR, un, 30002 "sd_faultinjection_ioctl: run finished\n"); 30003 break; 30004 30005 case SDIOCSTART: 30006 /* Start Injection Session */ 30007 SD_INFO(SD_LOG_SDTEST, un, 30008 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 30009 30010 sd_fault_injection_on = 0; 30011 un->sd_injection_mask = 0xFFFFFFFF; 30012 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 30013 un->sd_fi_fifo_pkt[i] = NULL; 30014 un->sd_fi_fifo_xb[i] = NULL; 30015 un->sd_fi_fifo_un[i] = NULL; 30016 un->sd_fi_fifo_arq[i] = NULL; 30017 } 30018 un->sd_fi_fifo_start = 0; 30019 un->sd_fi_fifo_end = 0; 30020 30021 mutex_enter(&(un->un_fi_mutex)); 30022 un->sd_fi_log[0] = '\0'; 30023 un->sd_fi_buf_len = 0; 30024 mutex_exit(&(un->un_fi_mutex)); 30025 30026 SD_INFO(SD_LOG_IOERR, un, 30027 "sd_faultinjection_ioctl: start finished\n"); 30028 break; 30029 30030 case SDIOCSTOP: 30031 /* Stop Injection Session */ 30032 SD_INFO(SD_LOG_SDTEST, un, 30033 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 30034 sd_fault_injection_on = 0; 30035 un->sd_injection_mask = 0x0; 30036 30037 /* Empty stray or unuseds structs from fifo */ 30038 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 30039 if (un->sd_fi_fifo_pkt[i] != NULL) { 30040 kmem_free(un->sd_fi_fifo_pkt[i], 30041 sizeof (struct sd_fi_pkt)); 30042 } 30043 if (un->sd_fi_fifo_xb[i] != NULL) { 30044 kmem_free(un->sd_fi_fifo_xb[i], 30045 sizeof (struct sd_fi_xb)); 30046 } 30047 if (un->sd_fi_fifo_un[i] != NULL) { 30048 kmem_free(un->sd_fi_fifo_un[i], 30049 sizeof (struct sd_fi_un)); 30050 } 30051 if (un->sd_fi_fifo_arq[i] != NULL) { 30052 kmem_free(un->sd_fi_fifo_arq[i], 30053 sizeof (struct sd_fi_arq)); 30054 } 30055 un->sd_fi_fifo_pkt[i] = NULL; 30056 un->sd_fi_fifo_un[i] = NULL; 30057 un->sd_fi_fifo_xb[i] = NULL; 30058 un->sd_fi_fifo_arq[i] = NULL; 30059 } 30060 un->sd_fi_fifo_start = 0; 30061 un->sd_fi_fifo_end = 0; 30062 30063 SD_INFO(SD_LOG_IOERR, un, 30064 "sd_faultinjection_ioctl: stop finished\n"); 30065 break; 30066 30067 case SDIOCINSERTPKT: 30068 /* 
Store a packet struct to be pushed onto fifo */ 30069 SD_INFO(SD_LOG_SDTEST, un, 30070 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 30071 30072 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 30073 30074 sd_fault_injection_on = 0; 30075 30076 /* No more that SD_FI_MAX_ERROR allowed in Queue */ 30077 if (un->sd_fi_fifo_pkt[i] != NULL) { 30078 kmem_free(un->sd_fi_fifo_pkt[i], 30079 sizeof (struct sd_fi_pkt)); 30080 } 30081 if (arg != NULL) { 30082 un->sd_fi_fifo_pkt[i] = 30083 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 30084 if (un->sd_fi_fifo_pkt[i] == NULL) { 30085 /* Alloc failed don't store anything */ 30086 break; 30087 } 30088 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 30089 sizeof (struct sd_fi_pkt), 0); 30090 if (rval == -1) { 30091 kmem_free(un->sd_fi_fifo_pkt[i], 30092 sizeof (struct sd_fi_pkt)); 30093 un->sd_fi_fifo_pkt[i] = NULL; 30094 } 30095 } else { 30096 SD_INFO(SD_LOG_IOERR, un, 30097 "sd_faultinjection_ioctl: pkt null\n"); 30098 } 30099 break; 30100 30101 case SDIOCINSERTXB: 30102 /* Store a xb struct to be pushed onto fifo */ 30103 SD_INFO(SD_LOG_SDTEST, un, 30104 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 30105 30106 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 30107 30108 sd_fault_injection_on = 0; 30109 30110 if (un->sd_fi_fifo_xb[i] != NULL) { 30111 kmem_free(un->sd_fi_fifo_xb[i], 30112 sizeof (struct sd_fi_xb)); 30113 un->sd_fi_fifo_xb[i] = NULL; 30114 } 30115 if (arg != NULL) { 30116 un->sd_fi_fifo_xb[i] = 30117 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 30118 if (un->sd_fi_fifo_xb[i] == NULL) { 30119 /* Alloc failed don't store anything */ 30120 break; 30121 } 30122 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 30123 sizeof (struct sd_fi_xb), 0); 30124 30125 if (rval == -1) { 30126 kmem_free(un->sd_fi_fifo_xb[i], 30127 sizeof (struct sd_fi_xb)); 30128 un->sd_fi_fifo_xb[i] = NULL; 30129 } 30130 } else { 30131 SD_INFO(SD_LOG_IOERR, un, 30132 "sd_faultinjection_ioctl: xb null\n"); 30133 } 30134 break; 30135 30136 case SDIOCINSERTUN: 30137 /* Store a un struct to be pushed onto fifo */ 30138 SD_INFO(SD_LOG_SDTEST, un, 30139 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 30140 30141 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 30142 30143 sd_fault_injection_on = 0; 30144 30145 if (un->sd_fi_fifo_un[i] != NULL) { 30146 kmem_free(un->sd_fi_fifo_un[i], 30147 sizeof (struct sd_fi_un)); 30148 un->sd_fi_fifo_un[i] = NULL; 30149 } 30150 if (arg != NULL) { 30151 un->sd_fi_fifo_un[i] = 30152 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 30153 if (un->sd_fi_fifo_un[i] == NULL) { 30154 /* Alloc failed don't store anything */ 30155 break; 30156 } 30157 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 30158 sizeof (struct sd_fi_un), 0); 30159 if (rval == -1) { 30160 kmem_free(un->sd_fi_fifo_un[i], 30161 sizeof (struct sd_fi_un)); 30162 un->sd_fi_fifo_un[i] = NULL; 30163 } 30164 30165 } else { 30166 SD_INFO(SD_LOG_IOERR, un, 30167 "sd_faultinjection_ioctl: un null\n"); 30168 } 30169 30170 break; 30171 30172 case SDIOCINSERTARQ: 30173 /* Store a arq struct to be pushed onto fifo */ 30174 SD_INFO(SD_LOG_SDTEST, un, 30175 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 30176 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 30177 30178 sd_fault_injection_on = 0; 30179 30180 if (un->sd_fi_fifo_arq[i] != NULL) { 30181 kmem_free(un->sd_fi_fifo_arq[i], 30182 sizeof (struct sd_fi_arq)); 30183 un->sd_fi_fifo_arq[i] = NULL; 30184 } 30185 if (arg != NULL) { 30186 un->sd_fi_fifo_arq[i] = 30187 kmem_alloc(sizeof (struct 
sd_fi_arq), KM_NOSLEEP);
30188 			if (un->sd_fi_fifo_arq[i] == NULL) {
30189 				/* Alloc failed don't store anything */
30190 				break;
30191 			}
30192 			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
30193 			    sizeof (struct sd_fi_arq), 0);
30194 			if (rval == -1) {
30195 				kmem_free(un->sd_fi_fifo_arq[i],
30196 				    sizeof (struct sd_fi_arq));
30197 				un->sd_fi_fifo_arq[i] = NULL;
30198 			}
30199 
30200 		} else {
30201 			SD_INFO(SD_LOG_IOERR, un,
30202 			    "sd_faultinjection_ioctl: arq null\n");
30203 		}
30204 
30205 		break;
30206 
30207 	case SDIOCPUSH:
30208 		/* Push stored xb, pkt, un, and arq onto fifo */
30209 		sd_fault_injection_on = 0;
30210 
30211 		if (arg != NULL) {
30212 			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
30213 			if (rval != -1 &&
30214 			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
30215 				un->sd_fi_fifo_end += i;
30216 			}
30217 		} else {
30218 			SD_INFO(SD_LOG_IOERR, un,
30219 			    "sd_faultinjection_ioctl: push arg null\n");
30220 			if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
30221 				un->sd_fi_fifo_end++;
30222 			}
30223 		}
30224 		SD_INFO(SD_LOG_IOERR, un,
30225 		    "sd_faultinjection_ioctl: push to end=%d\n",
30226 		    un->sd_fi_fifo_end);
30227 		break;
30228 
30229 	case SDIOCRETRIEVE:
30230 		/* Return buffer of log from Injection session */
30231 		SD_INFO(SD_LOG_SDTEST, un,
30232 		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");
30233 
30234 		sd_fault_injection_on = 0;
30235 
30236 		mutex_enter(&(un->un_fi_mutex));
30237 		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
30238 		    un->sd_fi_buf_len+1, 0);
30239 		mutex_exit(&(un->un_fi_mutex));
30240 
30241 		if (rval == -1) {
30242 			/*
30243 			 * arg is possibly invalid; set
30244 			 * it to NULL for the return.
30245 			 */
30246 			arg = NULL;
30247 		}
30248 		break;
30249 	}
30250 
30251 	mutex_exit(SD_MUTEX(un));
30252 	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
30253 	    " exit\n");
30254 }
30255 
30256 
30257 /*
30258  * Function: sd_injection_log()
30259  *
30260  * Description: This routine appends buf to the existing injection log,
30261  *		for retrieval via sd_faultinjection_ioctl(), for use in
30262  *		fault detection and recovery.
30263  *
30264  * Arguments: buf - the string to add to the log
30265  */
30266 
30267 static void
30268 sd_injection_log(char *buf, struct sd_lun *un)
30269 {
30270 	uint_t len;
30271 
30272 	ASSERT(un != NULL);
30273 	ASSERT(buf != NULL);
30274 
30275 	mutex_enter(&(un->un_fi_mutex));
30276 
30277 	len = min(strlen(buf), 255);
30278 	/* Add logged value to Injection log to be returned later */
30279 	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
30280 		uint_t	offset = strlen((char *)un->sd_fi_log);
30281 		char *destp = (char *)un->sd_fi_log + offset;
30282 		int i;
30283 		for (i = 0; i < len; i++) {
30284 			*destp++ = *buf++;
30285 		}
30286 		un->sd_fi_buf_len += len;
30287 		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
30288 	}
30289 
30290 	mutex_exit(&(un->un_fi_mutex));
30291 }
30292 
30293 
30294 /*
30295  * Function: sd_faultinjection()
30296  *
30297  * Description: This routine takes the pkt and changes its
30298  *		content based on the error-injection scenario.
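 *
 *		The fifo is a fixed array of SD_FI_MAX_ERROR slots, indexed
 *		modulo its size. For example, the consumer (this routine) and
 *		the producer (sd_faultinjection_ioctl) pick their slots as:
 *
 *			i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;
 *			i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;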
30299 * 30300 * Arguments: pktp - packet to be changed 30301 */ 30302 30303 static void 30304 sd_faultinjection(struct scsi_pkt *pktp) 30305 { 30306 uint_t i; 30307 struct sd_fi_pkt *fi_pkt; 30308 struct sd_fi_xb *fi_xb; 30309 struct sd_fi_un *fi_un; 30310 struct sd_fi_arq *fi_arq; 30311 struct buf *bp; 30312 struct sd_xbuf *xb; 30313 struct sd_lun *un; 30314 30315 ASSERT(pktp != NULL); 30316 30317 /* pull bp xb and un from pktp */ 30318 bp = (struct buf *)pktp->pkt_private; 30319 xb = SD_GET_XBUF(bp); 30320 un = SD_GET_UN(bp); 30321 30322 ASSERT(un != NULL); 30323 30324 mutex_enter(SD_MUTEX(un)); 30325 30326 SD_TRACE(SD_LOG_SDTEST, un, 30327 "sd_faultinjection: entry Injection from sdintr\n"); 30328 30329 /* if injection is off return */ 30330 if (sd_fault_injection_on == 0 || 30331 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 30332 mutex_exit(SD_MUTEX(un)); 30333 return; 30334 } 30335 30336 SD_INFO(SD_LOG_SDTEST, un, 30337 "sd_faultinjection: is working for copying\n"); 30338 30339 /* take next set off fifo */ 30340 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 30341 30342 fi_pkt = un->sd_fi_fifo_pkt[i]; 30343 fi_xb = un->sd_fi_fifo_xb[i]; 30344 fi_un = un->sd_fi_fifo_un[i]; 30345 fi_arq = un->sd_fi_fifo_arq[i]; 30346 30347 30348 /* set variables accordingly */ 30349 /* set pkt if it was on fifo */ 30350 if (fi_pkt != NULL) { 30351 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 30352 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 30353 if (fi_pkt->pkt_cdbp != 0xff) 30354 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 30355 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 30356 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 30357 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 30358 30359 } 30360 /* set xb if it was on fifo */ 30361 if (fi_xb != NULL) { 30362 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 30363 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 30364 if (fi_xb->xb_retry_count != 0) 30365 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 30366 SD_CONDSET(xb, xb, xb_victim_retry_count, 30367 "xb_victim_retry_count"); 30368 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 30369 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 30370 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 30371 30372 /* copy in block data from sense */ 30373 /* 30374 * if (fi_xb->xb_sense_data[0] != -1) { 30375 * bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 30376 * SENSE_LENGTH); 30377 * } 30378 */ 30379 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH); 30380 30381 /* copy in extended sense codes */ 30382 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30383 xb, es_code, "es_code"); 30384 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30385 xb, es_key, "es_key"); 30386 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30387 xb, es_add_code, "es_add_code"); 30388 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30389 xb, es_qual_code, "es_qual_code"); 30390 struct scsi_extended_sense *esp; 30391 esp = (struct scsi_extended_sense *)xb->xb_sense_data; 30392 esp->es_class = CLASS_EXTENDED_SENSE; 30393 } 30394 30395 /* set un if it was on fifo */ 30396 if (fi_un != NULL) { 30397 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 30398 SD_CONDSET(un, un, un_ctype, "un_ctype"); 30399 SD_CONDSET(un, un, un_reset_retry_count, 30400 "un_reset_retry_count"); 30401 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 30402 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 30403 SD_CONDSET(un, un, 
un_f_arq_enabled, "un_f_arq_enabled");
30404 		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
30405 		    "un_f_allow_bus_device_reset");
30406 		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");
30407 
30408 	}
30409 
30410 	/* copy in auto request sense if it was on fifo */
30411 	if (fi_arq != NULL) {
30412 		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
30413 	}
30414 
30415 	/* free structs */
30416 	if (un->sd_fi_fifo_pkt[i] != NULL) {
30417 		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
30418 	}
30419 	if (un->sd_fi_fifo_xb[i] != NULL) {
30420 		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
30421 	}
30422 	if (un->sd_fi_fifo_un[i] != NULL) {
30423 		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
30424 	}
30425 	if (un->sd_fi_fifo_arq[i] != NULL) {
30426 		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
30427 	}
30428 
30429 	/*
30430 	 * kmem_free() does not guarantee that the pointers are
30431 	 * set to NULL. Since we use these pointers to determine
30432 	 * whether we set values or not, make sure they are
30433 	 * always NULL after the free.
30434 	 */
30435 	un->sd_fi_fifo_pkt[i] = NULL;
30436 	un->sd_fi_fifo_un[i] = NULL;
30437 	un->sd_fi_fifo_xb[i] = NULL;
30438 	un->sd_fi_fifo_arq[i] = NULL;
30439 
30440 	un->sd_fi_fifo_start++;
30441 
30442 	mutex_exit(SD_MUTEX(un));
30443 
30444 	SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
30445 }
30446 
30447 #endif /* SD_FAULT_INJECTION */
30448 
30449 /*
30450  * This routine is invoked from sd_unit_attach(). Before it is called,
30451  * the properties in the conf file, including the "hotpluggable"
30452  * property, must already have been processed.
30453  *
30454  * The sd driver distinguishes three types of devices: removable media,
30455  * non-removable media, and hotpluggable. The differences are defined below:
30456  *
30457  * 1. Device ID
30458  *
30459  *    The device ID of a device is used to identify this device. Refer to
30460  *    ddi_devid_register(9F).
30461  *
30462  *    For a non-removable media disk device which can provide 0x80 or 0x83
30463  *    VPD page (refer to INQUIRY command of SCSI SPC specification), a unique
30464  *    device ID is created to identify this device. For other non-removable
30465  *    media devices, a default device ID is created only if this device has
30466  *    at least 2 alternate cylinders. Otherwise, this device has no devid.
30467  *
30468  *    -------------------------------------------------------
30469  *    removable media   hotpluggable  | Can Have Device ID
30470  *    -------------------------------------------------------
30471  *        false             false     |     Yes
30472  *        false             true      |     Yes
30473  *        true                x       |     No
30474  *    ------------------------------------------------------
30475  *
30476  *
30477  * 2. SCSI group 4 commands
30478  *
30479  *    In the SCSI specs, only some commands in the group 4 command set
30480  *    can use 8-byte addresses to access storage beyond 2TB. Other
30481  *    commands have no such capability. Without group 4 support, it is
30482  *    impossible to make full use of the storage space of a disk with a
30483  *    capacity larger than 2TB.
30484  *
30485  *    -----------------------------------------------
30486  *    removable media    hotpluggable   LP64  |  Group
30487  *    -----------------------------------------------
30488  *          false           false     false  |   1
30489  *          false           false     true   |   4
30490  *          false           true      false  |   1
30491  *          false           true      true   |   4
30492  *          true              x         x    |   5
30493  *    -----------------------------------------------
30494  *
30495  *
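 *    For example, with a 512-byte blocksize, the 4-byte LBA of a group 1
 *    READ(10)/WRITE(10) command can address at most 2^32 * 512 bytes = 2TB,
 *    while the 8-byte LBA of a group 4 READ(16)/WRITE(16) command removes
 *    that limit.
 *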
30496  * 3. Check for VTOC Label
30497  *
30498  *    If a direct-access disk has no EFI label, sd will check whether it
30499  *    has a valid VTOC label. Now sd also performs that check for removable
30500  *    media and hotpluggable devices.
30501  *
30502  *    --------------------------------------------------------------
30503  *    Direct-Access   removable media    hotpluggable |  Check Label
30504  *    -------------------------------------------------------------
30505  *        false          false           false        |   No
30506  *        false          false           true         |   No
30507  *        false          true            false        |   Yes
30508  *        false          true            true         |   Yes
30509  *        true            x                x          |   Yes
30510  *    --------------------------------------------------------------
30511  *
30512  *
30513  * 4. Building default VTOC label
30514  *
30515  *    As section 3 says, sd checks whether some kinds of devices have a VTOC
30516  *    label. If those devices have no valid VTOC label, sd(7d) will attempt
30517  *    to create a default VTOC for them. Currently sd creates a default VTOC
30518  *    label for all devices on the x86 platform (VTOC_16), but only for
30519  *    removable media devices on SPARC (VTOC_8).
30520  *
30521  *    -----------------------------------------------------------
30522  *    removable media hotpluggable platform   |   Default Label
30523  *    -----------------------------------------------------------
30524  *         false          false       sparc      |     No
 *         false          false       x86        |     Yes
30525  *         false          true        x86        |     Yes
30526  *         false          true        sparc      |     Yes
30527  *         true             x           x        |     Yes
30528  *    ----------------------------------------------------------
30529  *
30530  *
30531  * 5. Supported blocksizes of target devices
30532  *
30533  *    sd supports a non-512-byte blocksize for removable media devices only.
30534  *    For other devices, only a 512-byte blocksize is supported. This may
30535  *    change in the near future, because some RAID devices require a
30536  *    non-512-byte blocksize.
30537  *
30538  *    -----------------------------------------------------------
30539  *    removable media    hotpluggable    | non-512-byte blocksize
30540  *    -----------------------------------------------------------
30541  *          false          false         |   No
30542  *          false          true          |   No
30543  *          true             x           |   Yes
30544  *    -----------------------------------------------------------
30545  *
30546  *
30547  * 6. Automatic mount & unmount
30548  *
30549  *    The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which is used to
30550  *    query whether a device is a removable media device. It returns 1 for
30551  *    removable media devices, and 0 for others.
30552  *
30553  *    The automatic mounting subsystem should distinguish between the types
30554  *    of devices and apply automounting policies to each.
30555  *
30556  *
30557  * 7. fdisk partition management
30558  *
30559  *    Fdisk is the traditional partitioning method on the x86 platform. The
30560  *    sd(7d) driver supports fdisk partitions only on the x86 platform; on
30561  *    SPARC, sd doesn't support fdisk partitions at all. Note: pcfs(7fs) can
30562  *    recognize fdisk partitions on both x86 and SPARC platforms.
30563  *
30564  *    -----------------------------------------------------------
30565  *       platform   removable media  USB/1394  |  fdisk supported
30566  *    -----------------------------------------------------------
30567  *        x86         X               X        |       true
30568  *    ------------------------------------------------------------
30569  *        sparc       X               X        |       false
30570  *    ------------------------------------------------------------
30571  *
30572  *
30573  * 8. MBOOT/MBR
30574  *
30575  *    Although sd(7d) doesn't support fdisk on the SPARC platform, it does
30576  *    support reading/writing the mboot for removable media devices on SPARC.
30577  *
30578  *    -----------------------------------------------------------
30579  *       platform   removable media  USB/1394  |  mboot supported
30580  *    -----------------------------------------------------------
30581  *        x86         X               X        |       true
30582  *    ------------------------------------------------------------
30583  *        sparc       false           false    |       false
30584  *        sparc       false           true     |       true
30585  *        sparc       true            false    |       true
30586  *        sparc       true            true     |       true
30587  *    ------------------------------------------------------------
30588  *
30589  *
30590  * 9. Error handling during device open
30591  *
30592  *    If a disk device fails to open, an errno is returned. For some kinds
30593  *    of errors, a different errno is returned depending on whether the
30594  *    device is a removable media device. This brings USB/1394 hard disks in
30595  *    line with expected hard disk behavior. It is not expected that this
30596  *    breaks any application.
30597  *
30598  *    ------------------------------------------------------
30599  *      removable media    hotpluggable   |  errno
30600  *    ------------------------------------------------------
30601  *          false            false        |   EIO
30602  *          false            true         |   EIO
30603  *          true              x           |   ENXIO
30604  *    ------------------------------------------------------
30605  *
30606  *
30607  * 10. ioctls: DKIOCEJECT, CDROMEJECT
30608  *
30609  *    These IOCTLs are applicable only to removable media devices.
30610  *
30611  *    -----------------------------------------------------------
30612  *      removable media    hotpluggable   |DKIOCEJECT, CDROMEJECT
30613  *    -----------------------------------------------------------
30614  *          false            false        |     No
30615  *          false            true         |     No
30616  *          true              x           |     Yes
30617  *    -----------------------------------------------------------
30618  *
30619  *
30620  * 11. Kstats for partitions
30621  *
30622  *    sd creates partition kstats for non-removable media devices. USB and
30623  *    FireWire hard disks now have partition kstats as well.
30624  *
30625  *    ------------------------------------------------------
30626  *      removable media    hotpluggable   |   kstat
30627  *    ------------------------------------------------------
30628  *          false            false        |    Yes
30629  *          false            true         |    Yes
30630  *          true              x           |    No
30631  *    ------------------------------------------------------
30632  *
30633  *
30634  * 12. Removable media & hotpluggable properties
30635  *
30636  *    The sd driver creates a "removable-media" property for removable
30637  *    media devices. A parent nexus driver creates a "hotpluggable"
30638  *    property if it supports hotplugging.
30639  *
30640  *    ---------------------------------------------------------------------
30641  *      removable media   hotpluggable |  "removable-media"   "hotpluggable"
30642  *    ---------------------------------------------------------------------
30643  *        false            false       |     No                  No
30644  *        false            true        |     No                  Yes
30645  *        true             false       |     Yes                 No
30646  *        true             true        |     Yes                 Yes
30647  *    ---------------------------------------------------------------------
30648  *
30649  *
30650  * 13. Power Management
30651  *
30652  *    sd only power manages removable media devices or devices that support
30653  *    LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
30654  *
30655  *    A parent nexus that supports hotplugging can also set "pm-capable"
30656  *    if the disk can be power managed.
30657  *
30658  *    ------------------------------------------------------------
30659  *     removable media  hotpluggable  pm-capable  |   power manage
30660  *    ------------------------------------------------------------
30661  *        false            false         false    |     No
30662  *        false            false         true     |     Yes
30663  *        false            true          false    |     No
30664  *        false            true          true     |     Yes
30665  *        true              x              x      |     Yes
30666  *    ------------------------------------------------------------
30667  *
30668  *     USB and FireWire hard disks can now be power managed independently
30669  *     of the framebuffer.
30670  *
30671  *
30672  * 14. Support for USB disks with capacity larger than 1TB
30673  *
30674  *    Currently, sd doesn't permit a fixed disk device with capacity
30675  *    larger than 1TB to be used in a 32-bit operating system environment.
30676  *    However, sd doesn't do that for removable media devices. Instead, it
30677  *    assumes that removable media devices cannot have a capacity larger
30678  *    than 1TB. Therefore, using those devices on a 32-bit system is only
30679  *    partially supported, which can cause unexpected results.
30680  *
30681  *    ---------------------------------------------------------------------
30682  *      removable media    USB/1394 | Capacity > 1TB |   Used in 32-bit env
30683  *    ---------------------------------------------------------------------
30684  *        false              false  |      true      |     No
30685  *        false              true   |      true      |     No
30686  *        true               false  |      true      |     Yes
30687  *        true               true   |      true      |     Yes
30688  *    ---------------------------------------------------------------------
30689  *
30690  *
30691  * 15. Check write-protection at open time
30692  *
30693  *    When a removable media device is opened for writing without the NDELAY
30694  *    flag, sd will check whether the device is writable. An attempt to open
30695  *    a write-protected device for writing without the NDELAY flag will fail.
30696  *
30697  *    ------------------------------------------------------------
30698  *      removable media    USB/1394   |   WP Check
30699  *    ------------------------------------------------------------
30700  *          false            false    |     No
30701  *          false            true     |     No
30702  *          true             false    |     Yes
30703  *          true             true     |     Yes
30704  *    ------------------------------------------------------------
30705  *
30706  *
30707  * 16. syslog when a corrupted VTOC is encountered
30708  *
30709  *    Currently, if an invalid VTOC is encountered, sd logs a syslog
30710  *    message only for fixed SCSI disks.
30711  *    ------------------------------------------------------------
30712  *      removable media    USB/1394   |   print syslog
30713  *    ------------------------------------------------------------
30714  *          false            false    |      Yes
30715  *          false            true     |      No
30716  *          true             false    |      No
30717  *          true             true     |      No
30718  *    ------------------------------------------------------------
30719  */
30720 static void
30721 sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
30722 {
30723 	int	pm_cap;
30724 
30725 	ASSERT(un->un_sd);
30726 	ASSERT(un->un_sd->sd_inq);
30727 
30728 	/*
30729 	 * Enable SYNC CACHE support for all devices.
30730 	 */
30731 	un->un_f_sync_cache_supported = TRUE;
30732 
30733 	/*
30734 	 * Set the sync cache required flag to false.
30735 	 * This ensures that no SYNC CACHE is sent
30736 	 * when there have been no writes.
30737 	 */
30738 	un->un_f_sync_cache_required = FALSE;
30739 
30740 	if (un->un_sd->sd_inq->inq_rmb) {
30741 		/*
30742 		 * The media of this device is removable, and for this kind
30743 		 * of device it is possible to change the medium after it is
30744 		 * opened. Thus we should support this operation.
30745 */ 30746 un->un_f_has_removable_media = TRUE; 30747 30748 /* 30749 * support non-512-byte blocksize of removable media devices 30750 */ 30751 un->un_f_non_devbsize_supported = TRUE; 30752 30753 /* 30754 * Assume that all removable media devices support DOOR_LOCK 30755 */ 30756 un->un_f_doorlock_supported = TRUE; 30757 30758 /* 30759 * For a removable media device, it is possible to be opened 30760 * with NDELAY flag when there is no media in drive, in this 30761 * case we don't care if device is writable. But if without 30762 * NDELAY flag, we need to check if media is write-protected. 30763 */ 30764 un->un_f_chk_wp_open = TRUE; 30765 30766 /* 30767 * need to start a SCSI watch thread to monitor media state, 30768 * when media is being inserted or ejected, notify syseventd. 30769 */ 30770 un->un_f_monitor_media_state = TRUE; 30771 30772 /* 30773 * Some devices don't support START_STOP_UNIT command. 30774 * Therefore, we'd better check if a device supports it 30775 * before sending it. 30776 */ 30777 un->un_f_check_start_stop = TRUE; 30778 30779 /* 30780 * support eject media ioctl: 30781 * FDEJECT, DKIOCEJECT, CDROMEJECT 30782 */ 30783 un->un_f_eject_media_supported = TRUE; 30784 30785 /* 30786 * Because many removable-media devices don't support 30787 * LOG_SENSE, we couldn't use this command to check if 30788 * a removable media device support power-management. 30789 * We assume that they support power-management via 30790 * START_STOP_UNIT command and can be spun up and down 30791 * without limitations. 30792 */ 30793 un->un_f_pm_supported = TRUE; 30794 30795 /* 30796 * Need to create a zero length (Boolean) property 30797 * removable-media for the removable media devices. 30798 * Note that the return value of the property is not being 30799 * checked, since if unable to create the property 30800 * then do not want the attach to fail altogether. Consistent 30801 * with other property creation in attach. 30802 */ 30803 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 30804 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 30805 30806 } else { 30807 /* 30808 * create device ID for device 30809 */ 30810 un->un_f_devid_supported = TRUE; 30811 30812 /* 30813 * Spin up non-removable-media devices once it is attached 30814 */ 30815 un->un_f_attach_spinup = TRUE; 30816 30817 /* 30818 * According to SCSI specification, Sense data has two kinds of 30819 * format: fixed format, and descriptor format. At present, we 30820 * don't support descriptor format sense data for removable 30821 * media. 30822 */ 30823 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) { 30824 un->un_f_descr_format_supported = TRUE; 30825 } 30826 30827 /* 30828 * kstats are created only for non-removable media devices. 30829 * 30830 * Set this in sd.conf to 0 in order to disable kstats. The 30831 * default is 1, so they are enabled by default. 30832 */ 30833 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 30834 SD_DEVINFO(un), DDI_PROP_DONTPASS, 30835 "enable-partition-kstats", 1)); 30836 30837 /* 30838 * Check if HBA has set the "pm-capable" property. 30839 * If "pm-capable" exists and is non-zero then we can 30840 * power manage the device without checking the start/stop 30841 * cycle count log sense page. 30842 * 30843 * If "pm-capable" exists and is set to be false (0), 30844 * then we should not power manage the device. 30845 * 30846 * If "pm-capable" doesn't exist then pm_cap will 30847 * be set to SD_PM_CAPABLE_UNDEFINED (-1). 
In this case, 30848 * sd will check the start/stop cycle count log sense page 30849 * and power manage the device if the cycle count limit has 30850 * not been exceeded. 30851 */ 30852 pm_cap = ddi_prop_get_int(DDI_DEV_T_ANY, devi, 30853 DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED); 30854 if (SD_PM_CAPABLE_IS_UNDEFINED(pm_cap)) { 30855 un->un_f_log_sense_supported = TRUE; 30856 if (!un->un_f_power_condition_disabled && 30857 SD_INQUIRY(un)->inq_ansi == 6) { 30858 un->un_f_power_condition_supported = TRUE; 30859 } 30860 } else { 30861 /* 30862 * pm-capable property exists. 30863 * 30864 * Convert "TRUE" values for pm_cap to 30865 * SD_PM_CAPABLE_IS_TRUE to make it easier to check 30866 * later. "TRUE" values are any values defined in 30867 * inquiry.h. 30868 */ 30869 if (SD_PM_CAPABLE_IS_FALSE(pm_cap)) { 30870 un->un_f_log_sense_supported = FALSE; 30871 } else { 30872 /* SD_PM_CAPABLE_IS_TRUE case */ 30873 un->un_f_pm_supported = TRUE; 30874 if (!un->un_f_power_condition_disabled && 30875 SD_PM_CAPABLE_IS_SPC_4(pm_cap)) { 30876 un->un_f_power_condition_supported = 30877 TRUE; 30878 } 30879 if (SD_PM_CAP_LOG_SUPPORTED(pm_cap)) { 30880 un->un_f_log_sense_supported = TRUE; 30881 un->un_f_pm_log_sense_smart = 30882 SD_PM_CAP_SMART_LOG(pm_cap); 30883 } 30884 } 30885 30886 SD_INFO(SD_LOG_ATTACH_DETACH, un, 30887 "sd_unit_attach: un:0x%p pm-capable " 30888 "property set to %d.\n", un, un->un_f_pm_supported); 30889 } 30890 } 30891 30892 if (un->un_f_is_hotpluggable) { 30893 30894 /* 30895 * Have to watch hotpluggable devices as well, since 30896 * that's the only way for userland applications to 30897 * detect hot removal while device is busy/mounted. 30898 */ 30899 un->un_f_monitor_media_state = TRUE; 30900 30901 un->un_f_check_start_stop = TRUE; 30902 30903 } 30904 } 30905 30906 /* 30907 * sd_tg_rdwr: 30908 * Provides rdwr access for cmlb via sd_tgops. The start_block is 30909 * in sys block size, req_length in bytes. 
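 *
 *	Worked example (assuming 512-byte system blocks and a 2048-byte
 *	target blocksize): start_block = 3, reqlength = 1024 gives
 *
 *		first_byte  = 3 * 512 = 1536
 *		real_addr   = 1536 / 2048 = 0
 *		end_block   = (1536 + 1024 + 2048 - 1) / 2048 = 2
 *		buffer_size = (2 - 0) * 2048 = 4096
 *
 *	so the misaligned 1K request is serviced through an aligned 4K
 *	transfer and a bounce buffer.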
30910 * 30911 */ 30912 static int 30913 sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 30914 diskaddr_t start_block, size_t reqlength, void *tg_cookie) 30915 { 30916 struct sd_lun *un; 30917 int path_flag = (int)(uintptr_t)tg_cookie; 30918 char *dkl = NULL; 30919 diskaddr_t real_addr = start_block; 30920 diskaddr_t first_byte, end_block; 30921 30922 size_t buffer_size = reqlength; 30923 int rval = 0; 30924 diskaddr_t cap; 30925 uint32_t lbasize; 30926 sd_ssc_t *ssc; 30927 30928 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 30929 if (un == NULL) 30930 return (ENXIO); 30931 30932 if (cmd != TG_READ && cmd != TG_WRITE) 30933 return (EINVAL); 30934 30935 ssc = sd_ssc_init(un); 30936 mutex_enter(SD_MUTEX(un)); 30937 if (un->un_f_tgt_blocksize_is_valid == FALSE) { 30938 mutex_exit(SD_MUTEX(un)); 30939 rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap, 30940 &lbasize, path_flag); 30941 if (rval != 0) 30942 goto done1; 30943 mutex_enter(SD_MUTEX(un)); 30944 sd_update_block_info(un, lbasize, cap); 30945 if ((un->un_f_tgt_blocksize_is_valid == FALSE)) { 30946 mutex_exit(SD_MUTEX(un)); 30947 rval = EIO; 30948 goto done; 30949 } 30950 } 30951 30952 if (NOT_DEVBSIZE(un)) { 30953 /* 30954 * sys_blocksize != tgt_blocksize, need to re-adjust 30955 * blkno and save the index to beginning of dk_label 30956 */ 30957 first_byte = SD_SYSBLOCKS2BYTES(start_block); 30958 real_addr = first_byte / un->un_tgt_blocksize; 30959 30960 end_block = (first_byte + reqlength + 30961 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize; 30962 30963 /* round up buffer size to multiple of target block size */ 30964 buffer_size = (end_block - real_addr) * un->un_tgt_blocksize; 30965 30966 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr", 30967 "label_addr: 0x%x allocation size: 0x%x\n", 30968 real_addr, buffer_size); 30969 30970 if (((first_byte % un->un_tgt_blocksize) != 0) || 30971 (reqlength % un->un_tgt_blocksize) != 0) 30972 /* the request is not aligned */ 30973 dkl = kmem_zalloc(buffer_size, KM_SLEEP); 30974 } 30975 30976 /* 30977 * The MMC standard allows READ CAPACITY to be 30978 * inaccurate by a bounded amount (in the interest of 30979 * response latency). As a result, failed READs are 30980 * commonplace (due to the reading of metadata and not 30981 * data). Depending on the per-Vendor/drive Sense data, 30982 * the failed READ can cause many (unnecessary) retries. 30983 */ 30984 30985 if (ISCD(un) && (cmd == TG_READ) && 30986 (un->un_f_blockcount_is_valid == TRUE) && 30987 ((start_block == (un->un_blockcount - 1))|| 30988 (start_block == (un->un_blockcount - 2)))) { 30989 path_flag = SD_PATH_DIRECT_PRIORITY; 30990 } 30991 30992 mutex_exit(SD_MUTEX(un)); 30993 if (cmd == TG_READ) { 30994 rval = sd_send_scsi_READ(ssc, (dkl != NULL)? dkl: bufaddr, 30995 buffer_size, real_addr, path_flag); 30996 if (dkl != NULL) 30997 bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block, 30998 real_addr), bufaddr, reqlength); 30999 } else { 31000 if (dkl) { 31001 rval = sd_send_scsi_READ(ssc, dkl, buffer_size, 31002 real_addr, path_flag); 31003 if (rval) { 31004 goto done1; 31005 } 31006 bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block, 31007 real_addr), reqlength); 31008 } 31009 rval = sd_send_scsi_WRITE(ssc, (dkl != NULL)? 
dkl: bufaddr, 31010 buffer_size, real_addr, path_flag); 31011 } 31012 31013 done1: 31014 if (dkl != NULL) 31015 kmem_free(dkl, buffer_size); 31016 31017 if (rval != 0) { 31018 if (rval == EIO) 31019 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 31020 else 31021 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 31022 } 31023 done: 31024 sd_ssc_fini(ssc); 31025 return (rval); 31026 } 31027 31028 31029 static int 31030 sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie) 31031 { 31032 31033 struct sd_lun *un; 31034 diskaddr_t cap; 31035 uint32_t lbasize; 31036 int path_flag = (int)(uintptr_t)tg_cookie; 31037 int ret = 0; 31038 31039 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 31040 if (un == NULL) 31041 return (ENXIO); 31042 31043 switch (cmd) { 31044 case TG_GETPHYGEOM: 31045 case TG_GETVIRTGEOM: 31046 case TG_GETCAPACITY: 31047 case TG_GETBLOCKSIZE: 31048 mutex_enter(SD_MUTEX(un)); 31049 31050 if ((un->un_f_blockcount_is_valid == TRUE) && 31051 (un->un_f_tgt_blocksize_is_valid == TRUE)) { 31052 cap = un->un_blockcount; 31053 lbasize = un->un_tgt_blocksize; 31054 mutex_exit(SD_MUTEX(un)); 31055 } else { 31056 sd_ssc_t *ssc; 31057 mutex_exit(SD_MUTEX(un)); 31058 ssc = sd_ssc_init(un); 31059 ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap, 31060 &lbasize, path_flag); 31061 if (ret != 0) { 31062 if (ret == EIO) 31063 sd_ssc_assessment(ssc, 31064 SD_FMT_STATUS_CHECK); 31065 else 31066 sd_ssc_assessment(ssc, 31067 SD_FMT_IGNORE); 31068 sd_ssc_fini(ssc); 31069 return (ret); 31070 } 31071 sd_ssc_fini(ssc); 31072 mutex_enter(SD_MUTEX(un)); 31073 sd_update_block_info(un, lbasize, cap); 31074 if ((un->un_f_blockcount_is_valid == FALSE) || 31075 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 31076 mutex_exit(SD_MUTEX(un)); 31077 return (EIO); 31078 } 31079 mutex_exit(SD_MUTEX(un)); 31080 } 31081 31082 if (cmd == TG_GETCAPACITY) { 31083 *(diskaddr_t *)arg = cap; 31084 return (0); 31085 } 31086 31087 if (cmd == TG_GETBLOCKSIZE) { 31088 *(uint32_t *)arg = lbasize; 31089 return (0); 31090 } 31091 31092 if (cmd == TG_GETPHYGEOM) 31093 ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg, 31094 cap, lbasize, path_flag); 31095 else 31096 /* TG_GETVIRTGEOM */ 31097 ret = sd_get_virtual_geometry(un, 31098 (cmlb_geom_t *)arg, cap, lbasize); 31099 31100 return (ret); 31101 31102 case TG_GETATTR: 31103 mutex_enter(SD_MUTEX(un)); 31104 ((tg_attribute_t *)arg)->media_is_writable = 31105 un->un_f_mmc_writable_media; 31106 ((tg_attribute_t *)arg)->media_is_solid_state = 31107 un->un_f_is_solid_state; 31108 mutex_exit(SD_MUTEX(un)); 31109 return (0); 31110 default: 31111 return (ENOTTY); 31112 31113 } 31114 } 31115 31116 /* 31117 * Function: sd_ssc_ereport_post 31118 * 31119 * Description: Will be called when SD driver need to post an ereport. 31120 * 31121 * Context: Kernel thread or interrupt context. 31122 */ 31123 31124 #define DEVID_IF_KNOWN(d) "devid", DATA_TYPE_STRING, (d) ? 
(d) : "unknown" 31125 31126 static void 31127 sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess) 31128 { 31129 int uscsi_path_instance = 0; 31130 uchar_t uscsi_pkt_reason; 31131 uint32_t uscsi_pkt_state; 31132 uint32_t uscsi_pkt_statistics; 31133 uint64_t uscsi_ena; 31134 uchar_t op_code; 31135 uint8_t *sensep; 31136 union scsi_cdb *cdbp; 31137 uint_t cdblen = 0; 31138 uint_t senlen = 0; 31139 struct sd_lun *un; 31140 dev_info_t *dip; 31141 char *devid; 31142 int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON | 31143 SSC_FLAGS_INVALID_STATUS | 31144 SSC_FLAGS_INVALID_SENSE | 31145 SSC_FLAGS_INVALID_DATA; 31146 char assessment[16]; 31147 31148 ASSERT(ssc != NULL); 31149 ASSERT(ssc->ssc_uscsi_cmd != NULL); 31150 ASSERT(ssc->ssc_uscsi_info != NULL); 31151 31152 un = ssc->ssc_un; 31153 ASSERT(un != NULL); 31154 31155 dip = un->un_sd->sd_dev; 31156 31157 /* 31158 * Get the devid: 31159 * devid will only be passed to non-transport error reports. 31160 */ 31161 devid = DEVI(dip)->devi_devid_str; 31162 31163 /* 31164 * If we are syncing or dumping, the command will not be executed 31165 * so we bypass this situation. 31166 */ 31167 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 31168 (un->un_state == SD_STATE_DUMPING)) 31169 return; 31170 31171 uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason; 31172 uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance; 31173 uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state; 31174 uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics; 31175 uscsi_ena = ssc->ssc_uscsi_info->ui_ena; 31176 31177 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 31178 cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb; 31179 31180 /* In rare cases, EG:DOORLOCK, the cdb could be NULL */ 31181 if (cdbp == NULL) { 31182 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 31183 "sd_ssc_ereport_post meet empty cdb\n"); 31184 return; 31185 } 31186 31187 op_code = cdbp->scc_cmd; 31188 31189 cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen; 31190 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 31191 ssc->ssc_uscsi_cmd->uscsi_rqresid); 31192 31193 if (senlen > 0) 31194 ASSERT(sensep != NULL); 31195 31196 /* 31197 * Initialize drv_assess to corresponding values. 31198 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending 31199 * on the sense-key returned back. 31200 */ 31201 switch (drv_assess) { 31202 case SD_FM_DRV_RECOVERY: 31203 (void) sprintf(assessment, "%s", "recovered"); 31204 break; 31205 case SD_FM_DRV_RETRY: 31206 (void) sprintf(assessment, "%s", "retry"); 31207 break; 31208 case SD_FM_DRV_NOTICE: 31209 (void) sprintf(assessment, "%s", "info"); 31210 break; 31211 case SD_FM_DRV_FATAL: 31212 default: 31213 (void) sprintf(assessment, "%s", "unknown"); 31214 } 31215 /* 31216 * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered 31217 * command, we will post ereport.io.scsi.cmd.disk.recovered. 31218 * driver-assessment will always be "recovered" here. 
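 *
 * For reference, the ereport classes posted by the branches below are:
 *
 *	recovered command	ereport.io.scsi.cmd.disk.recovered
 *	undecodable data	ereport.io.scsi.cmd.disk.dev.uderr
 *	transport error		ereport.io.scsi.cmd.disk.tran
 *	sense key 0x3 (medium)	ereport.io.scsi.cmd.disk.dev.rqs.merr
 *	other sense data	ereport.io.scsi.cmd.disk.dev.rqs.derr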
	/*
	 * If drv_assess == SD_FM_DRV_RECOVERY, this is a recovered
	 * command; post ereport.io.scsi.cmd.disk.recovered.
	 * driver-assessment will always be "recovered" here.
	 */
	if (drv_assess == SD_FM_DRV_RECOVERY) {
		scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
		    "cmd.disk.recovered", uscsi_ena, devid, NULL,
		    DDI_NOSLEEP, NULL,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
		    DEVID_IF_KNOWN(devid),
		    "driver-assessment", DATA_TYPE_STRING, assessment,
		    "op-code", DATA_TYPE_UINT8, op_code,
		    "cdb", DATA_TYPE_UINT8_ARRAY,
		    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
		    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
		    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
		    "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
		    NULL);
		return;
	}

	/*
	 * If there is unexpected or undecodable data, post
	 * ereport.io.scsi.cmd.disk.dev.uderr.
	 * driver-assessment will be set based on the drv_assess parameter.
	 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back.
	 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered.
	 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered.
	 * SSC_FLAGS_INVALID_DATA - invalid data sent back.
	 */
	if (ssc->ssc_flags & ssc_invalid_flags) {
		if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) {
			scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
			    NULL, "cmd.disk.dev.uderr", uscsi_ena, devid,
			    NULL, DDI_NOSLEEP, NULL,
			    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    DEVID_IF_KNOWN(devid),
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb", DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
			    "pkt-stats", DATA_TYPE_UINT32,
			    uscsi_pkt_statistics,
			    "stat-code", DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    "un-decode-info", DATA_TYPE_STRING,
			    ssc->ssc_info,
			    "un-decode-value", DATA_TYPE_UINT8_ARRAY,
			    senlen, sensep,
			    NULL);
		} else {
			/*
			 * For other types of invalid data, the
			 * un-decode-value field is left empty because the
			 * undecodable content can be seen in the upper
			 * level payload or inside un-decode-info.
			 */
			scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
			    NULL,
			    "cmd.disk.dev.uderr", uscsi_ena, devid,
			    NULL, DDI_NOSLEEP, NULL,
			    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    DEVID_IF_KNOWN(devid),
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb", DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
			    "pkt-stats", DATA_TYPE_UINT32,
			    uscsi_pkt_statistics,
			    "stat-code", DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    "un-decode-info", DATA_TYPE_STRING,
			    ssc->ssc_info,
			    "un-decode-value", DATA_TYPE_UINT8_ARRAY,
			    0, NULL,
			    NULL);
		}
		ssc->ssc_flags &= ~ssc_invalid_flags;
		return;
	}
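	/*
	 * Illustrative sketch (comment only): the invalid flags and the
	 * un-decode-info string consumed above are recorded at the
	 * detection site, e.g. via the in-file helper sd_ssc_set_info()
	 * (signature assumed from its usage elsewhere in this driver):
	 *
	 *	sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
	 *	    "invalid mode page data");
	 */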
	if (uscsi_pkt_reason != CMD_CMPLT ||
	    (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) {
		/*
		 * pkt-reason != CMD_CMPLT, or SSC_FLAGS_TRAN_ABORT was
		 * set inside sd_start_cmds due to an error (bad packet or
		 * fatal transport error).  Treat it as a transport error
		 * and post ereport.io.scsi.cmd.disk.tran.
		 * driver-assessment will be set based on drv_assess.
		 * devid is set to NULL because this is a transport error.
		 */
		if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)
			ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT;

		scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
		    "cmd.disk.tran", uscsi_ena, NULL, NULL, DDI_NOSLEEP, NULL,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
		    DEVID_IF_KNOWN(devid),
		    "driver-assessment", DATA_TYPE_STRING,
		    drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
		    "op-code", DATA_TYPE_UINT8, op_code,
		    "cdb", DATA_TYPE_UINT8_ARRAY,
		    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
		    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
		    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
		    "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
		    NULL);
	} else {
		/*
		 * If we got here, we have a completed command and need to
		 * further investigate the sense data to see what kind of
		 * ereport to post.
		 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr if
		 * sense-key == 0x3 (medium error).
		 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise.
		 * driver-assessment will be set based on the drv_assess
		 * parameter.
		 */
		if (senlen > 0) {
			/*
			 * Here we have sense data available.
			 */
			uint8_t sense_key;
			sense_key = scsi_sense_key(sensep);
			if (sense_key == 0x3) {
				/*
				 * sense-key == 0x3 (medium error),
				 * driver-assessment should be "fatal" if
				 * drv_assess is SD_FM_DRV_FATAL.
				 */
				scsi_fm_ereport_post(un->un_sd,
				    uscsi_path_instance, NULL,
				    "cmd.disk.dev.rqs.merr",
				    uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL,
				    FM_VERSION, DATA_TYPE_UINT8,
				    FM_EREPORT_VERS0,
				    DEVID_IF_KNOWN(devid),
				    "driver-assessment",
				    DATA_TYPE_STRING,
				    drv_assess == SD_FM_DRV_FATAL ?
				    "fatal" : assessment,
				    "op-code",
				    DATA_TYPE_UINT8, op_code,
				    "cdb",
				    DATA_TYPE_UINT8_ARRAY, cdblen,
				    ssc->ssc_uscsi_cmd->uscsi_cdb,
				    "pkt-reason",
				    DATA_TYPE_UINT8, uscsi_pkt_reason,
				    "pkt-state",
				    DATA_TYPE_UINT32, uscsi_pkt_state,
				    "pkt-stats",
				    DATA_TYPE_UINT32,
				    uscsi_pkt_statistics,
				    "stat-code",
				    DATA_TYPE_UINT8,
				    ssc->ssc_uscsi_cmd->uscsi_status,
				    "key",
				    DATA_TYPE_UINT8,
				    scsi_sense_key(sensep),
				    "asc",
				    DATA_TYPE_UINT8,
				    scsi_sense_asc(sensep),
				    "ascq",
				    DATA_TYPE_UINT8,
				    scsi_sense_ascq(sensep),
				    "sense-data",
				    DATA_TYPE_UINT8_ARRAY,
				    senlen, sensep,
				    "lba",
				    DATA_TYPE_UINT64,
				    ssc->ssc_uscsi_info->ui_lba,
				    NULL);
			} else {
				/*
				 * If sense-key == 0x4 (hardware error),
				 * driver-assessment should be "fatal" if
				 * drv_assess is SD_FM_DRV_FATAL.
				 */
				scsi_fm_ereport_post(un->un_sd,
				    uscsi_path_instance, NULL,
				    "cmd.disk.dev.rqs.derr",
				    uscsi_ena, devid,
				    NULL, DDI_NOSLEEP, NULL,
				    FM_VERSION,
				    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
				    DEVID_IF_KNOWN(devid),
				    "driver-assessment",
				    DATA_TYPE_STRING,
				    drv_assess == SD_FM_DRV_FATAL ?
				    (sense_key == 0x4 ?
				    "fatal" : "fail") : assessment,
				    "op-code",
				    DATA_TYPE_UINT8, op_code,
				    "cdb",
				    DATA_TYPE_UINT8_ARRAY, cdblen,
				    ssc->ssc_uscsi_cmd->uscsi_cdb,
				    "pkt-reason",
				    DATA_TYPE_UINT8, uscsi_pkt_reason,
				    "pkt-state",
				    DATA_TYPE_UINT32, uscsi_pkt_state,
				    "pkt-stats",
				    DATA_TYPE_UINT32,
				    uscsi_pkt_statistics,
				    "stat-code",
				    DATA_TYPE_UINT8,
				    ssc->ssc_uscsi_cmd->uscsi_status,
				    "key",
				    DATA_TYPE_UINT8,
				    scsi_sense_key(sensep),
				    "asc",
				    DATA_TYPE_UINT8,
				    scsi_sense_asc(sensep),
				    "ascq",
				    DATA_TYPE_UINT8,
				    scsi_sense_ascq(sensep),
				    "sense-data",
				    DATA_TYPE_UINT8_ARRAY,
				    senlen, sensep,
				    NULL);
			}
		} else {
			/*
			 * For stat_code == STATUS_GOOD, this is not a
			 * hardware error.
			 */
			if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD)
				return;

			/*
			 * Post ereport.io.scsi.cmd.disk.dev.serr if we got
			 * a stat-code but sense data is unavailable.
			 * driver-assessment will be set based on the
			 * drv_assess parameter.
			 */
			scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
			    NULL,
			    "cmd.disk.dev.serr", uscsi_ena,
			    devid, NULL, DDI_NOSLEEP, NULL,
			    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    DEVID_IF_KNOWN(devid),
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb",
			    DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason",
			    DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state",
			    DATA_TYPE_UINT32, uscsi_pkt_state,
			    "pkt-stats",
			    DATA_TYPE_UINT32, uscsi_pkt_statistics,
			    "stat-code",
			    DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    NULL);
		}
	}
}
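/*
 * Illustrative sketch (comment only): the merr/derr split above keys off
 * the request-sense data returned with a CHECK CONDITION.  For example,
 * an unrecovered read error on the medium typically decodes as
 *
 *	scsi_sense_key(sensep)  == 0x3	(MEDIUM ERROR -> ...rqs.merr)
 *	scsi_sense_asc(sensep)  == 0x11	(unrecovered read error)
 *	scsi_sense_ascq(sensep) == 0x00
 *
 * while any other key (e.g. 0x4, HARDWARE ERROR) is posted as
 * ...rqs.derr.
 */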
/*
 * Function: sd_ssc_extract_info
 *
 * Description: Extract information available to help generate ereports.
 *
 * Context: Kernel thread or interrupt context.
 */
static void
sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, struct sd_xbuf *xp)
{
	size_t senlen = 0;
	union scsi_cdb *cdbp;
	int path_instance;
	/*
	 * Need the scsi_cdb_size array to determine the cdb length.
	 */
	extern uchar_t scsi_cdb_size[];

	ASSERT(un != NULL);
	ASSERT(pktp != NULL);
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(ssc != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/*
	 * Transfer the cdb buffer pointer here.
	 */
	cdbp = (union scsi_cdb *)pktp->pkt_cdbp;

	ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)];
	ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp;

	/*
	 * Transfer the sense data buffer pointer if sense data is
	 * available; calculate the sense data length first.
	 */
	if ((xp->xb_sense_state & STATE_XARQ_DONE) ||
	    (xp->xb_sense_state & STATE_ARQ_DONE)) {
		/*
		 * For the arq case, we enter here.
		 */
		if (xp->xb_sense_state & STATE_XARQ_DONE) {
			senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid;
		} else {
			senlen = SENSE_LENGTH;
		}
	} else {
		/*
		 * For the non-arq case, we enter this branch.
		 */
		if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK &&
		    (xp->xb_sense_state & STATE_XFERRED_DATA)) {
			senlen = SENSE_LENGTH - xp->xb_sense_resid;
		}
	}

	ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff);
	ssc->ssc_uscsi_cmd->uscsi_rqresid = 0;
	ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data;

	ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);

	/*
	 * Only transfer path_instance when the scsi_pkt was properly
	 * allocated.
	 */
	path_instance = pktp->pkt_path_instance;
	if (scsi_pkt_allocated_correctly(pktp) && path_instance)
		ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance;
	else
		ssc->ssc_uscsi_cmd->uscsi_path_instance = 0;

	/*
	 * Copy in the other fields we may need when posting an ereport.
	 */
	ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason;
	ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state;
	ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics;
	ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp);

	/*
	 * Do not generate an ena for a command that completed successfully
	 * with no sense data; otherwise a successful partial read/write
	 * could wrongly be recognized as recovered.
	 */
	if ((pktp->pkt_reason == CMD_CMPLT) &&
	    (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) &&
	    (senlen == 0)) {
		return;
	}

	/*
	 * To associate the ereports of a single command execution flow, we
	 * need a shared ena for that specific command.
	 */
	if (xp->xb_ena == 0)
		xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
	ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
}
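/*
 * Illustrative sketch (comment only): every ereport generated while one
 * command is being retried shares the ena stashed in the xbuf, which is
 * how FMA correlates them into a single fault event chain.  The first
 * extraction generates it:
 *
 *	xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
 *
 * and each later sd_ssc_extract_info() call for the same xbuf reuses
 * that value in ui_ena.
 */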
/*
 * Function: sd_check_solid_state
 *
 * Description: Query the optional INQUIRY VPD page 0xb1. If the device
 *		supports VPD page 0xb1, sd examines the MEDIUM ROTATION
 *		RATE. If the MEDIUM ROTATION RATE is 1, sd assumes the
 *		device is a solid state drive.
 *
 * Context: Kernel thread or interrupt context.
 */

static void
sd_check_solid_state(sd_ssc_t *ssc)
{
	int rval = 0;
	uchar_t *inqb1 = NULL;
	size_t inqb1_len = MAX_INQUIRY_SIZE;
	size_t inqb1_resid = 0;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));
	un->un_f_is_solid_state = FALSE;

	if (ISCD(un)) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	if (sd_check_vpd_page_support(ssc) == 0 &&
	    un->un_vpd_page_mask & SD_VPD_DEV_CHARACTER_PG) {
		mutex_exit(SD_MUTEX(un));
		/* collect page b1 data */
		inqb1 = kmem_zalloc(inqb1_len, KM_SLEEP);

		rval = sd_send_scsi_INQUIRY(ssc, inqb1, inqb1_len,
		    0x01, 0xB1, &inqb1_resid);

		if (rval == 0 && (inqb1_len - inqb1_resid > 5)) {
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_check_solid_state: successfully got VPD "
			    "page: %x PAGE LENGTH: %x BYTE 4: %x BYTE 5: %x",
			    inqb1[1], inqb1[3], inqb1[4], inqb1[5]);

			mutex_enter(SD_MUTEX(un));
			/*
			 * Check the MEDIUM ROTATION RATE. If it is set
			 * to 1, the device is a solid state drive.
			 */
			if (inqb1[4] == 0 && inqb1[5] == 1) {
				un->un_f_is_solid_state = TRUE;
			}
			mutex_exit(SD_MUTEX(un));
		} else if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		kmem_free(inqb1, inqb1_len);
	} else {
		mutex_exit(SD_MUTEX(un));
	}
}

/*
 * Function: sd_check_emulation_mode
 *
 * Description: Check whether the SSD is in emulation mode by issuing
 *		READ CAPACITY(16) to see whether we can get the physical
 *		block size of the drive.
 *
 * Context: Kernel thread or interrupt context.
 */

static void
sd_check_emulation_mode(sd_ssc_t *ssc)
{
	int rval = 0;
	uint64_t capacity;
	uint_t lbasize;
	uint_t pbsize;
	int i;
	int devid_len;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));
	if (ISCD(un)) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	if (un->un_f_descr_format_supported) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize,
		    &pbsize, SD_PATH_DIRECT);
		mutex_enter(SD_MUTEX(un));

		if (rval != 0) {
			un->un_phy_blocksize = DEV_BSIZE;
		} else {
			/*
			 * Fall back to DEV_BSIZE unless the reported
			 * physical block size is a nonzero power-of-two
			 * multiple of DEV_BSIZE.
			 */
			if (pbsize == 0 || (pbsize % DEV_BSIZE) != 0 ||
			    !ISP2(pbsize / DEV_BSIZE)) {
				un->un_phy_blocksize = DEV_BSIZE;
			} else {
				un->un_phy_blocksize = pbsize;
			}
		}
	}

	for (i = 0; i < sd_flash_dev_table_size; i++) {
		devid_len = (int)strlen(sd_flash_dev_table[i]);
		if (sd_sdconf_id_match(un, sd_flash_dev_table[i], devid_len)
		    == SD_SUCCESS) {
			un->un_phy_blocksize = SSD_SECSIZE;
			if (un->un_f_is_solid_state &&
			    un->un_phy_blocksize != un->un_tgt_blocksize)
				un->un_f_enable_rmw = TRUE;
		}
	}

	mutex_exit(SD_MUTEX(un));
}
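/*
 * Illustrative sketch (comment only): a 512-byte-emulation flash device
 * listed in sd_flash_dev_table reports a 512-byte logical (target) block
 * size while its real physical block size is SSD_SECSIZE.  With
 *
 *	un->un_tgt_blocksize == DEV_BSIZE	(512)
 *	un->un_phy_blocksize == SSD_SECSIZE
 *
 * the two sizes differ, so un_f_enable_rmw is set and the driver
 * performs read-modify-write for I/O that is not aligned to the
 * physical block size.
 */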