/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
 * Copyright 2016 Joyent, Inc.
 * Copyright 2017 Nexenta Systems, Inc.
 */
/*
 * Copyright 2011 cyril.galibern@opensvc.com
 */

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define _LP64
#define __amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>

#include <sys/fm/protocol.h>

/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define SD_MODULE_NAME "SCSI SSA/FCAL Disk Driver"
#else /* !__fibre */
#define SD_MODULE_NAME "SCSI Disk Driver"
#endif /* !__fibre */

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_FIBRE
#else
#define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_PARALLEL
#endif
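
#ifdef SD_EXAMPLE_SKETCHES
/*
 * Illustrative sketch only.  The SD_EXAMPLE_SKETCHES guard is hypothetical
 * and never defined, so this (and the sketches below) never enter the
 * build.  It shows how the default above combines with the HBA's
 * "interconnect-type" property: the real decision is made during attach,
 * and this helper name and property lookup are assumptions for the
 * example.
 */
static int
sd_example_interconnect_type(dev_info_t *devi)
{
    int itype = ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0,
        "interconnect-type", -1);

    switch (itype) {
    case INTERCONNECT_FIBRE:
    case INTERCONNECT_SSA:
    case INTERCONNECT_FABRIC:
        return (SD_INTERCONNECT_FIBRE);
    default:
        /* Property absent or unrecognized: parallel SCSI mode. */
        return (SD_DEFAULT_INTERCONNECT_TYPE);
    }
}
#endif /* SD_EXAMPLE_SKETCHES */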

/*
 * The name of the driver, established from the module name in _init.
 */
static char *sd_label = NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define sd_max_xfer_size ssd_max_xfer_size
#define sd_config_list ssd_config_list
static char *sd_max_xfer_size = "ssd_max_xfer_size";
static char *sd_config_list = "ssd-config-list";
#else
static char *sd_max_xfer_size = "sd_max_xfer_size";
static char *sd_config_list = "sd-config-list";
#endif
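
/*
 * Example (illustrative only): a driver.conf override using the property
 * named above.  The vid/pid string and the tunable values here are
 * hypothetical; the accepted name:value pairs are decoded by
 * sd_nvpair_str_decode()/sd_set_properties() declared later in this file.
 *
 *	sd-config-list =
 *	    "SEAGATE ST31200N", "throttle-max:16, disksort:false";
 */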
"sd_retry_on_reservation_conflict"; 252 253 /* 254 * Global data for debug logging. To enable debug printing, sd_component_mask 255 * and sd_level_mask should be set to the desired bit patterns as outlined in 256 * sddef.h. 257 */ 258 uint_t sd_component_mask = 0x0; 259 uint_t sd_level_mask = 0x0; 260 struct sd_lun *sd_debug_un = NULL; 261 uint_t sd_error_level = SCSI_ERR_RETRYABLE; 262 263 /* Note: these may go away in the future... */ 264 static uint32_t sd_xbuf_active_limit = 512; 265 static uint32_t sd_xbuf_reserve_limit = 16; 266 267 static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 }; 268 269 /* 270 * Timer value used to reset the throttle after it has been reduced 271 * (typically in response to TRAN_BUSY or STATUS_QFULL) 272 */ 273 static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT; 274 static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT; 275 276 /* 277 * Interval value associated with the media change scsi watch. 278 */ 279 static int sd_check_media_time = 3000000; 280 281 /* 282 * Wait value used for in progress operations during a DDI_SUSPEND 283 */ 284 static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE; 285 286 /* 287 * sd_label_mutex protects a static buffer used in the disk label 288 * component of the driver 289 */ 290 static kmutex_t sd_label_mutex; 291 292 /* 293 * sd_detach_mutex protects un_layer_count, un_detach_count, and 294 * un_opens_in_progress in the sd_lun structure. 295 */ 296 static kmutex_t sd_detach_mutex; 297 298 _NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex, 299 sd_lun::{un_layer_count un_detach_count un_opens_in_progress})) 300 301 /* 302 * Global buffer and mutex for debug logging 303 */ 304 static char sd_log_buf[1024]; 305 static kmutex_t sd_log_mutex; 306 307 /* 308 * Structs and globals for recording attached lun information. 309 * This maintains a chain. Each node in the chain represents a SCSI controller. 310 * The structure records the number of luns attached to each target connected 311 * with the controller. 312 * For parallel scsi device only. 313 */ 314 struct sd_scsi_hba_tgt_lun { 315 struct sd_scsi_hba_tgt_lun *next; 316 dev_info_t *pdip; 317 int nlun[NTARGETS_WIDE]; 318 }; 319 320 /* 321 * Flag to indicate the lun is attached or detached 322 */ 323 #define SD_SCSI_LUN_ATTACH 0 324 #define SD_SCSI_LUN_DETACH 1 325 326 static kmutex_t sd_scsi_target_lun_mutex; 327 static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL; 328 329 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex, 330 sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip)) 331 332 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex, 333 sd_scsi_target_lun_head)) 334 335 /* 336 * "Smart" Probe Caching structs, globals, #defines, etc. 337 * For parallel scsi and non-self-identify device only. 338 */ 339 340 /* 341 * The following resources and routines are implemented to support 342 * "smart" probing, which caches the scsi_probe() results in an array, 343 * in order to help avoid long probe times. 344 */ 345 struct sd_scsi_probe_cache { 346 struct sd_scsi_probe_cache *next; 347 dev_info_t *pdip; 348 int cache[NTARGETS_WIDE]; 349 }; 350 351 static kmutex_t sd_scsi_probe_cache_mutex; 352 static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL; 353 354 /* 355 * Really we only need protection on the head of the linked list, but 356 * better safe than sorry. 

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel SCSI and non-self-identifying devices only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
    struct sd_scsi_probe_cache *next;
    dev_info_t *pdip;
    int cache[NTARGETS_WIDE];
};

static kmutex_t sd_scsi_probe_cache_mutex;
static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
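
#ifdef SD_EXAMPLE_SKETCHES
/*
 * Illustrative sketch only (guarded out of the build): consulting the
 * probe cache before issuing a real scsi_probe(), which is the idea
 * behind sd_scsi_probe_with_cache().  The helper name is hypothetical
 * and SCSIPROBE_NORESP as a cache-miss default is an assumption for
 * the example; error handling is elided.
 */
static int
sd_example_cached_probe(dev_info_t *pdip, int target)
{
    struct sd_scsi_probe_cache *cp;
    int rval = SCSIPROBE_NORESP;

    mutex_enter(&sd_scsi_probe_cache_mutex);
    for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
        if (cp->pdip == pdip) {
            rval = cp->cache[target];
            break;
        }
    }
    mutex_exit(&sd_scsi_probe_cache_mutex);
    return (rval);
}
#endif /* SD_EXAMPLE_SKETCHES */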

/*
 * Power attribute table
 */
static sd_power_attr_ss sd_pwr_ss = {
    { "NAME=spindle-motor", "0=off", "1=on", NULL },
    {0, 100},
    {30, 0},
    {20000, 0}
};

static sd_power_attr_pc sd_pwr_pc = {
    { "NAME=spindle-motor", "0=stopped", "1=standby", "2=idle",
        "3=active", NULL },
    {0, 0, 0, 100},
    {90, 90, 20, 0},
    {15000, 15000, 1000, 0}
};

/*
 * Power level to power condition
 */
static int sd_pl2pc[] = {
    SD_TARGET_START_VALID,
    SD_TARGET_STANDBY,
    SD_TARGET_IDLE,
    SD_TARGET_ACTIVE
};
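
#ifdef SD_EXAMPLE_SKETCHES
/*
 * Illustrative sketch only (guarded out of the build): mapping a PM
 * framework power level to the SCSI power condition used with
 * START STOP UNIT via sd_pl2pc[] above.  The helper name and the
 * fallback choice are assumptions for the example; the real driver
 * validates the level before indexing.
 */
static int
sd_example_level_to_power_cond(int level)
{
    if (level < 0 || level >= (int)(sizeof (sd_pl2pc) / sizeof (int)))
        return (SD_TARGET_ACTIVE);	/* assumed safe default */
    return (sd_pl2pc[level]);
}
#endif /* SD_EXAMPLE_SKETCHES */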
DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties }, 644 { "IBM DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties }, 645 { "IBM IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties }, 646 { "IBM IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties }, 647 { "IBM 1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 648 { "IBM 1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 649 { "IBM 1726-22x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 650 { "IBM 1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 651 { "IBM 1726-42x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 652 { "IBM 1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 653 { "IBM 3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 654 { "IBM 3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 655 { "IBM 3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 656 { "IBM 1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 657 { "IBM 1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 658 { "IBM 1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 659 { "IBM FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 660 { "IBM 1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 661 { "IBM 1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 662 { "IBM 1818", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 663 { "DELL MD3000", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 664 { "DELL MD3000i", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 665 { "LSI INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 666 { "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 667 { "SGI TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 668 { "SGI IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 669 { "*CSM100_*", SD_CONF_BSET_NRR_COUNT | 670 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties }, 671 { "*CSM200_*", SD_CONF_BSET_NRR_COUNT | 672 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties }, 673 { "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties }, 674 { "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties }, 675 { "SUN T3", SD_CONF_BSET_THROTTLE | 676 SD_CONF_BSET_BSY_RETRY_COUNT| 677 SD_CONF_BSET_RST_RETRIES| 678 SD_CONF_BSET_RSV_REL_TIME, 679 &purple_properties }, 680 { "SUN SESS01", SD_CONF_BSET_THROTTLE | 681 SD_CONF_BSET_BSY_RETRY_COUNT| 682 SD_CONF_BSET_RST_RETRIES| 683 SD_CONF_BSET_RSV_REL_TIME| 684 SD_CONF_BSET_MIN_THROTTLE| 685 SD_CONF_BSET_DISKSORT_DISABLED, 686 &sve_properties }, 687 { "SUN T4", SD_CONF_BSET_THROTTLE | 688 SD_CONF_BSET_BSY_RETRY_COUNT| 689 SD_CONF_BSET_RST_RETRIES| 690 SD_CONF_BSET_RSV_REL_TIME, 691 &purple_properties }, 692 { "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED | 693 SD_CONF_BSET_LUN_RESET_ENABLED, 694 &maserati_properties }, 695 { "SUN SE6920", SD_CONF_BSET_THROTTLE | 696 SD_CONF_BSET_NRR_COUNT| 697 SD_CONF_BSET_BSY_RETRY_COUNT| 698 SD_CONF_BSET_RST_RETRIES| 699 SD_CONF_BSET_MIN_THROTTLE| 700 SD_CONF_BSET_DISKSORT_DISABLED| 701 SD_CONF_BSET_LUN_RESET_ENABLED, 702 &pirus_properties }, 703 { "SUN SE6940", SD_CONF_BSET_THROTTLE | 704 SD_CONF_BSET_NRR_COUNT| 705 SD_CONF_BSET_BSY_RETRY_COUNT| 706 SD_CONF_BSET_RST_RETRIES| 707 SD_CONF_BSET_MIN_THROTTLE| 708 SD_CONF_BSET_DISKSORT_DISABLED| 709 SD_CONF_BSET_LUN_RESET_ENABLED, 710 &pirus_properties }, 711 { "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE | 712 SD_CONF_BSET_NRR_COUNT| 713 SD_CONF_BSET_BSY_RETRY_COUNT| 714 SD_CONF_BSET_RST_RETRIES| 715 SD_CONF_BSET_MIN_THROTTLE| 716 SD_CONF_BSET_DISKSORT_DISABLED| 717 SD_CONF_BSET_LUN_RESET_ENABLED, 718 &pirus_properties }, 719 { "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE | 
    { "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
        SD_CONF_BSET_NRR_COUNT|
        SD_CONF_BSET_BSY_RETRY_COUNT|
        SD_CONF_BSET_RST_RETRIES|
        SD_CONF_BSET_MIN_THROTTLE|
        SD_CONF_BSET_DISKSORT_DISABLED|
        SD_CONF_BSET_LUN_RESET_ENABLED,
        &pirus_properties },
    { "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
        SD_CONF_BSET_NRR_COUNT|
        SD_CONF_BSET_BSY_RETRY_COUNT|
        SD_CONF_BSET_RST_RETRIES|
        SD_CONF_BSET_MIN_THROTTLE|
        SD_CONF_BSET_DISKSORT_DISABLED|
        SD_CONF_BSET_LUN_RESET_ENABLED,
        &pirus_properties },
    { "SUN     SE6330", SD_CONF_BSET_THROTTLE |
        SD_CONF_BSET_NRR_COUNT|
        SD_CONF_BSET_BSY_RETRY_COUNT|
        SD_CONF_BSET_RST_RETRIES|
        SD_CONF_BSET_MIN_THROTTLE|
        SD_CONF_BSET_DISKSORT_DISABLED|
        SD_CONF_BSET_LUN_RESET_ENABLED,
        &pirus_properties },
    { "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "SUN     SUN_6180", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "STK     BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "STK     FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
    (defined(__i386) || defined(__amd64)))
    { "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
    { "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
    { "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
    { "CONNER  CP30540", SD_CONF_BSET_NOCACHE, NULL },
    { "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
    { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
        &symbios_properties },
    { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
        &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
    { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
        | SD_CONF_BSET_READSUB_BCD
        | SD_CONF_BSET_READ_TOC_ADDR_BCD
        | SD_CONF_BSET_NO_READ_HEADER
        | SD_CONF_BSET_READ_CD_XD4), NULL },

    { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
        | SD_CONF_BSET_READSUB_BCD
        | SD_CONF_BSET_READ_TOC_ADDR_BCD
        | SD_CONF_BSET_NO_READ_HEADER
        | SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
    { "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
        | SD_CONF_BSET_CTYPE
        | SD_CONF_BSET_NRR_COUNT
        | SD_CONF_BSET_FAB_DEVID
        | SD_CONF_BSET_NOCACHE
        | SD_CONF_BSET_BSY_RETRY_COUNT
        | SD_CONF_BSET_PLAYMSF_BCD
        | SD_CONF_BSET_READSUB_BCD
        | SD_CONF_BSET_READ_TOC_TRK_BCD
        | SD_CONF_BSET_READ_TOC_ADDR_BCD
        | SD_CONF_BSET_NO_READ_HEADER
        | SD_CONF_BSET_READ_CD_XD4
        | SD_CONF_BSET_RST_RETRIES
        | SD_CONF_BSET_RSV_REL_TIME
        | SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};

static const int sd_disk_table_size =
    sizeof (sd_disk_table)/ sizeof (sd_disk_config_t);
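
#ifdef SD_EXAMPLE_SKETCHES
/*
 * Illustrative sketch only (guarded out of the build): the blank-collapsing
 * match rule described above the table, in isolation.  Runs of blanks on
 * either side, including leading and trailing ones, are skipped before each
 * character comparison, so " NEC CDROM " compares equal to "NEC     CDROM".
 * The real sd_blank_cmp() also bounds the scan by the vid/pid field sizes
 * from the inquiry data; that detail is elided and the helper name is
 * hypothetical.
 */
static int
sd_example_blank_cmp(const char *p1, const char *p2)
{
    for (;;) {
        while (*p1 == ' ')
            p1++;
        while (*p2 == ' ')
            p2++;
        if (*p1 == '\0' || *p2 == '\0')
            return (*p1 == *p2);	/* both must be exhausted */
        if (*p1 != *p2)
            return (0);
        p1++;
        p2++;
    }
}
#endif /* SD_EXAMPLE_SKETCHES */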
MARVELL SD88SA02", 811 "MARVELL SD88SA02", 812 "TOSHIBA THNSNV05", 813 }; 814 815 static const int sd_flash_dev_table_size = 816 sizeof (sd_flash_dev_table) / sizeof (sd_flash_dev_table[0]); 817 818 #define SD_INTERCONNECT_PARALLEL 0 819 #define SD_INTERCONNECT_FABRIC 1 820 #define SD_INTERCONNECT_FIBRE 2 821 #define SD_INTERCONNECT_SSA 3 822 #define SD_INTERCONNECT_SATA 4 823 #define SD_INTERCONNECT_SAS 5 824 825 #define SD_IS_PARALLEL_SCSI(un) \ 826 ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL) 827 #define SD_IS_SERIAL(un) \ 828 (((un)->un_interconnect_type == SD_INTERCONNECT_SATA) ||\ 829 ((un)->un_interconnect_type == SD_INTERCONNECT_SAS)) 830 831 /* 832 * Definitions used by device id registration routines 833 */ 834 #define VPD_HEAD_OFFSET 3 /* size of head for vpd page */ 835 #define VPD_PAGE_LENGTH 3 /* offset for pge length data */ 836 #define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */ 837 838 static kmutex_t sd_sense_mutex = {0}; 839 840 /* 841 * Macros for updates of the driver state 842 */ 843 #define New_state(un, s) \ 844 (un)->un_last_state = (un)->un_state, (un)->un_state = (s) 845 #define Restore_state(un) \ 846 { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); } 847 848 static struct sd_cdbinfo sd_cdbtab[] = { 849 { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, }, 850 { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, }, 851 { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, }, 852 { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, }, 853 }; 854 855 /* 856 * Specifies the number of seconds that must have elapsed since the last 857 * cmd. has completed for a device to be declared idle to the PM framework. 858 */ 859 static int sd_pm_idletime = 1; 860 861 /* 862 * Internal function prototypes 863 */ 864 865 #if (defined(__fibre)) 866 /* 867 * These #defines are to avoid namespace collisions that occur because this 868 * code is currently used to compile two separate driver modules: sd and ssd. 869 * All function names need to be treated this way (even if declared static) 870 * in order to allow the debugger to resolve the names properly. 871 * It is anticipated that in the near future the ssd module will be obsoleted, 872 * at which time this ugliness should go away. 

/*
 * Specifies the number of seconds that must have elapsed since the last
 * command completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define sd_log_trace ssd_log_trace
#define sd_log_info ssd_log_info
#define sd_log_err ssd_log_err
#define sdprobe ssdprobe
#define sdinfo ssdinfo
#define sd_prop_op ssd_prop_op
#define sd_scsi_probe_cache_init ssd_scsi_probe_cache_init
#define sd_scsi_probe_cache_fini ssd_scsi_probe_cache_fini
#define sd_scsi_clear_probe_cache ssd_scsi_clear_probe_cache
#define sd_scsi_probe_with_cache ssd_scsi_probe_with_cache
#define sd_scsi_target_lun_init ssd_scsi_target_lun_init
#define sd_scsi_target_lun_fini ssd_scsi_target_lun_fini
#define sd_scsi_get_target_lun_count ssd_scsi_get_target_lun_count
#define sd_scsi_update_lun_on_target ssd_scsi_update_lun_on_target
#define sd_spin_up_unit ssd_spin_up_unit
#define sd_enable_descr_sense ssd_enable_descr_sense
#define sd_reenable_dsense_task ssd_reenable_dsense_task
#define sd_set_mmc_caps ssd_set_mmc_caps
#define sd_read_unit_properties ssd_read_unit_properties
#define sd_process_sdconf_file ssd_process_sdconf_file
#define sd_process_sdconf_table ssd_process_sdconf_table
#define sd_sdconf_id_match ssd_sdconf_id_match
#define sd_blank_cmp ssd_blank_cmp
#define sd_chk_vers1_data ssd_chk_vers1_data
#define sd_set_vers1_properties ssd_set_vers1_properties
#define sd_check_bdc_vpd ssd_check_bdc_vpd
#define sd_check_emulation_mode ssd_check_emulation_mode

#define sd_get_physical_geometry ssd_get_physical_geometry
#define sd_get_virtual_geometry ssd_get_virtual_geometry
#define sd_update_block_info ssd_update_block_info
#define sd_register_devid ssd_register_devid
#define sd_get_devid ssd_get_devid
#define sd_create_devid ssd_create_devid
#define sd_write_deviceid ssd_write_deviceid
#define sd_check_vpd_page_support ssd_check_vpd_page_support
#define sd_setup_pm ssd_setup_pm
#define sd_create_pm_components ssd_create_pm_components
#define sd_ddi_suspend ssd_ddi_suspend
#define sd_ddi_resume ssd_ddi_resume
#define sd_pm_state_change ssd_pm_state_change
#define sdpower ssdpower
#define sdattach ssdattach
#define sddetach ssddetach
#define sd_unit_attach ssd_unit_attach
#define sd_unit_detach ssd_unit_detach
#define sd_set_unit_attributes ssd_set_unit_attributes
#define sd_create_errstats ssd_create_errstats
#define sd_set_errstats ssd_set_errstats
#define sd_set_pstats ssd_set_pstats
#define sddump ssddump
#define sd_scsi_poll ssd_scsi_poll
#define sd_send_polled_RQS ssd_send_polled_RQS
#define sd_ddi_scsi_poll ssd_ddi_scsi_poll
#define sd_init_event_callbacks ssd_init_event_callbacks
#define sd_event_callback ssd_event_callback
#define sd_cache_control ssd_cache_control
#define sd_get_write_cache_enabled ssd_get_write_cache_enabled
#define sd_get_write_cache_changeable ssd_get_write_cache_changeable
#define sd_get_nv_sup ssd_get_nv_sup
#define sd_make_device ssd_make_device
#define sdopen ssdopen
#define sdclose ssdclose
#define sd_ready_and_valid ssd_ready_and_valid
#define sdmin ssdmin
#define sdread ssdread
#define sdwrite ssdwrite
#define sdaread ssdaread
#define sdawrite ssdawrite
#define sdstrategy ssdstrategy
#define sdioctl ssdioctl
#define sd_mapblockaddr_iostart ssd_mapblockaddr_iostart
#define sd_mapblocksize_iostart ssd_mapblocksize_iostart
#define sd_checksum_iostart ssd_checksum_iostart
#define sd_checksum_uscsi_iostart ssd_checksum_uscsi_iostart
#define sd_pm_iostart ssd_pm_iostart
#define sd_core_iostart ssd_core_iostart
#define sd_mapblockaddr_iodone ssd_mapblockaddr_iodone
#define sd_mapblocksize_iodone ssd_mapblocksize_iodone
#define sd_checksum_iodone ssd_checksum_iodone
#define sd_checksum_uscsi_iodone ssd_checksum_uscsi_iodone
#define sd_pm_iodone ssd_pm_iodone
#define sd_initpkt_for_buf ssd_initpkt_for_buf
#define sd_destroypkt_for_buf ssd_destroypkt_for_buf
#define sd_setup_rw_pkt ssd_setup_rw_pkt
#define sd_setup_next_rw_pkt ssd_setup_next_rw_pkt
#define sd_buf_iodone ssd_buf_iodone
#define sd_uscsi_strategy ssd_uscsi_strategy
#define sd_initpkt_for_uscsi ssd_initpkt_for_uscsi
#define sd_destroypkt_for_uscsi ssd_destroypkt_for_uscsi
#define sd_uscsi_iodone ssd_uscsi_iodone
#define sd_xbuf_strategy ssd_xbuf_strategy
#define sd_xbuf_init ssd_xbuf_init
#define sd_pm_entry ssd_pm_entry
#define sd_pm_exit ssd_pm_exit

#define sd_pm_idletimeout_handler ssd_pm_idletimeout_handler
#define sd_pm_timeout_handler ssd_pm_timeout_handler

#define sd_add_buf_to_waitq ssd_add_buf_to_waitq
#define sdintr ssdintr
#define sd_start_cmds ssd_start_cmds
#define sd_send_scsi_cmd ssd_send_scsi_cmd
#define sd_bioclone_alloc ssd_bioclone_alloc
#define sd_bioclone_free ssd_bioclone_free
#define sd_shadow_buf_alloc ssd_shadow_buf_alloc
#define sd_shadow_buf_free ssd_shadow_buf_free
#define sd_print_transport_rejected_message \
    ssd_print_transport_rejected_message
#define sd_retry_command ssd_retry_command
#define sd_set_retry_bp ssd_set_retry_bp
#define sd_send_request_sense_command ssd_send_request_sense_command
#define sd_start_retry_command ssd_start_retry_command
#define sd_start_direct_priority_command \
    ssd_start_direct_priority_command
#define sd_return_failed_command ssd_return_failed_command
#define sd_return_failed_command_no_restart \
    ssd_return_failed_command_no_restart
#define sd_return_command ssd_return_command
#define sd_sync_with_callback ssd_sync_with_callback
#define sdrunout ssdrunout
#define sd_mark_rqs_busy ssd_mark_rqs_busy
#define sd_mark_rqs_idle ssd_mark_rqs_idle
#define sd_reduce_throttle ssd_reduce_throttle
#define sd_restore_throttle ssd_restore_throttle
#define sd_print_incomplete_msg ssd_print_incomplete_msg
#define sd_init_cdb_limits ssd_init_cdb_limits
#define sd_pkt_status_good ssd_pkt_status_good
#define sd_pkt_status_check_condition ssd_pkt_status_check_condition
#define sd_pkt_status_busy ssd_pkt_status_busy
#define sd_pkt_status_reservation_conflict \
    ssd_pkt_status_reservation_conflict
#define sd_pkt_status_qfull ssd_pkt_status_qfull
#define sd_handle_request_sense ssd_handle_request_sense
#define sd_handle_auto_request_sense ssd_handle_auto_request_sense
#define sd_print_sense_failed_msg ssd_print_sense_failed_msg
#define sd_validate_sense_data ssd_validate_sense_data
#define sd_decode_sense ssd_decode_sense
#define sd_print_sense_msg ssd_print_sense_msg
#define sd_sense_key_no_sense ssd_sense_key_no_sense
#define sd_sense_key_recoverable_error ssd_sense_key_recoverable_error
#define sd_sense_key_not_ready ssd_sense_key_not_ready
#define sd_sense_key_medium_or_hardware_error \
    ssd_sense_key_medium_or_hardware_error
#define sd_sense_key_illegal_request ssd_sense_key_illegal_request
#define sd_sense_key_unit_attention ssd_sense_key_unit_attention
#define sd_sense_key_fail_command ssd_sense_key_fail_command
#define sd_sense_key_blank_check ssd_sense_key_blank_check
#define sd_sense_key_aborted_command ssd_sense_key_aborted_command
#define sd_sense_key_default ssd_sense_key_default
#define sd_print_retry_msg ssd_print_retry_msg
#define sd_print_cmd_incomplete_msg ssd_print_cmd_incomplete_msg
#define sd_pkt_reason_cmd_incomplete ssd_pkt_reason_cmd_incomplete
#define sd_pkt_reason_cmd_tran_err ssd_pkt_reason_cmd_tran_err
#define sd_pkt_reason_cmd_reset ssd_pkt_reason_cmd_reset
#define sd_pkt_reason_cmd_aborted ssd_pkt_reason_cmd_aborted
#define sd_pkt_reason_cmd_timeout ssd_pkt_reason_cmd_timeout
#define sd_pkt_reason_cmd_unx_bus_free ssd_pkt_reason_cmd_unx_bus_free
#define sd_pkt_reason_cmd_tag_reject ssd_pkt_reason_cmd_tag_reject
#define sd_pkt_reason_default ssd_pkt_reason_default
#define sd_reset_target ssd_reset_target
#define sd_start_stop_unit_callback ssd_start_stop_unit_callback
#define sd_start_stop_unit_task ssd_start_stop_unit_task
#define sd_taskq_create ssd_taskq_create
#define sd_taskq_delete ssd_taskq_delete
#define sd_target_change_task ssd_target_change_task
#define sd_log_dev_status_event ssd_log_dev_status_event
#define sd_log_lun_expansion_event ssd_log_lun_expansion_event
#define sd_log_eject_request_event ssd_log_eject_request_event
#define sd_media_change_task ssd_media_change_task
#define sd_handle_mchange ssd_handle_mchange
#define sd_send_scsi_DOORLOCK ssd_send_scsi_DOORLOCK
#define sd_send_scsi_READ_CAPACITY ssd_send_scsi_READ_CAPACITY
#define sd_send_scsi_READ_CAPACITY_16 ssd_send_scsi_READ_CAPACITY_16
#define sd_send_scsi_GET_CONFIGURATION ssd_send_scsi_GET_CONFIGURATION
#define sd_send_scsi_feature_GET_CONFIGURATION \
    ssd_send_scsi_feature_GET_CONFIGURATION
#define sd_send_scsi_START_STOP_UNIT ssd_send_scsi_START_STOP_UNIT
#define sd_send_scsi_INQUIRY ssd_send_scsi_INQUIRY
#define sd_send_scsi_TEST_UNIT_READY ssd_send_scsi_TEST_UNIT_READY
#define sd_send_scsi_PERSISTENT_RESERVE_IN \
    ssd_send_scsi_PERSISTENT_RESERVE_IN
#define sd_send_scsi_PERSISTENT_RESERVE_OUT \
    ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define sd_send_scsi_SYNCHRONIZE_CACHE ssd_send_scsi_SYNCHRONIZE_CACHE
#define sd_send_scsi_SYNCHRONIZE_CACHE_biodone \
    ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define sd_send_scsi_MODE_SENSE ssd_send_scsi_MODE_SENSE
#define sd_send_scsi_MODE_SELECT ssd_send_scsi_MODE_SELECT
#define sd_send_scsi_RDWR ssd_send_scsi_RDWR
#define sd_send_scsi_LOG_SENSE ssd_send_scsi_LOG_SENSE
#define sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION \
    ssd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
#define sd_gesn_media_data_valid ssd_gesn_media_data_valid
#define sd_alloc_rqs ssd_alloc_rqs
#define sd_free_rqs ssd_free_rqs
#define sd_dump_memory ssd_dump_memory
#define sd_get_media_info_com ssd_get_media_info_com
#define sd_get_media_info ssd_get_media_info
#define sd_get_media_info_ext ssd_get_media_info_ext
#define sd_dkio_ctrl_info ssd_dkio_ctrl_info
#define sd_nvpair_str_decode ssd_nvpair_str_decode
#define sd_strtok_r ssd_strtok_r
#define sd_set_properties ssd_set_properties
#define sd_get_tunables_from_conf ssd_get_tunables_from_conf
#define sd_setup_next_xfer ssd_setup_next_xfer
#define sd_dkio_get_temp ssd_dkio_get_temp
#define sd_check_mhd ssd_check_mhd
#define sd_mhd_watch_cb ssd_mhd_watch_cb
#define sd_mhd_watch_incomplete ssd_mhd_watch_incomplete
#define sd_sname ssd_sname
#define sd_mhd_resvd_recover ssd_mhd_resvd_recover
#define sd_resv_reclaim_thread ssd_resv_reclaim_thread
#define sd_take_ownership ssd_take_ownership
#define sd_reserve_release ssd_reserve_release
#define sd_rmv_resv_reclaim_req ssd_rmv_resv_reclaim_req
#define sd_mhd_reset_notify_cb ssd_mhd_reset_notify_cb
#define sd_persistent_reservation_in_read_keys \
    ssd_persistent_reservation_in_read_keys
#define sd_persistent_reservation_in_read_resv \
    ssd_persistent_reservation_in_read_resv
#define sd_mhdioc_takeown ssd_mhdioc_takeown
#define sd_mhdioc_failfast ssd_mhdioc_failfast
#define sd_mhdioc_release ssd_mhdioc_release
#define sd_mhdioc_register_devid ssd_mhdioc_register_devid
#define sd_mhdioc_inkeys ssd_mhdioc_inkeys
#define sd_mhdioc_inresv ssd_mhdioc_inresv
#define sr_change_blkmode ssr_change_blkmode
#define sr_change_speed ssr_change_speed
#define sr_atapi_change_speed ssr_atapi_change_speed
#define sr_pause_resume ssr_pause_resume
#define sr_play_msf ssr_play_msf
#define sr_play_trkind ssr_play_trkind
#define sr_read_all_subcodes ssr_read_all_subcodes
#define sr_read_subchannel ssr_read_subchannel
#define sr_read_tocentry ssr_read_tocentry
#define sr_read_tochdr ssr_read_tochdr
#define sr_read_cdda ssr_read_cdda
#define sr_read_cdxa ssr_read_cdxa
#define sr_read_mode1 ssr_read_mode1
#define sr_read_mode2 ssr_read_mode2
#define sr_read_cd_mode2 ssr_read_cd_mode2
#define sr_sector_mode ssr_sector_mode
#define sr_eject ssr_eject
#define sr_ejected ssr_ejected
#define sr_check_wp ssr_check_wp
#define sd_watch_request_submit ssd_watch_request_submit
#define sd_check_media ssd_check_media
#define sd_media_watch_cb ssd_media_watch_cb
#define sd_delayed_cv_broadcast ssd_delayed_cv_broadcast
#define sr_volume_ctrl ssr_volume_ctrl
#define sr_read_sony_session_offset ssr_read_sony_session_offset
#define sd_log_page_supported ssd_log_page_supported
#define sd_check_for_writable_cd ssd_check_for_writable_cd
#define sd_wm_cache_constructor ssd_wm_cache_constructor
#define sd_wm_cache_destructor ssd_wm_cache_destructor
#define sd_range_lock ssd_range_lock
#define sd_get_range ssd_get_range
#define sd_free_inlist_wmap ssd_free_inlist_wmap
#define sd_range_unlock ssd_range_unlock
#define sd_read_modify_write_task ssd_read_modify_write_task
#define sddump_do_read_of_rmw ssddump_do_read_of_rmw

#define sd_iostart_chain ssd_iostart_chain
#define sd_iodone_chain ssd_iodone_chain
#define sd_initpkt_map ssd_initpkt_map
#define sd_destroypkt_map ssd_destroypkt_map
#define sd_chain_type_map ssd_chain_type_map
#define sd_chain_index_map ssd_chain_index_map

#define sd_failfast_flushctl ssd_failfast_flushctl
#define sd_failfast_flushq ssd_failfast_flushq
#define sd_failfast_flushq_callback ssd_failfast_flushq_callback

#define sd_is_lsi ssd_is_lsi
#define sd_tg_rdwr ssd_tg_rdwr
#define sd_tg_getinfo ssd_tg_getinfo
#define sd_rmw_msg_print_handler ssd_rmw_msg_print_handler

#endif /* #if (defined(__fibre)) */

int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int sd_spin_up_unit(sd_ssc_t *ssc);

/*
 * Use sd_ssc_init to establish an sd_ssc_t struct,
 * sd_ssc_send to send a uscsi internal command, and
 * sd_ssc_fini to free the sd_ssc_t struct.
 */
static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
    int flag, enum uio_seg dataspace, int path_flag);
static void sd_ssc_fini(sd_ssc_t *ssc);

/*
 * Use sd_ssc_assessment to set the correct type of assessment, and
 * sd_ssc_post to post an ereport and system log.
 * sd_ssc_post calls sd_ssc_print to print the system log and
 * sd_ssc_ereport_post to post the ereport.
 */
static void sd_ssc_assessment(sd_ssc_t *ssc,
    enum sd_type_assessment tp_assess);

static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
static void sd_ssc_ereport_post(sd_ssc_t *ssc,
    enum sd_driver_assessment drv_assess);

/*
 * sd_ssc_set_info marks an un-decodable-data error;
 * sd_ssc_extract_info transfers information from internal
 * data structures to the sd_ssc_t.
 */
static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp,
    const char *fmt, ...);
static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);
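
#ifdef SD_EXAMPLE_SKETCHES
/*
 * Illustrative sketch only (guarded out of the build): the sd_ssc_t life
 * cycle described above -- allocate a handle, issue a uscsi command
 * through it, assess the result, free the handle.  The TEST UNIT READY
 * setup and the SD_FMT_* assessment values are assumptions based on how
 * the driver's own send_scsi helpers use this interface.
 */
static int
sd_example_ssc_cycle(struct sd_lun *un)
{
    sd_ssc_t *ssc;
    struct uscsi_cmd ucmd;
    union scsi_cdb cdb;
    int rval;

    bzero(&ucmd, sizeof (ucmd));
    bzero(&cdb, sizeof (cdb));
    cdb.scc_cmd = SCMD_TEST_UNIT_READY;
    ucmd.uscsi_cdb = (caddr_t)&cdb;
    ucmd.uscsi_cdblen = CDB_GROUP0;

    ssc = sd_ssc_init(un);
    rval = sd_ssc_send(ssc, &ucmd, FKIOCTL, UIO_SYSSPACE, SD_PATH_DIRECT);
    if (rval == 0)
        sd_ssc_assessment(ssc, SD_FMT_STANDARD);
    else
        sd_ssc_assessment(ssc, SD_FMT_IGNORE);
    sd_ssc_fini(ssc);
    return (rval);
}
#endif /* SD_EXAMPLE_SKETCHES */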

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

#ifdef _LP64
static void sd_enable_descr_sense(sd_ssc_t *ssc);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(sd_ssc_t *ssc);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
static void sd_set_properties(struct sd_lun *un, char *name, char *value);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(sd_ssc_t *ssc);
static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
static int sd_write_deviceid(sd_ssc_t *ssc);
static int sd_check_vpd_page_support(sd_ssc_t *ssc);

static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_pm_state_change(struct sd_lun *un, int level, int flag);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define SD_CACHE_ENABLE		1
#define SD_CACHE_DISABLE	0
#define SD_CACHE_NOCHANGE	-1

static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
static void sd_get_write_cache_changeable(sd_ssc_t *ssc, int *is_changeable);
static void sd_get_nv_sup(sd_ssc_t *ssc);
static dev_t sd_make_device(dev_info_t *devi);
static void sd_check_bdc_vpd(sd_ssc_t *ssc);
static void sd_check_emulation_mode(sd_ssc_t *ssc);
static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);
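
#ifdef SD_EXAMPLE_SKETCHES
/*
 * Illustrative sketch only (guarded out of the build): the SD_CACHE_*
 * values above are passed independently for the read cache (rcd_flag)
 * and the write cache (wce_flag).  Enabling the write cache while
 * leaving the read cache setting untouched looks like this; the helper
 * name is hypothetical and the ssc handle comes from sd_ssc_init().
 */
static int
sd_example_enable_write_cache(sd_ssc_t *ssc)
{
    return (sd_cache_control(ssc, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE));
}
#endif /* SD_EXAMPLE_SKETCHES */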

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(sd_ssc_t *ssc, int part);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
    int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
sd_taskq_create(void); 1512 static void sd_taskq_delete(void); 1513 static void sd_target_change_task(void *arg); 1514 static void sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag); 1515 static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag); 1516 static void sd_log_eject_request_event(struct sd_lun *un, int km_flag); 1517 static void sd_media_change_task(void *arg); 1518 1519 static int sd_handle_mchange(struct sd_lun *un); 1520 static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag); 1521 static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, 1522 uint32_t *lbap, int path_flag); 1523 static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, 1524 uint32_t *lbap, uint32_t *psp, int path_flag); 1525 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, 1526 int flag, int path_flag); 1527 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, 1528 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp); 1529 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag); 1530 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, 1531 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp); 1532 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, 1533 uchar_t usr_cmd, uchar_t *usr_bufp); 1534 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, 1535 struct dk_callback *dkc); 1536 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp); 1537 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, 1538 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1539 uchar_t *bufaddr, uint_t buflen, int path_flag); 1540 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 1541 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1542 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag); 1543 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, 1544 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag); 1545 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, 1546 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag); 1547 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 1548 size_t buflen, daddr_t start_block, int path_flag); 1549 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \ 1550 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \ 1551 path_flag) 1552 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\ 1553 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\ 1554 path_flag) 1555 1556 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, 1557 uint16_t buflen, uchar_t page_code, uchar_t page_control, 1558 uint16_t param_ptr, int path_flag); 1559 static int sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc, 1560 uchar_t *bufaddr, size_t buflen, uchar_t class_req); 1561 static boolean_t sd_gesn_media_data_valid(uchar_t *data); 1562 1563 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un); 1564 static void sd_free_rqs(struct sd_lun *un); 1565 1566 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, 1567 uchar_t *data, int len, int fmt); 1568 static void sd_panic_for_res_conflict(struct sd_lun *un); 1569 1570 /* 1571 * Disk Ioctl Function Prototypes 1572 */ 1573 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag); 1574 static int sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag); 1575 static int 
sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag); 1576 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag); 1577 1578 /* 1579 * Multi-host Ioctl Prototypes 1580 */ 1581 static int sd_check_mhd(dev_t dev, int interval); 1582 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1583 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1584 static char *sd_sname(uchar_t status); 1585 static void sd_mhd_resvd_recover(void *arg); 1586 static void sd_resv_reclaim_thread(); 1587 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1588 static int sd_reserve_release(dev_t dev, int cmd); 1589 static void sd_rmv_resv_reclaim_req(dev_t dev); 1590 static void sd_mhd_reset_notify_cb(caddr_t arg); 1591 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1592 mhioc_inkeys_t *usrp, int flag); 1593 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1594 mhioc_inresvs_t *usrp, int flag); 1595 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1596 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1597 static int sd_mhdioc_release(dev_t dev); 1598 static int sd_mhdioc_register_devid(dev_t dev); 1599 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1600 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1601 1602 /* 1603 * SCSI removable prototypes 1604 */ 1605 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1606 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1607 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1608 static int sr_pause_resume(dev_t dev, int mode); 1609 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1610 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1611 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1612 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1613 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1614 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1615 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1616 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1617 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1618 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1619 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1620 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1621 static int sr_eject(dev_t dev); 1622 static void sr_ejected(register struct sd_lun *un); 1623 static int sr_check_wp(dev_t dev); 1624 static opaque_t sd_watch_request_submit(struct sd_lun *un); 1625 static int sd_check_media(dev_t dev, enum dkio_state state); 1626 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1627 static void sd_delayed_cv_broadcast(void *arg); 1628 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1629 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1630 1631 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page); 1632 1633 /* 1634 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 
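* These implement the read-modify-write (RMW) path used when the device's
* block size differs from the 512-byte default: overlapping requests are
* serialized with the sd_w_map range locks declared below.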
1635 */
1636 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag);
1637 static int sd_wm_cache_constructor(void *wm, void *un, int flags);
1638 static void sd_wm_cache_destructor(void *wm, void *un);
1639 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb,
1640 daddr_t endb, ushort_t typ);
1641 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb,
1642 daddr_t endb);
1643 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp);
1644 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm);
1645 static void sd_read_modify_write_task(void *arg);
1646 static int
1647 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
1648 struct buf **bpp);
1649
1650
1651 /*
1652 * Function prototypes for failfast support.
1653 */
1654 static void sd_failfast_flushq(struct sd_lun *un);
1655 static int sd_failfast_flushq_callback(struct buf *bp);
1656
1657 /*
1658 * Function prototypes to check for lsi devices
1659 */
1660 static void sd_is_lsi(struct sd_lun *un);
1661
1662 /*
1663 * Function prototypes for partial DMA support
1664 */
1665 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
1666 struct scsi_pkt *pkt, struct sd_xbuf *xp);
1667
1668
1669 /* Function prototypes for cmlb */
1670 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
1671 diskaddr_t start_block, size_t reqlength, void *tg_cookie);
1672
1673 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie);
1674
1675 /*
1676 * To print the RMW warning message in a timely manner
1677 */
1678 static void sd_rmw_msg_print_handler(void *arg);
1679
1680 /*
1681 * Constants for failfast support:
1682 *
1683 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO
1684 * failfast processing being performed.
1685 *
1686 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing
1687 * failfast processing on all bufs with B_FAILFAST set.
1688 */
1689
1690 #define SD_FAILFAST_INACTIVE 0
1691 #define SD_FAILFAST_ACTIVE 1
1692
1693 /*
1694 * Bitmask to control behavior of buf(9S) flushes when a transition to
1695 * the failfast state occurs. Optional bits include:
1696 *
1697 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that
1698 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will
1699 * be flushed.
1700 *
1701 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the
1702 * driver, in addition to the regular wait queue. This includes the xbuf
1703 * queues. When clear, only the driver's wait queue will be flushed.
1704 */
1705 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01
1706 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02
1707
1708 /*
1709 * The default behavior is to only flush bufs that have B_FAILFAST set, but
1710 * to flush all queues within the driver.
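*
* For example, to flush every buf on every queue when failfast engages,
* both bits would be set; a hypothetical /etc/system entry to do that
* (assuming the stock "sd" module name) would be:
*
*	set sd:sd_failfast_flushctl = 0x3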
1711 */ 1712 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1713 1714 1715 /* 1716 * SD Testing Fault Injection 1717 */ 1718 #ifdef SD_FAULT_INJECTION 1719 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1720 static void sd_faultinjection(struct scsi_pkt *pktp); 1721 static void sd_injection_log(char *buf, struct sd_lun *un); 1722 #endif 1723 1724 /* 1725 * Device driver ops vector 1726 */ 1727 static struct cb_ops sd_cb_ops = { 1728 sdopen, /* open */ 1729 sdclose, /* close */ 1730 sdstrategy, /* strategy */ 1731 nodev, /* print */ 1732 sddump, /* dump */ 1733 sdread, /* read */ 1734 sdwrite, /* write */ 1735 sdioctl, /* ioctl */ 1736 nodev, /* devmap */ 1737 nodev, /* mmap */ 1738 nodev, /* segmap */ 1739 nochpoll, /* poll */ 1740 sd_prop_op, /* cb_prop_op */ 1741 0, /* streamtab */ 1742 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1743 CB_REV, /* cb_rev */ 1744 sdaread, /* async I/O read entry point */ 1745 sdawrite /* async I/O write entry point */ 1746 }; 1747 1748 struct dev_ops sd_ops = { 1749 DEVO_REV, /* devo_rev, */ 1750 0, /* refcnt */ 1751 sdinfo, /* info */ 1752 nulldev, /* identify */ 1753 sdprobe, /* probe */ 1754 sdattach, /* attach */ 1755 sddetach, /* detach */ 1756 nodev, /* reset */ 1757 &sd_cb_ops, /* driver operations */ 1758 NULL, /* bus operations */ 1759 sdpower, /* power */ 1760 ddi_quiesce_not_needed, /* quiesce */ 1761 }; 1762 1763 /* 1764 * This is the loadable module wrapper. 1765 */ 1766 #include <sys/modctl.h> 1767 1768 #ifndef XPV_HVM_DRIVER 1769 static struct modldrv modldrv = { 1770 &mod_driverops, /* Type of module. This one is a driver */ 1771 SD_MODULE_NAME, /* Module name. */ 1772 &sd_ops /* driver ops */ 1773 }; 1774 1775 static struct modlinkage modlinkage = { 1776 MODREV_1, &modldrv, NULL 1777 }; 1778 1779 #else /* XPV_HVM_DRIVER */ 1780 static struct modlmisc modlmisc = { 1781 &mod_miscops, /* Type of module. This one is a misc */ 1782 "HVM " SD_MODULE_NAME, /* Module name. */ 1783 }; 1784 1785 static struct modlinkage modlinkage = { 1786 MODREV_1, &modlmisc, NULL 1787 }; 1788 1789 #endif /* XPV_HVM_DRIVER */ 1790 1791 static cmlb_tg_ops_t sd_tgops = { 1792 TG_DK_OPS_VERSION_1, 1793 sd_tg_rdwr, 1794 sd_tg_getinfo 1795 }; 1796 1797 static struct scsi_asq_key_strings sd_additional_codes[] = { 1798 0x81, 0, "Logical Unit is Reserved", 1799 0x85, 0, "Audio Address Not Valid", 1800 0xb6, 0, "Media Load Mechanism Failed", 1801 0xB9, 0, "Audio Play Operation Aborted", 1802 0xbf, 0, "Buffer Overflow for Read All Subcodes Command", 1803 0x53, 2, "Medium removal prevented", 1804 0x6f, 0, "Authentication failed during key exchange", 1805 0x6f, 1, "Key not present", 1806 0x6f, 2, "Key not established", 1807 0x6f, 3, "Read without proper authentication", 1808 0x6f, 4, "Mismatched region to this logical unit", 1809 0x6f, 5, "Region reset count error", 1810 0xffff, 0x0, NULL 1811 }; 1812 1813 1814 /* 1815 * Struct for passing printing information for sense data messages 1816 */ 1817 struct sd_sense_info { 1818 int ssi_severity; 1819 int ssi_pfa_flag; 1820 }; 1821 1822 /* 1823 * Table of function pointers for iostart-side routines. Separate "chains" 1824 * of layered function calls are formed by placing the function pointers 1825 * sequentially in the desired order. Functions are called according to an 1826 * incrementing table index ordering. The last function in each chain must 1827 * be sd_core_iostart(). 
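For example, the first chain
* below (indexes 0 through 2) runs sd_mapblockaddr_iostart, then
* sd_pm_iostart, then sd_core_iostart, each called with its own table index.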
* The corresponding iodone-side routines are expected
1828 * in the sd_iodone_chain[] array.
1829 *
1830 * Note: It may seem more natural to organize both the iostart and iodone
1831 * functions together, into an array of structures (or some similar
1832 * organization) with a common index, rather than two separate arrays which
1833 * must be maintained in synchronization. The purpose of this division is
1834 * to achieve improved performance: individual arrays allow for more
1835 * effective cache line utilization on certain platforms.
1836 */
1837
1838 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp);
1839
1840
1841 static sd_chain_t sd_iostart_chain[] = {
1842
1843 /* Chain for buf IO for disk drive targets (PM enabled) */
1844 sd_mapblockaddr_iostart, /* Index: 0 */
1845 sd_pm_iostart, /* Index: 1 */
1846 sd_core_iostart, /* Index: 2 */
1847
1848 /* Chain for buf IO for disk drive targets (PM disabled) */
1849 sd_mapblockaddr_iostart, /* Index: 3 */
1850 sd_core_iostart, /* Index: 4 */
1851
1852 /*
1853 * Chain for buf IO for removable-media or large sector size
1854 * disk drive targets with RMW needed (PM enabled)
1855 */
1856 sd_mapblockaddr_iostart, /* Index: 5 */
1857 sd_mapblocksize_iostart, /* Index: 6 */
1858 sd_pm_iostart, /* Index: 7 */
1859 sd_core_iostart, /* Index: 8 */
1860
1861 /*
1862 * Chain for buf IO for removable-media or large sector size
1863 * disk drive targets with RMW needed (PM disabled)
1864 */
1865 sd_mapblockaddr_iostart, /* Index: 9 */
1866 sd_mapblocksize_iostart, /* Index: 10 */
1867 sd_core_iostart, /* Index: 11 */
1868
1869 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1870 sd_mapblockaddr_iostart, /* Index: 12 */
1871 sd_checksum_iostart, /* Index: 13 */
1872 sd_pm_iostart, /* Index: 14 */
1873 sd_core_iostart, /* Index: 15 */
1874
1875 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1876 sd_mapblockaddr_iostart, /* Index: 16 */
1877 sd_checksum_iostart, /* Index: 17 */
1878 sd_core_iostart, /* Index: 18 */
1879
1880 /* Chain for USCSI commands (all targets) */
1881 sd_pm_iostart, /* Index: 19 */
1882 sd_core_iostart, /* Index: 20 */
1883
1884 /* Chain for checksumming USCSI commands (all targets) */
1885 sd_checksum_uscsi_iostart, /* Index: 21 */
1886 sd_pm_iostart, /* Index: 22 */
1887 sd_core_iostart, /* Index: 23 */
1888
1889 /* Chain for "direct" USCSI commands (all targets) */
1890 sd_core_iostart, /* Index: 24 */
1891
1892 /* Chain for "direct priority" USCSI commands (all targets) */
1893 sd_core_iostart, /* Index: 25 */
1894
1895 /*
1896 * Chain for buf IO for large sector size disk drive targets
1897 * with RMW needed with checksumming (PM enabled)
1898 */
1899 sd_mapblockaddr_iostart, /* Index: 26 */
1900 sd_mapblocksize_iostart, /* Index: 27 */
1901 sd_checksum_iostart, /* Index: 28 */
1902 sd_pm_iostart, /* Index: 29 */
1903 sd_core_iostart, /* Index: 30 */
1904
1905 /*
1906 * Chain for buf IO for large sector size disk drive targets
1907 * with RMW needed with checksumming (PM disabled)
1908 */
1909 sd_mapblockaddr_iostart, /* Index: 31 */
1910 sd_mapblocksize_iostart, /* Index: 32 */
1911 sd_checksum_iostart, /* Index: 33 */
1912 sd_core_iostart, /* Index: 34 */
1913
1914 };
1915
1916 /*
1917 * Macros to locate the first function of each iostart chain in the
1918 * sd_iostart_chain[] array. These are located by the index in the array.
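* For example, SD_BEGIN_IOSTART(SD_CHAIN_DISK_IOSTART, un, bp) (see the
* dispatch macros defined further below) would enter the plain disk chain
* at sd_mapblockaddr_iostart.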
1919 */
1920 #define SD_CHAIN_DISK_IOSTART 0
1921 #define SD_CHAIN_DISK_IOSTART_NO_PM 3
1922 #define SD_CHAIN_MSS_DISK_IOSTART 5
1923 #define SD_CHAIN_RMMEDIA_IOSTART 5
1924 #define SD_CHAIN_MSS_DISK_IOSTART_NO_PM 9
1925 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9
1926 #define SD_CHAIN_CHKSUM_IOSTART 12
1927 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16
1928 #define SD_CHAIN_USCSI_CMD_IOSTART 19
1929 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21
1930 #define SD_CHAIN_DIRECT_CMD_IOSTART 24
1931 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25
1932 #define SD_CHAIN_MSS_CHKSUM_IOSTART 26
1933 #define SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM 31
1934
1935
1936 /*
1937 * Table of function pointers for the iodone-side routines for the driver-
1938 * internal layering mechanism. The calling sequence for iodone routines
1939 * uses a decrementing table index, so the last routine called in a chain
1940 * must be at the lowest array index location for that chain. The last
1941 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs)
1942 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering
1943 * of the functions in an iodone side chain must correspond to the ordering
1944 * of the iostart routines for that chain. Note that there is no iodone
1945 * side routine that corresponds to sd_core_iostart(), so there is no
1946 * entry in the table for this.
1947 */
1948
1949 static sd_chain_t sd_iodone_chain[] = {
1950
1951 /* Chain for buf IO for disk drive targets (PM enabled) */
1952 sd_buf_iodone, /* Index: 0 */
1953 sd_mapblockaddr_iodone, /* Index: 1 */
1954 sd_pm_iodone, /* Index: 2 */
1955
1956 /* Chain for buf IO for disk drive targets (PM disabled) */
1957 sd_buf_iodone, /* Index: 3 */
1958 sd_mapblockaddr_iodone, /* Index: 4 */
1959
1960 /*
1961 * Chain for buf IO for removable-media or large sector size
1962 * disk drive targets with RMW needed (PM enabled)
1963 */
1964 sd_buf_iodone, /* Index: 5 */
1965 sd_mapblockaddr_iodone, /* Index: 6 */
1966 sd_mapblocksize_iodone, /* Index: 7 */
1967 sd_pm_iodone, /* Index: 8 */
1968
1969 /*
1970 * Chain for buf IO for removable-media or large sector size
1971 * disk drive targets with RMW needed (PM disabled)
1972 */
1973 sd_buf_iodone, /* Index: 9 */
1974 sd_mapblockaddr_iodone, /* Index: 10 */
1975 sd_mapblocksize_iodone, /* Index: 11 */
1976
1977 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1978 sd_buf_iodone, /* Index: 12 */
1979 sd_mapblockaddr_iodone, /* Index: 13 */
1980 sd_checksum_iodone, /* Index: 14 */
1981 sd_pm_iodone, /* Index: 15 */
1982
1983 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1984 sd_buf_iodone, /* Index: 16 */
1985 sd_mapblockaddr_iodone, /* Index: 17 */
1986 sd_checksum_iodone, /* Index: 18 */
1987
1988 /* Chain for USCSI commands (non-checksum targets) */
1989 sd_uscsi_iodone, /* Index: 19 */
1990 sd_pm_iodone, /* Index: 20 */
1991
1992 /* Chain for USCSI commands (checksum targets) */
1993 sd_uscsi_iodone, /* Index: 21 */
1994 sd_checksum_uscsi_iodone, /* Index: 22 */
1995 sd_pm_iodone, /* Index: 23 */
1996
1997 /* Chain for "direct" USCSI commands (all targets) */
1998 sd_uscsi_iodone, /* Index: 24 */
1999
2000 /* Chain for "direct priority" USCSI commands (all targets) */
2001 sd_uscsi_iodone, /* Index: 25 */
2002
2003 /*
2004 * Chain for buf IO for large sector size disk drive targets
2005 * with checksumming (PM enabled)
2006 */
2007 sd_buf_iodone, /* Index: 26 */
2008 sd_mapblockaddr_iodone, /* Index: 27 */
2009 sd_mapblocksize_iodone, /* Index: 28 */
2010 sd_checksum_iodone, /* Index: 29 */
2011 sd_pm_iodone, /* Index: 30 */
2012
2013 /*
2014 * Chain for buf IO for large sector size disk drive targets
2015 * with checksumming (PM disabled)
2016 */
2017 sd_buf_iodone, /* Index: 31 */
2018 sd_mapblockaddr_iodone, /* Index: 32 */
2019 sd_mapblocksize_iodone, /* Index: 33 */
2020 sd_checksum_iodone, /* Index: 34 */
2021 };
2022
2023
2024 /*
2025 * Macros to locate the "first" function in the sd_iodone_chain[] array for
2026 * each iodone-side chain. These are located by the array index, but as the
2027 * iodone side functions are called in a decrementing-index order, the
2028 * highest index number in each chain must be specified (as these correspond
2029 * to the first function in the iodone chain that will be called by the core
2030 * at IO completion time).
2031 */
2032
2033 #define SD_CHAIN_DISK_IODONE 2
2034 #define SD_CHAIN_DISK_IODONE_NO_PM 4
2035 #define SD_CHAIN_RMMEDIA_IODONE 8
2036 #define SD_CHAIN_MSS_DISK_IODONE 8
2037 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11
2038 #define SD_CHAIN_MSS_DISK_IODONE_NO_PM 11
2039 #define SD_CHAIN_CHKSUM_IODONE 15
2040 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18
2041 #define SD_CHAIN_USCSI_CMD_IODONE 20
2042 #define SD_CHAIN_USCSI_CHKSUM_IODONE 23
2043 #define SD_CHAIN_DIRECT_CMD_IODONE 24
2044 #define SD_CHAIN_PRIORITY_CMD_IODONE 25
2045 #define SD_CHAIN_MSS_CHKSUM_IODONE 30
2046 #define SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM 34
2047
2048
2049
2050 /*
2051 * Array to map a layering chain index to the appropriate initpkt routine.
2052 * The redundant entries are present so that the index used for accessing
2053 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2054 * with this table as well.
2055 */
2056 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
2057
2058 static sd_initpkt_t sd_initpkt_map[] = {
2059
2060 /* Chain for buf IO for disk drive targets (PM enabled) */
2061 sd_initpkt_for_buf, /* Index: 0 */
2062 sd_initpkt_for_buf, /* Index: 1 */
2063 sd_initpkt_for_buf, /* Index: 2 */
2064
2065 /* Chain for buf IO for disk drive targets (PM disabled) */
2066 sd_initpkt_for_buf, /* Index: 3 */
2067 sd_initpkt_for_buf, /* Index: 4 */
2068
2069 /*
2070 * Chain for buf IO for removable-media or large sector size
2071 * disk drive targets (PM enabled)
2072 */
2073 sd_initpkt_for_buf, /* Index: 5 */
2074 sd_initpkt_for_buf, /* Index: 6 */
2075 sd_initpkt_for_buf, /* Index: 7 */
2076 sd_initpkt_for_buf, /* Index: 8 */
2077
2078 /*
2079 * Chain for buf IO for removable-media or large sector size
2080 * disk drive targets (PM disabled)
2081 */
2082 sd_initpkt_for_buf, /* Index: 9 */
2083 sd_initpkt_for_buf, /* Index: 10 */
2084 sd_initpkt_for_buf, /* Index: 11 */
2085
2086 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2087 sd_initpkt_for_buf, /* Index: 12 */
2088 sd_initpkt_for_buf, /* Index: 13 */
2089 sd_initpkt_for_buf, /* Index: 14 */
2090 sd_initpkt_for_buf, /* Index: 15 */
2091
2092 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2093 sd_initpkt_for_buf, /* Index: 16 */
2094 sd_initpkt_for_buf, /* Index: 17 */
2095 sd_initpkt_for_buf, /* Index: 18 */
2096
2097 /* Chain for USCSI commands (non-checksum targets) */
2098 sd_initpkt_for_uscsi, /* Index: 19 */
2099 sd_initpkt_for_uscsi, /* Index: 20 */
2100
2101 /* Chain for USCSI commands (checksum targets) */
2102 sd_initpkt_for_uscsi, /* Index: 21 */
2103 sd_initpkt_for_uscsi, /* Index: 22 */
2104 sd_initpkt_for_uscsi, /* Index: 23 */
2105
2106 /* Chain for "direct" USCSI commands (all targets) */
2107 sd_initpkt_for_uscsi, /* Index: 24 */
2108
2109 /* Chain for "direct priority" USCSI commands (all targets) */
2110 sd_initpkt_for_uscsi, /* Index: 25 */
2111
2112 /*
2113 * Chain for buf IO for large sector size disk drive targets
2114 * with checksumming (PM enabled)
2115 */
2116 sd_initpkt_for_buf, /* Index: 26 */
2117 sd_initpkt_for_buf, /* Index: 27 */
2118 sd_initpkt_for_buf, /* Index: 28 */
2119 sd_initpkt_for_buf, /* Index: 29 */
2120 sd_initpkt_for_buf, /* Index: 30 */
2121
2122 /*
2123 * Chain for buf IO for large sector size disk drive targets
2124 * with checksumming (PM disabled)
2125 */
2126 sd_initpkt_for_buf, /* Index: 31 */
2127 sd_initpkt_for_buf, /* Index: 32 */
2128 sd_initpkt_for_buf, /* Index: 33 */
2129 sd_initpkt_for_buf, /* Index: 34 */
2130 };
2131
2132
2133 /*
2134 * Array to map a layering chain index to the appropriate destroypkt routine.
2135 * The redundant entries are present so that the index used for accessing
2136 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2137 * with this table as well.
2138 */
2139 typedef void (*sd_destroypkt_t)(struct buf *);
2140
2141 static sd_destroypkt_t sd_destroypkt_map[] = {
2142
2143 /* Chain for buf IO for disk drive targets (PM enabled) */
2144 sd_destroypkt_for_buf, /* Index: 0 */
2145 sd_destroypkt_for_buf, /* Index: 1 */
2146 sd_destroypkt_for_buf, /* Index: 2 */
2147
2148 /* Chain for buf IO for disk drive targets (PM disabled) */
2149 sd_destroypkt_for_buf, /* Index: 3 */
2150 sd_destroypkt_for_buf, /* Index: 4 */
2151
2152 /*
2153 * Chain for buf IO for removable-media or large sector size
2154 * disk drive targets (PM enabled)
2155 */
2156 sd_destroypkt_for_buf, /* Index: 5 */
2157 sd_destroypkt_for_buf, /* Index: 6 */
2158 sd_destroypkt_for_buf, /* Index: 7 */
2159 sd_destroypkt_for_buf, /* Index: 8 */
2160
2161 /*
2162 * Chain for buf IO for removable-media or large sector size
2163 * disk drive targets (PM disabled)
2164 */
2165 sd_destroypkt_for_buf, /* Index: 9 */
2166 sd_destroypkt_for_buf, /* Index: 10 */
2167 sd_destroypkt_for_buf, /* Index: 11 */
2168
2169 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2170 sd_destroypkt_for_buf, /* Index: 12 */
2171 sd_destroypkt_for_buf, /* Index: 13 */
2172 sd_destroypkt_for_buf, /* Index: 14 */
2173 sd_destroypkt_for_buf, /* Index: 15 */
2174
2175 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2176 sd_destroypkt_for_buf, /* Index: 16 */
2177 sd_destroypkt_for_buf, /* Index: 17 */
2178 sd_destroypkt_for_buf, /* Index: 18 */
2179
2180 /* Chain for USCSI commands (non-checksum targets) */
2181 sd_destroypkt_for_uscsi, /* Index: 19 */
2182 sd_destroypkt_for_uscsi, /* Index: 20 */
2183
2184 /* Chain for USCSI commands (checksum targets) */
2185 sd_destroypkt_for_uscsi, /* Index: 21 */
2186 sd_destroypkt_for_uscsi, /* Index: 22 */
2187 sd_destroypkt_for_uscsi, /* Index: 23 */
2188
2189 /* Chain for "direct" USCSI commands (all targets) */
2190 sd_destroypkt_for_uscsi, /* Index: 24 */
2191
2192 /* Chain for "direct priority" USCSI commands (all targets) */
2193 sd_destroypkt_for_uscsi, /* Index: 25 */
2194
2195 /*
2196 * Chain for buf IO for large sector size disk drive targets
2197 * with checksumming (PM enabled)
2198 */
2199 sd_destroypkt_for_buf, /* Index: 26 */
2200 sd_destroypkt_for_buf, /* Index: 27 */
2201 sd_destroypkt_for_buf, /* Index: 28 */
2202 sd_destroypkt_for_buf, /* Index: 29 */
2203 sd_destroypkt_for_buf, /* Index: 30 */
2204
2205 /*
2206 * Chain for buf IO for large sector size disk drive targets
2207 * with checksumming (PM disabled)
2208 */
2209 sd_destroypkt_for_buf, /* Index: 31 */
2210 sd_destroypkt_for_buf, /* Index: 32 */
2211 sd_destroypkt_for_buf, /* Index: 33 */
2212 sd_destroypkt_for_buf, /* Index: 34 */
2213 };
2214
2215
2216
2217 /*
2218 * Array to map a layering chain index to the appropriate chain "type".
2219 * The chain type indicates a specific property/usage of the chain.
2220 * The redundant entries are present so that the index used for accessing
2221 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2222 * with this table as well.
2223 */
2224
2225 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */
2226 #define SD_CHAIN_BUFIO 1 /* regular buf IO */
2227 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */
2228 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */
2229 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */
2230 /* (for error recovery) */
2231
2232 static int sd_chain_type_map[] = {
2233
2234 /* Chain for buf IO for disk drive targets (PM enabled) */
2235 SD_CHAIN_BUFIO, /* Index: 0 */
2236 SD_CHAIN_BUFIO, /* Index: 1 */
2237 SD_CHAIN_BUFIO, /* Index: 2 */
2238
2239 /* Chain for buf IO for disk drive targets (PM disabled) */
2240 SD_CHAIN_BUFIO, /* Index: 3 */
2241 SD_CHAIN_BUFIO, /* Index: 4 */
2242
2243 /*
2244 * Chain for buf IO for removable-media or large sector size
2245 * disk drive targets (PM enabled)
2246 */
2247 SD_CHAIN_BUFIO, /* Index: 5 */
2248 SD_CHAIN_BUFIO, /* Index: 6 */
2249 SD_CHAIN_BUFIO, /* Index: 7 */
2250 SD_CHAIN_BUFIO, /* Index: 8 */
2251
2252 /*
2253 * Chain for buf IO for removable-media or large sector size
2254 * disk drive targets (PM disabled)
2255 */
2256 SD_CHAIN_BUFIO, /* Index: 9 */
2257 SD_CHAIN_BUFIO, /* Index: 10 */
2258 SD_CHAIN_BUFIO, /* Index: 11 */
2259
2260 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2261 SD_CHAIN_BUFIO, /* Index: 12 */
2262 SD_CHAIN_BUFIO, /* Index: 13 */
2263 SD_CHAIN_BUFIO, /* Index: 14 */
2264 SD_CHAIN_BUFIO, /* Index: 15 */
2265
2266 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2267 SD_CHAIN_BUFIO, /* Index: 16 */
2268 SD_CHAIN_BUFIO, /* Index: 17 */
2269 SD_CHAIN_BUFIO, /* Index: 18 */
2270
2271 /* Chain for USCSI commands (non-checksum targets) */
2272 SD_CHAIN_USCSI, /* Index: 19 */
2273 SD_CHAIN_USCSI, /* Index: 20 */
2274
2275 /* Chain for USCSI commands (checksum targets) */
2276 SD_CHAIN_USCSI, /* Index: 21 */
2277 SD_CHAIN_USCSI, /* Index: 22 */
2278 SD_CHAIN_USCSI, /* Index: 23 */
2279
2280 /* Chain for "direct" USCSI commands (all targets) */
2281 SD_CHAIN_DIRECT, /* Index: 24 */
2282
2283 /* Chain for "direct priority" USCSI commands (all targets) */
2284 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */
2285
2286 /*
2287 * Chain for buf IO for large sector size disk drive targets
2288 * with checksumming (PM enabled)
2289 */
2290 SD_CHAIN_BUFIO, /* Index: 26 */
2291 SD_CHAIN_BUFIO, /* Index: 27 */
2292 SD_CHAIN_BUFIO, /* Index: 28 */
2293 SD_CHAIN_BUFIO, /* Index: 29 */
2294 SD_CHAIN_BUFIO, /* Index: 30 */
2295
2296 /*
2297 * Chain for buf IO for large sector size disk drive targets
2298 * with checksumming (PM disabled)
2299 */
2300 SD_CHAIN_BUFIO, /* Index: 31 */
2301 SD_CHAIN_BUFIO, /* Index: 32 */
2302 SD_CHAIN_BUFIO, /* Index: 33 */
2303 SD_CHAIN_BUFIO, /* Index: 34 */
2304 };
2305
2306
2307 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain.
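* This lets code that is shared by several completion paths distinguish
* buf(9S) I/O from USCSI traffic using only the xbuf's cached iostart index.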
*/
2308 #define SD_IS_BUFIO(xp) \
2309 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2310
2311 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2312 #define SD_IS_DIRECT_PRIORITY(xp) \
2313 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2314
2315
2316
2317 /*
2318 * Struct, array, and macros to map a specific chain to the appropriate
2319 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2320 *
2321 * The sd_chain_index_map[] array is used at attach time to set the various
2322 * un_xxx_chain type members of the sd_lun softstate to the specific layering
2323 * chain to be used with the instance. This allows different instances to use
2324 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2325 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2326 * values at sd_xbuf init time, this allows (1) layering chains to be changed
2327 * dynamically and without locking; and (2) a layer to update the
2328 * xb_chain_io[start|done] member in a given xbuf with its current index value,
2329 * to allow for deferred processing of an IO within the same chain from a
2330 * different execution context.
2331 */
2332
2333 struct sd_chain_index {
2334 int sci_iostart_index;
2335 int sci_iodone_index;
2336 };
2337
2338 static struct sd_chain_index sd_chain_index_map[] = {
2339 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE },
2340 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM },
2341 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE },
2342 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM },
2343 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE },
2344 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM },
2345 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE },
2346 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE },
2347 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE },
2348 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE },
2349 { SD_CHAIN_MSS_CHKSUM_IOSTART, SD_CHAIN_MSS_CHKSUM_IODONE },
2350 { SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM, SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM },
2351
2352 };
2353
2354
2355 /*
2356 * The following are indexes into the sd_chain_index_map[] array.
2357 */
2358
2359 /* un->un_buf_chain_type must be set to one of these */
2360 #define SD_CHAIN_INFO_DISK 0
2361 #define SD_CHAIN_INFO_DISK_NO_PM 1
2362 #define SD_CHAIN_INFO_RMMEDIA 2
2363 #define SD_CHAIN_INFO_MSS_DISK 2
2364 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3
2365 #define SD_CHAIN_INFO_MSS_DSK_NO_PM 3
2366 #define SD_CHAIN_INFO_CHKSUM 4
2367 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5
2368 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM 10
2369 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM_NO_PM 11
2370
2371 /* un->un_uscsi_chain_type must be set to one of these */
2372 #define SD_CHAIN_INFO_USCSI_CMD 6
2373 /* USCSI with PM disabled is the same as DIRECT */
2374 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8
2375 #define SD_CHAIN_INFO_USCSI_CHKSUM 7
2376
2377 /* un->un_direct_chain_type must be set to one of these */
2378 #define SD_CHAIN_INFO_DIRECT_CMD 8
2379
2380 /* un->un_priority_chain_type must be set to one of these */
2381 #define SD_CHAIN_INFO_PRIORITY_CMD 9
2382
2383 /* size for devid inquiries */
2384 #define MAX_INQUIRY_SIZE 0xF0
2385
2386 /*
2387 * Macros used by functions to pass a given buf(9S) struct along to the
2388 * next function in the layering chain for further processing.
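*
* A pass-through layer typically has this shape (an illustrative sketch
* only; sd_example_iostart is not a real chain member):
*
*	static void
*	sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
*	{
*		(per-layer processing of bp goes here)
*		SD_NEXT_IOSTART(index, un, bp);
*	}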
2389 * 2390 * In the following macros, passing more than three arguments to the called 2391 * routines causes the optimizer for the SPARC compiler to stop doing tail 2392 * call elimination which results in significant performance degradation. 2393 */ 2394 #define SD_BEGIN_IOSTART(index, un, bp) \ 2395 ((*(sd_iostart_chain[index]))(index, un, bp)) 2396 2397 #define SD_BEGIN_IODONE(index, un, bp) \ 2398 ((*(sd_iodone_chain[index]))(index, un, bp)) 2399 2400 #define SD_NEXT_IOSTART(index, un, bp) \ 2401 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2402 2403 #define SD_NEXT_IODONE(index, un, bp) \ 2404 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2405 2406 /* 2407 * Function: _init 2408 * 2409 * Description: This is the driver _init(9E) entry point. 2410 * 2411 * Return Code: Returns the value from mod_install(9F) or 2412 * ddi_soft_state_init(9F) as appropriate. 2413 * 2414 * Context: Called when driver module loaded. 2415 */ 2416 2417 int 2418 _init(void) 2419 { 2420 int err; 2421 2422 /* establish driver name from module name */ 2423 sd_label = (char *)mod_modname(&modlinkage); 2424 2425 #ifndef XPV_HVM_DRIVER 2426 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2427 SD_MAXUNIT); 2428 if (err != 0) { 2429 return (err); 2430 } 2431 2432 #else /* XPV_HVM_DRIVER */ 2433 /* Remove the leading "hvm_" from the module name */ 2434 ASSERT(strncmp(sd_label, "hvm_", strlen("hvm_")) == 0); 2435 sd_label += strlen("hvm_"); 2436 2437 #endif /* XPV_HVM_DRIVER */ 2438 2439 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2440 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2441 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2442 2443 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2444 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2445 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2446 2447 /* 2448 * it's ok to init here even for fibre device 2449 */ 2450 sd_scsi_probe_cache_init(); 2451 2452 sd_scsi_target_lun_init(); 2453 2454 /* 2455 * Creating taskq before mod_install ensures that all callers (threads) 2456 * that enter the module after a successful mod_install encounter 2457 * a valid taskq. 2458 */ 2459 sd_taskq_create(); 2460 2461 err = mod_install(&modlinkage); 2462 if (err != 0) { 2463 /* delete taskq if install fails */ 2464 sd_taskq_delete(); 2465 2466 mutex_destroy(&sd_detach_mutex); 2467 mutex_destroy(&sd_log_mutex); 2468 mutex_destroy(&sd_label_mutex); 2469 2470 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2471 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2472 cv_destroy(&sd_tr.srq_inprocess_cv); 2473 2474 sd_scsi_probe_cache_fini(); 2475 2476 sd_scsi_target_lun_fini(); 2477 2478 #ifndef XPV_HVM_DRIVER 2479 ddi_soft_state_fini(&sd_state); 2480 #endif /* !XPV_HVM_DRIVER */ 2481 return (err); 2482 } 2483 2484 return (err); 2485 } 2486 2487 2488 /* 2489 * Function: _fini 2490 * 2491 * Description: This is the driver _fini(9E) entry point. 2492 * 2493 * Return Code: Returns the value from mod_remove(9F) 2494 * 2495 * Context: Called when driver module is unloaded. 
2496 */ 2497 2498 int 2499 _fini(void) 2500 { 2501 int err; 2502 2503 if ((err = mod_remove(&modlinkage)) != 0) { 2504 return (err); 2505 } 2506 2507 sd_taskq_delete(); 2508 2509 mutex_destroy(&sd_detach_mutex); 2510 mutex_destroy(&sd_log_mutex); 2511 mutex_destroy(&sd_label_mutex); 2512 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2513 2514 sd_scsi_probe_cache_fini(); 2515 2516 sd_scsi_target_lun_fini(); 2517 2518 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2519 cv_destroy(&sd_tr.srq_inprocess_cv); 2520 2521 #ifndef XPV_HVM_DRIVER 2522 ddi_soft_state_fini(&sd_state); 2523 #endif /* !XPV_HVM_DRIVER */ 2524 2525 return (err); 2526 } 2527 2528 2529 /* 2530 * Function: _info 2531 * 2532 * Description: This is the driver _info(9E) entry point. 2533 * 2534 * Arguments: modinfop - pointer to the driver modinfo structure 2535 * 2536 * Return Code: Returns the value from mod_info(9F). 2537 * 2538 * Context: Kernel thread context 2539 */ 2540 2541 int 2542 _info(struct modinfo *modinfop) 2543 { 2544 return (mod_info(&modlinkage, modinfop)); 2545 } 2546 2547 2548 /* 2549 * The following routines implement the driver message logging facility. 2550 * They provide component- and level- based debug output filtering. 2551 * Output may also be restricted to messages for a single instance by 2552 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set 2553 * to NULL, then messages for all instances are printed. 2554 * 2555 * These routines have been cloned from each other due to the language 2556 * constraints of macros and variable argument list processing. 2557 */ 2558 2559 2560 /* 2561 * Function: sd_log_err 2562 * 2563 * Description: This routine is called by the SD_ERROR macro for debug 2564 * logging of error conditions. 2565 * 2566 * Arguments: comp - driver component being logged 2567 * dev - pointer to driver info structure 2568 * fmt - error string and format to be logged 2569 */ 2570 2571 static void 2572 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...) 2573 { 2574 va_list ap; 2575 dev_info_t *dev; 2576 2577 ASSERT(un != NULL); 2578 dev = SD_DEVINFO(un); 2579 ASSERT(dev != NULL); 2580 2581 /* 2582 * Filter messages based on the global component and level masks. 2583 * Also print if un matches the value of sd_debug_un, or if 2584 * sd_debug_un is set to NULL. 2585 */ 2586 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) && 2587 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2588 mutex_enter(&sd_log_mutex); 2589 va_start(ap, fmt); 2590 (void) vsprintf(sd_log_buf, fmt, ap); 2591 va_end(ap); 2592 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2593 mutex_exit(&sd_log_mutex); 2594 } 2595 #ifdef SD_FAULT_INJECTION 2596 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2597 if (un->sd_injection_mask & comp) { 2598 mutex_enter(&sd_log_mutex); 2599 va_start(ap, fmt); 2600 (void) vsprintf(sd_log_buf, fmt, ap); 2601 va_end(ap); 2602 sd_injection_log(sd_log_buf, un); 2603 mutex_exit(&sd_log_mutex); 2604 } 2605 #endif 2606 } 2607 2608 2609 /* 2610 * Function: sd_log_info 2611 * 2612 * Description: This routine is called by the SD_INFO macro for debug 2613 * logging of general purpose informational conditions. 2614 * 2615 * Arguments: comp - driver component being logged 2616 * dev - pointer to driver info structure 2617 * fmt - info string and format to be logged 2618 */ 2619 2620 static void 2621 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...) 
2622 { 2623 va_list ap; 2624 dev_info_t *dev; 2625 2626 ASSERT(un != NULL); 2627 dev = SD_DEVINFO(un); 2628 ASSERT(dev != NULL); 2629 2630 /* 2631 * Filter messages based on the global component and level masks. 2632 * Also print if un matches the value of sd_debug_un, or if 2633 * sd_debug_un is set to NULL. 2634 */ 2635 if ((sd_component_mask & component) && 2636 (sd_level_mask & SD_LOGMASK_INFO) && 2637 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2638 mutex_enter(&sd_log_mutex); 2639 va_start(ap, fmt); 2640 (void) vsprintf(sd_log_buf, fmt, ap); 2641 va_end(ap); 2642 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2643 mutex_exit(&sd_log_mutex); 2644 } 2645 #ifdef SD_FAULT_INJECTION 2646 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2647 if (un->sd_injection_mask & component) { 2648 mutex_enter(&sd_log_mutex); 2649 va_start(ap, fmt); 2650 (void) vsprintf(sd_log_buf, fmt, ap); 2651 va_end(ap); 2652 sd_injection_log(sd_log_buf, un); 2653 mutex_exit(&sd_log_mutex); 2654 } 2655 #endif 2656 } 2657 2658 2659 /* 2660 * Function: sd_log_trace 2661 * 2662 * Description: This routine is called by the SD_TRACE macro for debug 2663 * logging of trace conditions (i.e. function entry/exit). 2664 * 2665 * Arguments: comp - driver component being logged 2666 * dev - pointer to driver info structure 2667 * fmt - trace string and format to be logged 2668 */ 2669 2670 static void 2671 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...) 2672 { 2673 va_list ap; 2674 dev_info_t *dev; 2675 2676 ASSERT(un != NULL); 2677 dev = SD_DEVINFO(un); 2678 ASSERT(dev != NULL); 2679 2680 /* 2681 * Filter messages based on the global component and level masks. 2682 * Also print if un matches the value of sd_debug_un, or if 2683 * sd_debug_un is set to NULL. 2684 */ 2685 if ((sd_component_mask & component) && 2686 (sd_level_mask & SD_LOGMASK_TRACE) && 2687 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2688 mutex_enter(&sd_log_mutex); 2689 va_start(ap, fmt); 2690 (void) vsprintf(sd_log_buf, fmt, ap); 2691 va_end(ap); 2692 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2693 mutex_exit(&sd_log_mutex); 2694 } 2695 #ifdef SD_FAULT_INJECTION 2696 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2697 if (un->sd_injection_mask & component) { 2698 mutex_enter(&sd_log_mutex); 2699 va_start(ap, fmt); 2700 (void) vsprintf(sd_log_buf, fmt, ap); 2701 va_end(ap); 2702 sd_injection_log(sd_log_buf, un); 2703 mutex_exit(&sd_log_mutex); 2704 } 2705 #endif 2706 } 2707 2708 2709 /* 2710 * Function: sdprobe 2711 * 2712 * Description: This is the driver probe(9e) entry point function. 2713 * 2714 * Arguments: devi - opaque device info handle 2715 * 2716 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2717 * DDI_PROBE_FAILURE: If the probe failed. 2718 * DDI_PROBE_PARTIAL: If the instance is not present now, 2719 * but may be present in the future. 2720 */ 2721 2722 static int 2723 sdprobe(dev_info_t *devi) 2724 { 2725 struct scsi_device *devp; 2726 int rval; 2727 #ifndef XPV_HVM_DRIVER 2728 int instance = ddi_get_instance(devi); 2729 #endif /* !XPV_HVM_DRIVER */ 2730 2731 /* 2732 * if it wasn't for pln, sdprobe could actually be nulldev 2733 * in the "__fibre" case. 2734 */ 2735 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2736 return (DDI_PROBE_DONTCARE); 2737 } 2738 2739 devp = ddi_get_driver_private(devi); 2740 2741 if (devp == NULL) { 2742 /* Ooops... nexus driver is mis-configured... 
*/ 2743 return (DDI_PROBE_FAILURE); 2744 } 2745 2746 #ifndef XPV_HVM_DRIVER 2747 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2748 return (DDI_PROBE_PARTIAL); 2749 } 2750 #endif /* !XPV_HVM_DRIVER */ 2751 2752 /* 2753 * Call the SCSA utility probe routine to see if we actually 2754 * have a target at this SCSI nexus. 2755 */ 2756 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2757 case SCSIPROBE_EXISTS: 2758 switch (devp->sd_inq->inq_dtype) { 2759 case DTYPE_DIRECT: 2760 rval = DDI_PROBE_SUCCESS; 2761 break; 2762 case DTYPE_RODIRECT: 2763 /* CDs etc. Can be removable media */ 2764 rval = DDI_PROBE_SUCCESS; 2765 break; 2766 case DTYPE_OPTICAL: 2767 /* 2768 * Rewritable optical driver HP115AA 2769 * Can also be removable media 2770 */ 2771 2772 /* 2773 * Do not attempt to bind to DTYPE_OPTICAL if 2774 * pre solaris 9 sparc sd behavior is required 2775 * 2776 * If first time through and sd_dtype_optical_bind 2777 * has not been set in /etc/system check properties 2778 */ 2779 2780 if (sd_dtype_optical_bind < 0) { 2781 sd_dtype_optical_bind = ddi_prop_get_int 2782 (DDI_DEV_T_ANY, devi, 0, 2783 "optical-device-bind", 1); 2784 } 2785 2786 if (sd_dtype_optical_bind == 0) { 2787 rval = DDI_PROBE_FAILURE; 2788 } else { 2789 rval = DDI_PROBE_SUCCESS; 2790 } 2791 break; 2792 2793 case DTYPE_NOTPRESENT: 2794 default: 2795 rval = DDI_PROBE_FAILURE; 2796 break; 2797 } 2798 break; 2799 default: 2800 rval = DDI_PROBE_PARTIAL; 2801 break; 2802 } 2803 2804 /* 2805 * This routine checks for resource allocation prior to freeing, 2806 * so it will take care of the "smart probing" case where a 2807 * scsi_probe() may or may not have been issued and will *not* 2808 * free previously-freed resources. 2809 */ 2810 scsi_unprobe(devp); 2811 return (rval); 2812 } 2813 2814 2815 /* 2816 * Function: sdinfo 2817 * 2818 * Description: This is the driver getinfo(9e) entry point function. 2819 * Given the device number, return the devinfo pointer from 2820 * the scsi_device structure or the instance number 2821 * associated with the dev_t. 2822 * 2823 * Arguments: dip - pointer to device info structure 2824 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2825 * DDI_INFO_DEVT2INSTANCE) 2826 * arg - driver dev_t 2827 * resultp - user buffer for request response 2828 * 2829 * Return Code: DDI_SUCCESS 2830 * DDI_FAILURE 2831 */ 2832 /* ARGSUSED */ 2833 static int 2834 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2835 { 2836 struct sd_lun *un; 2837 dev_t dev; 2838 int instance; 2839 int error; 2840 2841 switch (infocmd) { 2842 case DDI_INFO_DEVT2DEVINFO: 2843 dev = (dev_t)arg; 2844 instance = SDUNIT(dev); 2845 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2846 return (DDI_FAILURE); 2847 } 2848 *result = (void *) SD_DEVINFO(un); 2849 error = DDI_SUCCESS; 2850 break; 2851 case DDI_INFO_DEVT2INSTANCE: 2852 dev = (dev_t)arg; 2853 instance = SDUNIT(dev); 2854 *result = (void *)(uintptr_t)instance; 2855 error = DDI_SUCCESS; 2856 break; 2857 default: 2858 error = DDI_FAILURE; 2859 } 2860 return (error); 2861 } 2862 2863 /* 2864 * Function: sd_prop_op 2865 * 2866 * Description: This is the driver prop_op(9e) entry point function. 2867 * Return the number of blocks for the partition in question 2868 * or forward the request to the property facilities. 
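* (Partition-size properties are expected to be satisfied by cmlb;
* requests the driver cannot answer fall back to ddi_prop_op(9F),
* as the code below shows.)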
2869 * 2870 * Arguments: dev - device number 2871 * dip - pointer to device info structure 2872 * prop_op - property operator 2873 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2874 * name - pointer to property name 2875 * valuep - pointer or address of the user buffer 2876 * lengthp - property length 2877 * 2878 * Return Code: DDI_PROP_SUCCESS 2879 * DDI_PROP_NOT_FOUND 2880 * DDI_PROP_UNDEFINED 2881 * DDI_PROP_NO_MEMORY 2882 * DDI_PROP_BUF_TOO_SMALL 2883 */ 2884 2885 static int 2886 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2887 char *name, caddr_t valuep, int *lengthp) 2888 { 2889 struct sd_lun *un; 2890 2891 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2892 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2893 name, valuep, lengthp)); 2894 2895 return (cmlb_prop_op(un->un_cmlbhandle, 2896 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2897 SDPART(dev), (void *)SD_PATH_DIRECT)); 2898 } 2899 2900 /* 2901 * The following functions are for smart probing: 2902 * sd_scsi_probe_cache_init() 2903 * sd_scsi_probe_cache_fini() 2904 * sd_scsi_clear_probe_cache() 2905 * sd_scsi_probe_with_cache() 2906 */ 2907 2908 /* 2909 * Function: sd_scsi_probe_cache_init 2910 * 2911 * Description: Initializes the probe response cache mutex and head pointer. 2912 * 2913 * Context: Kernel thread context 2914 */ 2915 2916 static void 2917 sd_scsi_probe_cache_init(void) 2918 { 2919 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2920 sd_scsi_probe_cache_head = NULL; 2921 } 2922 2923 2924 /* 2925 * Function: sd_scsi_probe_cache_fini 2926 * 2927 * Description: Frees all resources associated with the probe response cache. 2928 * 2929 * Context: Kernel thread context 2930 */ 2931 2932 static void 2933 sd_scsi_probe_cache_fini(void) 2934 { 2935 struct sd_scsi_probe_cache *cp; 2936 struct sd_scsi_probe_cache *ncp; 2937 2938 /* Clean up our smart probing linked list */ 2939 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2940 ncp = cp->next; 2941 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2942 } 2943 sd_scsi_probe_cache_head = NULL; 2944 mutex_destroy(&sd_scsi_probe_cache_mutex); 2945 } 2946 2947 2948 /* 2949 * Function: sd_scsi_clear_probe_cache 2950 * 2951 * Description: This routine clears the probe response cache. This is 2952 * done when open() returns ENXIO so that when deferred 2953 * attach is attempted (possibly after a device has been 2954 * turned on) we will retry the probe. Since we don't know 2955 * which target we failed to open, we just clear the 2956 * entire cache. 2957 * 2958 * Context: Kernel thread context 2959 */ 2960 2961 static void 2962 sd_scsi_clear_probe_cache(void) 2963 { 2964 struct sd_scsi_probe_cache *cp; 2965 int i; 2966 2967 mutex_enter(&sd_scsi_probe_cache_mutex); 2968 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2969 /* 2970 * Reset all entries to SCSIPROBE_EXISTS. This will 2971 * force probing to be performed the next time 2972 * sd_scsi_probe_with_cache is called. 2973 */ 2974 for (i = 0; i < NTARGETS_WIDE; i++) { 2975 cp->cache[i] = SCSIPROBE_EXISTS; 2976 } 2977 } 2978 mutex_exit(&sd_scsi_probe_cache_mutex); 2979 } 2980 2981 2982 /* 2983 * Function: sd_scsi_probe_with_cache 2984 * 2985 * Description: This routine implements support for a scsi device probe 2986 * with cache. The driver maintains a cache of the target 2987 * responses to scsi probes. 
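One cache
* entry is kept per parent HBA node (pdip), with one response slot per
* target ID.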
If we get no response from a 2988 * target during a probe inquiry, we remember that, and we 2989 * avoid additional calls to scsi_probe on non-zero LUNs 2990 * on the same target until the cache is cleared. By doing 2991 * so we avoid the 1/4 sec selection timeout for nonzero 2992 * LUNs. lun0 of a target is always probed. 2993 * 2994 * Arguments: devp - Pointer to a scsi_device(9S) structure 2995 * waitfunc - indicates what the allocator routines should 2996 * do when resources are not available. This value 2997 * is passed on to scsi_probe() when that routine 2998 * is called. 2999 * 3000 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 3001 * otherwise the value returned by scsi_probe(9F). 3002 * 3003 * Context: Kernel thread context 3004 */ 3005 3006 static int 3007 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 3008 { 3009 struct sd_scsi_probe_cache *cp; 3010 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 3011 int lun, tgt; 3012 3013 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 3014 SCSI_ADDR_PROP_LUN, 0); 3015 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 3016 SCSI_ADDR_PROP_TARGET, -1); 3017 3018 /* Make sure caching enabled and target in range */ 3019 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 3020 /* do it the old way (no cache) */ 3021 return (scsi_probe(devp, waitfn)); 3022 } 3023 3024 mutex_enter(&sd_scsi_probe_cache_mutex); 3025 3026 /* Find the cache for this scsi bus instance */ 3027 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 3028 if (cp->pdip == pdip) { 3029 break; 3030 } 3031 } 3032 3033 /* If we can't find a cache for this pdip, create one */ 3034 if (cp == NULL) { 3035 int i; 3036 3037 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 3038 KM_SLEEP); 3039 cp->pdip = pdip; 3040 cp->next = sd_scsi_probe_cache_head; 3041 sd_scsi_probe_cache_head = cp; 3042 for (i = 0; i < NTARGETS_WIDE; i++) { 3043 cp->cache[i] = SCSIPROBE_EXISTS; 3044 } 3045 } 3046 3047 mutex_exit(&sd_scsi_probe_cache_mutex); 3048 3049 /* Recompute the cache for this target if LUN zero */ 3050 if (lun == 0) { 3051 cp->cache[tgt] = SCSIPROBE_EXISTS; 3052 } 3053 3054 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 3055 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 3056 return (SCSIPROBE_NORESP); 3057 } 3058 3059 /* Do the actual probe; save & return the result */ 3060 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 3061 } 3062 3063 3064 /* 3065 * Function: sd_scsi_target_lun_init 3066 * 3067 * Description: Initializes the attached lun chain mutex and head pointer. 
3068 *
3069 * Context: Kernel thread context
3070 */
3071
3072 static void
3073 sd_scsi_target_lun_init(void)
3074 {
3075 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
3076 sd_scsi_target_lun_head = NULL;
3077 }
3078
3079
3080 /*
3081 * Function: sd_scsi_target_lun_fini
3082 *
3083 * Description: Frees all resources associated with the attached lun
3084 * chain.
3085 *
3086 * Context: Kernel thread context
3087 */
3088
3089 static void
3090 sd_scsi_target_lun_fini(void)
3091 {
3092 struct sd_scsi_hba_tgt_lun *cp;
3093 struct sd_scsi_hba_tgt_lun *ncp;
3094
3095 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
3096 ncp = cp->next;
3097 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
3098 }
3099 sd_scsi_target_lun_head = NULL;
3100 mutex_destroy(&sd_scsi_target_lun_mutex);
3101 }
3102
3103
3104 /*
3105 * Function: sd_scsi_get_target_lun_count
3106 *
3107 * Description: This routine checks the attached lun chain to see how many
3108 * luns are attached on the specified SCSI controller and
3109 * target. Currently, some capabilities, such as tagged
3110 * queueing, are set up by the HBA on a per-target basis, so
3111 * all luns on a target share the same capabilities. Based on
3112 * this assumption, sd should only set these capabilities once
3113 * per target. This function is called when sd needs to decide
3114 * how many luns are already attached on a target.
3115 *
3116 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3117 * controller device.
3118 * target - The target ID on the controller's SCSI bus.
3119 *
3120 * Return Code: The number of luns attached on the specified target and
3121 * controller.
3122 * -1 if the target ID is not in parallel SCSI scope or the
3123 * given dip is not in the chain.
3124 *
3125 * Context: Kernel thread context
3126 */
3127
3128 static int
3129 sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
3130 {
3131 struct sd_scsi_hba_tgt_lun *cp;
3132
3133 if ((target < 0) || (target >= NTARGETS_WIDE)) {
3134 return (-1);
3135 }
3136
3137 mutex_enter(&sd_scsi_target_lun_mutex);
3138
3139 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
3140 if (cp->pdip == dip) {
3141 break;
3142 }
3143 }
3144
3145 mutex_exit(&sd_scsi_target_lun_mutex);
3146
3147 if (cp == NULL) {
3148 return (-1);
3149 }
3150
3151 return (cp->nlun[target]);
3152 }
3153
3154
3155 /*
3156 * Function: sd_scsi_update_lun_on_target
3157 *
3158 * Description: This routine is used to update the attached lun chain when a
3159 * lun is attached or detached on a target.
3160 *
3161 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3162 * controller device.
3163 * target - The target ID on the controller's SCSI bus.
3164 * flag - Indicates whether the lun is being attached or detached.
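* Any value other than SD_SCSI_LUN_ATTACH is
* treated as a detach.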
3165 * 3166 * Context: Kernel thread context 3167 */ 3168 3169 static void 3170 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 3171 { 3172 struct sd_scsi_hba_tgt_lun *cp; 3173 3174 mutex_enter(&sd_scsi_target_lun_mutex); 3175 3176 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 3177 if (cp->pdip == dip) { 3178 break; 3179 } 3180 } 3181 3182 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 3183 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 3184 KM_SLEEP); 3185 cp->pdip = dip; 3186 cp->next = sd_scsi_target_lun_head; 3187 sd_scsi_target_lun_head = cp; 3188 } 3189 3190 mutex_exit(&sd_scsi_target_lun_mutex); 3191 3192 if (cp != NULL) { 3193 if (flag == SD_SCSI_LUN_ATTACH) { 3194 cp->nlun[target] ++; 3195 } else { 3196 cp->nlun[target] --; 3197 } 3198 } 3199 } 3200 3201 3202 /* 3203 * Function: sd_spin_up_unit 3204 * 3205 * Description: Issues the following commands to spin-up the device: 3206 * START STOP UNIT, and INQUIRY. 3207 * 3208 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3209 * structure for this target. 3210 * 3211 * Return Code: 0 - success 3212 * EIO - failure 3213 * EACCES - reservation conflict 3214 * 3215 * Context: Kernel thread context 3216 */ 3217 3218 static int 3219 sd_spin_up_unit(sd_ssc_t *ssc) 3220 { 3221 size_t resid = 0; 3222 int has_conflict = FALSE; 3223 uchar_t *bufaddr; 3224 int status; 3225 struct sd_lun *un; 3226 3227 ASSERT(ssc != NULL); 3228 un = ssc->ssc_un; 3229 ASSERT(un != NULL); 3230 3231 /* 3232 * Send a throwaway START UNIT command. 3233 * 3234 * If we fail on this, we don't care presently what precisely 3235 * is wrong. EMC's arrays will also fail this with a check 3236 * condition (0x2/0x4/0x3) if the device is "inactive," but 3237 * we don't want to fail the attach because it may become 3238 * "active" later. 3239 * We don't know if power condition is supported or not at 3240 * this stage, use START STOP bit. 3241 */ 3242 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 3243 SD_TARGET_START, SD_PATH_DIRECT); 3244 3245 if (status != 0) { 3246 if (status == EACCES) 3247 has_conflict = TRUE; 3248 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3249 } 3250 3251 /* 3252 * Send another INQUIRY command to the target. This is necessary for 3253 * non-removable media direct access devices because their INQUIRY data 3254 * may not be fully qualified until they are spun up (perhaps via the 3255 * START command above). Note: This seems to be needed for some 3256 * legacy devices only.) The INQUIRY command should succeed even if a 3257 * Reservation Conflict is present. 3258 */ 3259 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 3260 3261 if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid) 3262 != 0) { 3263 kmem_free(bufaddr, SUN_INQSIZE); 3264 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 3265 return (EIO); 3266 } 3267 3268 /* 3269 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 3270 * Note that this routine does not return a failure here even if the 3271 * INQUIRY command did not return any data. This is a legacy behavior. 3272 */ 3273 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 3274 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 3275 } 3276 3277 kmem_free(bufaddr, SUN_INQSIZE); 3278 3279 /* If we hit a reservation conflict above, tell the caller. 
*/ 3280 if (has_conflict == TRUE) { 3281 return (EACCES); 3282 } 3283 3284 return (0); 3285 } 3286 3287 #ifdef _LP64 3288 /* 3289 * Function: sd_enable_descr_sense 3290 * 3291 * Description: This routine attempts to select descriptor sense format 3292 * using the Control mode page. Devices that support 64-bit 3293 * LBAs (for >2TB luns) should also implement descriptor 3294 * sense data, so we call this function whenever we see 3295 * a lun larger than 2TB. If for some reason the device 3296 * supports 64-bit LBAs but doesn't support descriptor sense, 3297 * presumably the mode select will fail. Everything will 3298 * continue to work normally, except that we will not get 3299 * complete sense data for commands that fail with an LBA 3300 * larger than 32 bits. 3301 * 3302 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3303 * structure for this target. 3304 * 3305 * Context: Kernel thread context only 3306 */ 3307 3308 static void 3309 sd_enable_descr_sense(sd_ssc_t *ssc) 3310 { 3311 uchar_t *header; 3312 struct mode_control_scsi3 *ctrl_bufp; 3313 size_t buflen; 3314 size_t bd_len; 3315 int status; 3316 struct sd_lun *un; 3317 3318 ASSERT(ssc != NULL); 3319 un = ssc->ssc_un; 3320 ASSERT(un != NULL); 3321 3322 /* 3323 * Read MODE SENSE page 0xA, Control Mode Page 3324 */ 3325 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3326 sizeof (struct mode_control_scsi3); 3327 header = kmem_zalloc(buflen, KM_SLEEP); 3328 3329 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 3330 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT); 3331 3332 if (status != 0) { 3333 SD_ERROR(SD_LOG_COMMON, un, 3334 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3335 goto eds_exit; 3336 } 3337 3338 /* 3339 * Determine size of Block Descriptors in order to locate 3340 * the mode page data. ATAPI devices return 0; SCSI devices 3341 * should return MODE_BLK_DESC_LENGTH. 3342 */ 3343 bd_len = ((struct mode_header *)header)->bdesc_length; 3344 3345 /* Clear the mode data length field for MODE SELECT */ 3346 ((struct mode_header *)header)->length = 0; 3347 3348 ctrl_bufp = (struct mode_control_scsi3 *) 3349 (header + MODE_HEADER_LENGTH + bd_len); 3350 3351 /* 3352 * If the page length is smaller than the expected value, 3353 * the target device doesn't support D_SENSE. Bail out here. 3354 */ 3355 if (ctrl_bufp->mode_page.length < 3356 sizeof (struct mode_control_scsi3) - 2) { 3357 SD_ERROR(SD_LOG_COMMON, un, 3358 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3359 goto eds_exit; 3360 } 3361 3362 /* 3363 * Clear PS bit for MODE SELECT 3364 */ 3365 ctrl_bufp->mode_page.ps = 0; 3366 3367 /* 3368 * Set D_SENSE to enable descriptor sense format.
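 * With D_SENSE set, the device returns descriptor-format sense data, whose information field can carry a full 64-bit LBA; fixed-format sense data can only report 32-bit LBAs.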
3369 */ 3370 ctrl_bufp->d_sense = 1; 3371 3372 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3373 3374 /* 3375 * Use MODE SELECT to commit the change to the D_SENSE bit 3376 */ 3377 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 3378 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT); 3379 3380 if (status != 0) { 3381 SD_INFO(SD_LOG_COMMON, un, 3382 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3383 } else { 3384 kmem_free(header, buflen); 3385 return; 3386 } 3387 3388 eds_exit: 3389 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3390 kmem_free(header, buflen); 3391 } 3392 3393 /* 3394 * Function: sd_reenable_dsense_task 3395 * 3396 * Description: Re-enable descriptor sense after device or bus reset 3397 * 3398 * Context: Executes in a taskq() thread context 3399 */ 3400 static void 3401 sd_reenable_dsense_task(void *arg) 3402 { 3403 struct sd_lun *un = arg; 3404 sd_ssc_t *ssc; 3405 3406 ASSERT(un != NULL); 3407 3408 ssc = sd_ssc_init(un); 3409 sd_enable_descr_sense(ssc); 3410 sd_ssc_fini(ssc); 3411 } 3412 #endif /* _LP64 */ 3413 3414 /* 3415 * Function: sd_set_mmc_caps 3416 * 3417 * Description: This routine determines if the device is MMC compliant and if 3418 * the device supports CDDA via a mode sense of the CDVD 3419 * capabilities mode page. Also checks if the device is a 3420 * dvdram writable device. 3421 * 3422 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3423 * structure for this target. 3424 * 3425 * Context: Kernel thread context only 3426 */ 3427 3428 static void 3429 sd_set_mmc_caps(sd_ssc_t *ssc) 3430 { 3431 struct mode_header_grp2 *sense_mhp; 3432 uchar_t *sense_page; 3433 caddr_t buf; 3434 int bd_len; 3435 int status; 3436 struct uscsi_cmd com; 3437 int rtn; 3438 uchar_t *out_data_rw, *out_data_hd; 3439 uchar_t *rqbuf_rw, *rqbuf_hd; 3440 uchar_t *out_data_gesn; 3441 int gesn_len; 3442 struct sd_lun *un; 3443 3444 ASSERT(ssc != NULL); 3445 un = ssc->ssc_un; 3446 ASSERT(un != NULL); 3447 3448 /* 3449 * The flags which will be set in this function are - mmc compliant, 3450 * dvdram writable device, cdda support. Initialize them to FALSE 3451 * and if a capability is detected - it will be set to TRUE. 3452 */ 3453 un->un_f_mmc_cap = FALSE; 3454 un->un_f_dvdram_writable_device = FALSE; 3455 un->un_f_cfg_cdda = FALSE; 3456 3457 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3458 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3459 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3460 3461 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3462 3463 if (status != 0) { 3464 /* command failed; just return */ 3465 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3466 return; 3467 } 3468 /* 3469 * If the mode sense request for the CDROM CAPABILITIES 3470 * page (0x2A) succeeds the device is assumed to be MMC. 
3471 */ 3472 un->un_f_mmc_cap = TRUE; 3473 3474 /* See if GET STATUS EVENT NOTIFICATION is supported */ 3475 if (un->un_f_mmc_gesn_polling) { 3476 gesn_len = SD_GESN_HEADER_LEN + SD_GESN_MEDIA_DATA_LEN; 3477 out_data_gesn = kmem_zalloc(gesn_len, KM_SLEEP); 3478 3479 rtn = sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(ssc, 3480 out_data_gesn, gesn_len, 1 << SD_GESN_MEDIA_CLASS); 3481 3482 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3483 3484 if ((rtn != 0) || !sd_gesn_media_data_valid(out_data_gesn)) { 3485 un->un_f_mmc_gesn_polling = FALSE; 3486 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3487 "sd_set_mmc_caps: gesn not supported " 3488 "%d %x %x %x %x\n", rtn, 3489 out_data_gesn[0], out_data_gesn[1], 3490 out_data_gesn[2], out_data_gesn[3]); 3491 } 3492 3493 kmem_free(out_data_gesn, gesn_len); 3494 } 3495 3496 /* Get to the page data */ 3497 sense_mhp = (struct mode_header_grp2 *)buf; 3498 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3499 sense_mhp->bdesc_length_lo; 3500 if (bd_len > MODE_BLK_DESC_LENGTH) { 3501 /* 3502 * We did not get back the expected block descriptor 3503 * length so we cannot determine if the device supports 3504 * CDDA. However, we still indicate the device is MMC 3505 * according to the successful response to the page 3506 * 0x2A mode sense request. 3507 */ 3508 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3509 "sd_set_mmc_caps: Mode Sense returned " 3510 "invalid block descriptor length\n"); 3511 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3512 return; 3513 } 3514 3515 /* See if read CDDA is supported */ 3516 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3517 bd_len); 3518 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3519 3520 /* See if writing DVD RAM is supported. */ 3521 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3522 if (un->un_f_dvdram_writable_device == TRUE) { 3523 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3524 return; 3525 } 3526 3527 /* 3528 * If the device presents DVD or CD capabilities in the mode 3529 * page, we can return here since a RRD will not have 3530 * these capabilities. 3531 */ 3532 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3533 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3534 return; 3535 } 3536 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3537 3538 /* 3539 * If un->un_f_dvdram_writable_device is still FALSE, 3540 * check for a Removable Rigid Disk (RRD). A RRD 3541 * device is identified by the features RANDOM_WRITABLE and 3542 * HARDWARE_DEFECT_MANAGEMENT. 3543 */ 3544 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3545 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3546 3547 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3548 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3549 RANDOM_WRITABLE, SD_PATH_STANDARD); 3550 3551 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3552 3553 if (rtn != 0) { 3554 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3555 kmem_free(rqbuf_rw, SENSE_LENGTH); 3556 return; 3557 } 3558 3559 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3560 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3561 3562 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3563 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3564 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3565 3566 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3567 3568 if (rtn == 0) { 3569 /* 3570 * We have good information, check for random writable 3571 * and hardware defect features. 
3572 */ 3573 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3574 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3575 un->un_f_dvdram_writable_device = TRUE; 3576 } 3577 } 3578 3579 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3580 kmem_free(rqbuf_rw, SENSE_LENGTH); 3581 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3582 kmem_free(rqbuf_hd, SENSE_LENGTH); 3583 } 3584 3585 /* 3586 * Function: sd_check_for_writable_cd 3587 * 3588 * Description: This routine determines if the media in the device is 3589 * writable or not. It uses the get configuration command (0x46) 3590 * to determine if the media is writable 3591 * 3592 * Arguments: un - driver soft state (unit) structure 3593 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3594 * chain and the normal command waitq, or 3595 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3596 * "direct" chain and bypass the normal command 3597 * waitq. 3598 * 3599 * Context: Never called at interrupt context. 3600 */ 3601 3602 static void 3603 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag) 3604 { 3605 struct uscsi_cmd com; 3606 uchar_t *out_data; 3607 uchar_t *rqbuf; 3608 int rtn; 3609 uchar_t *out_data_rw, *out_data_hd; 3610 uchar_t *rqbuf_rw, *rqbuf_hd; 3611 struct mode_header_grp2 *sense_mhp; 3612 uchar_t *sense_page; 3613 caddr_t buf; 3614 int bd_len; 3615 int status; 3616 struct sd_lun *un; 3617 3618 ASSERT(ssc != NULL); 3619 un = ssc->ssc_un; 3620 ASSERT(un != NULL); 3621 ASSERT(mutex_owned(SD_MUTEX(un))); 3622 3623 /* 3624 * Initialize the writable media to false, if configuration info. 3625 * tells us otherwise then only we will set it. 3626 */ 3627 un->un_f_mmc_writable_media = FALSE; 3628 mutex_exit(SD_MUTEX(un)); 3629 3630 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3631 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3632 3633 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH, 3634 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3635 3636 if (rtn != 0) 3637 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3638 3639 mutex_enter(SD_MUTEX(un)); 3640 if (rtn == 0) { 3641 /* 3642 * We have good information, check for writable DVD. 3643 */ 3644 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3645 un->un_f_mmc_writable_media = TRUE; 3646 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3647 kmem_free(rqbuf, SENSE_LENGTH); 3648 return; 3649 } 3650 } 3651 3652 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3653 kmem_free(rqbuf, SENSE_LENGTH); 3654 3655 /* 3656 * Determine if this is a RRD type device. 3657 */ 3658 mutex_exit(SD_MUTEX(un)); 3659 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3660 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3661 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3662 3663 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3664 3665 mutex_enter(SD_MUTEX(un)); 3666 if (status != 0) { 3667 /* command failed; just return */ 3668 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3669 return; 3670 } 3671 3672 /* Get to the page data */ 3673 sense_mhp = (struct mode_header_grp2 *)buf; 3674 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3675 if (bd_len > MODE_BLK_DESC_LENGTH) { 3676 /* 3677 * We did not get back the expected block descriptor length so 3678 * we cannot check the mode page. 
3679 */ 3680 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3681 "sd_check_for_writable_cd: Mode Sense returned " 3682 "invalid block descriptor length\n"); 3683 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3684 return; 3685 } 3686 3687 /* 3688 * If the device presents DVD or CD capabilities in the mode 3689 * page, we can return here since a RRD device will not have 3690 * these capabilities. 3691 */ 3692 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3693 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3694 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3695 return; 3696 } 3697 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3698 3699 /* 3700 * If un->un_f_mmc_writable_media is still FALSE, 3701 * check for RRD type media. A RRD device is identified 3702 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 3703 */ 3704 mutex_exit(SD_MUTEX(un)); 3705 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3706 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3707 3708 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3709 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3710 RANDOM_WRITABLE, path_flag); 3711 3712 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3713 if (rtn != 0) { 3714 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3715 kmem_free(rqbuf_rw, SENSE_LENGTH); 3716 mutex_enter(SD_MUTEX(un)); 3717 return; 3718 } 3719 3720 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3721 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3722 3723 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3724 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3725 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3726 3727 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3728 mutex_enter(SD_MUTEX(un)); 3729 if (rtn == 0) { 3730 /* 3731 * We have good information, check for random writable 3732 * and hardware defect features as current. 3733 */ 3734 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3735 (out_data_rw[10] & 0x1) && 3736 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3737 (out_data_hd[10] & 0x1)) { 3738 un->un_f_mmc_writable_media = TRUE; 3739 } 3740 } 3741 3742 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3743 kmem_free(rqbuf_rw, SENSE_LENGTH); 3744 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3745 kmem_free(rqbuf_hd, SENSE_LENGTH); 3746 } 3747 3748 /* 3749 * Function: sd_read_unit_properties 3750 * 3751 * Description: The following implements a property lookup mechanism. 3752 * Properties for particular disks (keyed on vendor, model 3753 * and rev numbers) are sought in the sd.conf file via 3754 * sd_process_sdconf_file(), and if not found there, are 3755 * looked for in a list hardcoded in this driver via 3756 * sd_process_sdconf_table() Once located the properties 3757 * are used to update the driver unit structure. 3758 * 3759 * Arguments: un - driver soft state (unit) structure 3760 */ 3761 3762 static void 3763 sd_read_unit_properties(struct sd_lun *un) 3764 { 3765 /* 3766 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3767 * the "sd-config-list" property (from the sd.conf file) or if 3768 * there was not a match for the inquiry vid/pid. If this event 3769 * occurs the static driver configuration table is searched for 3770 * a match. 
3771 */ 3772 ASSERT(un != NULL); 3773 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3774 sd_process_sdconf_table(un); 3775 } 3776 3777 /* check for LSI device */ 3778 sd_is_lsi(un); 3779 3780 3781 } 3782 3783 3784 /* 3785 * Function: sd_process_sdconf_file 3786 * 3787 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the 3788 * driver's config file (ie, sd.conf) and update the driver 3789 * soft state structure accordingly. 3790 * 3791 * Arguments: un - driver soft state (unit) structure 3792 * 3793 * Return Code: SD_SUCCESS - The properties were successfully set according 3794 * to the driver configuration file. 3795 * SD_FAILURE - The driver config list was not obtained or 3796 * there was no vid/pid match. This indicates that 3797 * the static config table should be used. 3798 * 3799 * The config file has a property, "sd-config-list". Currently we support 3800 * two kinds of formats. For both formats, the value of this property 3801 * is a list of duplets: 3802 * 3803 * sd-config-list= 3804 * <duplet>, 3805 * [,<duplet>]*; 3806 * 3807 * For the improved format, where 3808 * 3809 * <duplet>:= "<vid+pid>","<tunable-list>" 3810 * 3811 * and 3812 * 3813 * <tunable-list>:= <tunable> [, <tunable> ]*; 3814 * <tunable> = <name> : <value> 3815 * 3816 * The <vid+pid> is the string that is returned by the target device on a 3817 * SCSI inquiry command, the <tunable-list> contains one or more tunables 3818 * to apply to all target devices with the specified <vid+pid>. 3819 * 3820 * Each <tunable> is a "<name> : <value>" pair. 3821 * 3822 * For the old format, the structure of each duplet is as follows: 3823 * 3824 * <duplet>:= "<vid+pid>","<data-property-name_list>" 3825 * 3826 * The first entry of the duplet is the device ID string (the concatenated 3827 * vid & pid; not to be confused with a device_id). This is defined in 3828 * the same way as in the sd_disk_table. 3829 * 3830 * The second part of the duplet is a string that identifies a 3831 * data-property-name-list. The data-property-name-list is defined as 3832 * follows: 3833 * 3834 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3835 * 3836 * The syntax of <data-property-name> depends on the <version> field. 3837 * 3838 * If version = SD_CONF_VERSION_1 we have the following syntax: 3839 * 3840 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3841 * 3842 * where the prop0 value will be used to set prop0 if bit0 set in the 3843 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3844 * 3845 */ 3846 3847 static int 3848 sd_process_sdconf_file(struct sd_lun *un) 3849 { 3850 char **config_list = NULL; 3851 uint_t nelements; 3852 char *vidptr; 3853 int vidlen; 3854 char *dnlist_ptr; 3855 char *dataname_ptr; 3856 char *dataname_lasts; 3857 int *data_list = NULL; 3858 uint_t data_list_len; 3859 int rval = SD_FAILURE; 3860 int i; 3861 3862 ASSERT(un != NULL); 3863 3864 /* Obtain the configuration list associated with the .conf file */ 3865 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un), 3866 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list, 3867 &config_list, &nelements) != DDI_PROP_SUCCESS) { 3868 return (SD_FAILURE); 3869 } 3870 3871 /* 3872 * Compare vids in each duplet to the inquiry vid - if a match is 3873 * made, get the data value and update the soft state structure 3874 * accordingly. 3875 * 3876 * Each duplet should show as a pair of strings, return SD_FAILURE 3877 * otherwise. 
3878 */ 3879 if (nelements & 1) { 3880 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3881 "sd-config-list should show as pairs of strings.\n"); 3882 if (config_list) 3883 ddi_prop_free(config_list); 3884 return (SD_FAILURE); 3885 } 3886 3887 for (i = 0; i < nelements; i += 2) { 3888 /* 3889 * Note: The assumption here is that each vid entry is on 3890 * a unique line from its associated duplet. 3891 */ 3892 vidptr = config_list[i]; 3893 vidlen = (int)strlen(vidptr); 3894 if (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS) { 3895 continue; 3896 } 3897 3898 /* 3899 * dnlist contains 1 or more blank separated 3900 * data-property-name entries 3901 */ 3902 dnlist_ptr = config_list[i + 1]; 3903 3904 if (strchr(dnlist_ptr, ':') != NULL) { 3905 /* 3906 * Decode the improved format sd-config-list. 3907 */ 3908 sd_nvpair_str_decode(un, dnlist_ptr); 3909 } else { 3910 /* 3911 * The old format sd-config-list, loop through all 3912 * data-property-name entries in the 3913 * data-property-name-list 3914 * setting the properties for each. 3915 */ 3916 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t", 3917 &dataname_lasts); dataname_ptr != NULL; 3918 dataname_ptr = sd_strtok_r(NULL, " \t", 3919 &dataname_lasts)) { 3920 int version; 3921 3922 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3923 "sd_process_sdconf_file: disk:%s, " 3924 "data:%s\n", vidptr, dataname_ptr); 3925 3926 /* Get the data list */ 3927 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 3928 SD_DEVINFO(un), 0, dataname_ptr, &data_list, 3929 &data_list_len) != DDI_PROP_SUCCESS) { 3930 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3931 "sd_process_sdconf_file: data " 3932 "property (%s) has no value\n", 3933 dataname_ptr); 3934 continue; 3935 } 3936 3937 version = data_list[0]; 3938 3939 if (version == SD_CONF_VERSION_1) { 3940 sd_tunables values; 3941 3942 /* Set the properties */ 3943 if (sd_chk_vers1_data(un, data_list[1], 3944 &data_list[2], data_list_len, 3945 dataname_ptr) == SD_SUCCESS) { 3946 sd_get_tunables_from_conf(un, 3947 data_list[1], &data_list[2], 3948 &values); 3949 sd_set_vers1_properties(un, 3950 data_list[1], &values); 3951 rval = SD_SUCCESS; 3952 } else { 3953 rval = SD_FAILURE; 3954 } 3955 } else { 3956 scsi_log(SD_DEVINFO(un), sd_label, 3957 CE_WARN, "data property %s version " 3958 "0x%x is invalid.", 3959 dataname_ptr, version); 3960 rval = SD_FAILURE; 3961 } 3962 if (data_list) 3963 ddi_prop_free(data_list); 3964 } 3965 } 3966 } 3967 3968 /* free up the memory allocated by ddi_prop_lookup_string_array(). */ 3969 if (config_list) { 3970 ddi_prop_free(config_list); 3971 } 3972 3973 return (rval); 3974 } 3975 3976 /* 3977 * Function: sd_nvpair_str_decode() 3978 * 3979 * Description: Parse the improved format sd-config-list to get 3980 * each entry of tunable, which includes a name-value pair. 3981 * Then call sd_set_properties() to set the property. 
3982 * 3983 * Arguments: un - driver soft state (unit) structure 3984 * nvpair_str - the tunable list 3985 */ 3986 static void 3987 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str) 3988 { 3989 char *nv, *name, *value, *token; 3990 char *nv_lasts, *v_lasts, *x_lasts; 3991 3992 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL; 3993 nv = sd_strtok_r(NULL, ",", &nv_lasts)) { 3994 token = sd_strtok_r(nv, ":", &v_lasts); 3995 name = sd_strtok_r(token, " \t", &x_lasts); 3996 token = sd_strtok_r(NULL, ":", &v_lasts); 3997 value = sd_strtok_r(token, " \t", &x_lasts); 3998 if (name == NULL || value == NULL) { 3999 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4000 "sd_nvpair_str_decode: " 4001 "name or value is not valid!\n"); 4002 } else { 4003 sd_set_properties(un, name, value); 4004 } 4005 } 4006 } 4007 4008 /* 4009 * Function: sd_strtok_r() 4010 * 4011 * Description: This function uses strpbrk and strspn to break 4012 * string into tokens on sequentially subsequent calls. Return 4013 * NULL when no non-separator characters remain. The first 4014 * argument is NULL for subsequent calls. 4015 */ 4016 static char * 4017 sd_strtok_r(char *string, const char *sepset, char **lasts) 4018 { 4019 char *q, *r; 4020 4021 /* First or subsequent call */ 4022 if (string == NULL) 4023 string = *lasts; 4024 4025 if (string == NULL) 4026 return (NULL); 4027 4028 /* Skip leading separators */ 4029 q = string + strspn(string, sepset); 4030 4031 if (*q == '\0') 4032 return (NULL); 4033 4034 if ((r = strpbrk(q, sepset)) == NULL) 4035 *lasts = NULL; 4036 else { 4037 *r = '\0'; 4038 *lasts = r + 1; 4039 } 4040 return (q); 4041 } 4042 4043 /* 4044 * Function: sd_set_properties() 4045 * 4046 * Description: Set device properties based on the improved 4047 * format sd-config-list. 
4048 * 4049 * Arguments: un - driver soft state (unit) structure 4050 * name - supported tunable name 4051 * value - tunable value 4052 */ 4053 static void 4054 sd_set_properties(struct sd_lun *un, char *name, char *value) 4055 { 4056 char *endptr = NULL; 4057 long val = 0; 4058 4059 if (strcasecmp(name, "cache-nonvolatile") == 0) { 4060 if (strcasecmp(value, "true") == 0) { 4061 un->un_f_suppress_cache_flush = TRUE; 4062 } else if (strcasecmp(value, "false") == 0) { 4063 un->un_f_suppress_cache_flush = FALSE; 4064 } else { 4065 goto value_invalid; 4066 } 4067 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4068 "suppress_cache_flush flag set to %d\n", 4069 un->un_f_suppress_cache_flush); 4070 return; 4071 } 4072 4073 if (strcasecmp(name, "controller-type") == 0) { 4074 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4075 un->un_ctype = val; 4076 } else { 4077 goto value_invalid; 4078 } 4079 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4080 "ctype set to %d\n", un->un_ctype); 4081 return; 4082 } 4083 4084 if (strcasecmp(name, "delay-busy") == 0) { 4085 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4086 un->un_busy_timeout = drv_usectohz(val / 1000); 4087 } else { 4088 goto value_invalid; 4089 } 4090 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4091 "busy_timeout set to %d\n", un->un_busy_timeout); 4092 return; 4093 } 4094 4095 if (strcasecmp(name, "disksort") == 0) { 4096 if (strcasecmp(value, "true") == 0) { 4097 un->un_f_disksort_disabled = FALSE; 4098 } else if (strcasecmp(value, "false") == 0) { 4099 un->un_f_disksort_disabled = TRUE; 4100 } else { 4101 goto value_invalid; 4102 } 4103 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4104 "disksort disabled flag set to %d\n", 4105 un->un_f_disksort_disabled); 4106 return; 4107 } 4108 4109 if (strcasecmp(name, "power-condition") == 0) { 4110 if (strcasecmp(value, "true") == 0) { 4111 un->un_f_power_condition_disabled = FALSE; 4112 } else if (strcasecmp(value, "false") == 0) { 4113 un->un_f_power_condition_disabled = TRUE; 4114 } else { 4115 goto value_invalid; 4116 } 4117 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4118 "power condition disabled flag set to %d\n", 4119 un->un_f_power_condition_disabled); 4120 return; 4121 } 4122 4123 if (strcasecmp(name, "timeout-releasereservation") == 0) { 4124 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4125 un->un_reserve_release_time = val; 4126 } else { 4127 goto value_invalid; 4128 } 4129 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4130 "reservation release timeout set to %d\n", 4131 un->un_reserve_release_time); 4132 return; 4133 } 4134 4135 if (strcasecmp(name, "reset-lun") == 0) { 4136 if (strcasecmp(value, "true") == 0) { 4137 un->un_f_lun_reset_enabled = TRUE; 4138 } else if (strcasecmp(value, "false") == 0) { 4139 un->un_f_lun_reset_enabled = FALSE; 4140 } else { 4141 goto value_invalid; 4142 } 4143 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4144 "lun reset enabled flag set to %d\n", 4145 un->un_f_lun_reset_enabled); 4146 return; 4147 } 4148 4149 if (strcasecmp(name, "retries-busy") == 0) { 4150 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4151 un->un_busy_retry_count = val; 4152 } else { 4153 goto value_invalid; 4154 } 4155 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4156 "busy retry count set to %d\n", un->un_busy_retry_count); 4157 return; 4158 } 4159 4160 if (strcasecmp(name, "retries-timeout") == 0) { 4161 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4162 un->un_retry_count = val; 4163 } 
else { 4164 goto value_invalid; 4165 } 4166 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4167 "timeout retry count set to %d\n", un->un_retry_count); 4168 return; 4169 } 4170 4171 if (strcasecmp(name, "retries-notready") == 0) { 4172 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4173 un->un_notready_retry_count = val; 4174 } else { 4175 goto value_invalid; 4176 } 4177 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4178 "notready retry count set to %d\n", 4179 un->un_notready_retry_count); 4180 return; 4181 } 4182 4183 if (strcasecmp(name, "retries-reset") == 0) { 4184 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4185 un->un_reset_retry_count = val; 4186 } else { 4187 goto value_invalid; 4188 } 4189 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4190 "reset retry count set to %d\n", 4191 un->un_reset_retry_count); 4192 return; 4193 } 4194 4195 if (strcasecmp(name, "throttle-max") == 0) { 4196 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4197 un->un_saved_throttle = un->un_throttle = val; 4198 } else { 4199 goto value_invalid; 4200 } 4201 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4202 "throttle set to %d\n", un->un_throttle); 4203 } 4204 4205 if (strcasecmp(name, "throttle-min") == 0) { 4206 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4207 un->un_min_throttle = val; 4208 } else { 4209 goto value_invalid; 4210 } 4211 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4212 "min throttle set to %d\n", un->un_min_throttle); 4213 } 4214 4215 if (strcasecmp(name, "rmw-type") == 0) { 4216 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4217 un->un_f_rmw_type = val; 4218 } else { 4219 goto value_invalid; 4220 } 4221 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4222 "RMW type set to %d\n", un->un_f_rmw_type); 4223 } 4224 4225 if (strcasecmp(name, "physical-block-size") == 0) { 4226 if (ddi_strtol(value, &endptr, 0, &val) == 0 && 4227 ISP2(val) && val >= un->un_tgt_blocksize && 4228 val >= un->un_sys_blocksize) { 4229 un->un_phy_blocksize = val; 4230 } else { 4231 goto value_invalid; 4232 } 4233 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4234 "physical block size set to %d\n", un->un_phy_blocksize); 4235 } 4236 4237 if (strcasecmp(name, "retries-victim") == 0) { 4238 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4239 un->un_victim_retry_count = val; 4240 } else { 4241 goto value_invalid; 4242 } 4243 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4244 "victim retry count set to %d\n", 4245 un->un_victim_retry_count); 4246 return; 4247 } 4248 4249 /* 4250 * Validate the throttle values. 4251 * If any of the numbers are invalid, set everything to defaults. 
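 * For example, a hypothetical "throttle-min:16" combined with "throttle-max:8" would leave un_min_throttle greater than un_throttle, so un_throttle and un_min_throttle are reset to sd_max_throttle and sd_min_throttle respectively.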
4252 */ 4253 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4254 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4255 (un->un_min_throttle > un->un_throttle)) { 4256 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4257 un->un_min_throttle = sd_min_throttle; 4258 } 4259 4260 if (strcasecmp(name, "mmc-gesn-polling") == 0) { 4261 if (strcasecmp(value, "true") == 0) { 4262 un->un_f_mmc_gesn_polling = TRUE; 4263 } else if (strcasecmp(value, "false") == 0) { 4264 un->un_f_mmc_gesn_polling = FALSE; 4265 } else { 4266 goto value_invalid; 4267 } 4268 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4269 "mmc-gesn-polling set to %d\n", 4270 un->un_f_mmc_gesn_polling); 4271 } 4272 4273 return; 4274 4275 value_invalid: 4276 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4277 "value of prop %s is invalid\n", name); 4278 } 4279 4280 /* 4281 * Function: sd_get_tunables_from_conf() 4282 * 4283 * 4284 * This function reads the data list from the sd.conf file and pulls 4285 * the values that can have numeric values as arguments and places 4286 * the values in the appropriate sd_tunables member. 4287 * Since the order of the data list members varies across platforms 4288 * This function reads them from the data list in a platform specific 4289 * order and places them into the correct sd_tunable member that is 4290 * consistent across all platforms. 4291 */ 4292 static void 4293 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 4294 sd_tunables *values) 4295 { 4296 int i; 4297 int mask; 4298 4299 bzero(values, sizeof (sd_tunables)); 4300 4301 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4302 4303 mask = 1 << i; 4304 if (mask > flags) { 4305 break; 4306 } 4307 4308 switch (mask & flags) { 4309 case 0: /* This mask bit not set in flags */ 4310 continue; 4311 case SD_CONF_BSET_THROTTLE: 4312 values->sdt_throttle = data_list[i]; 4313 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4314 "sd_get_tunables_from_conf: throttle = %d\n", 4315 values->sdt_throttle); 4316 break; 4317 case SD_CONF_BSET_CTYPE: 4318 values->sdt_ctype = data_list[i]; 4319 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4320 "sd_get_tunables_from_conf: ctype = %d\n", 4321 values->sdt_ctype); 4322 break; 4323 case SD_CONF_BSET_NRR_COUNT: 4324 values->sdt_not_rdy_retries = data_list[i]; 4325 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4326 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 4327 values->sdt_not_rdy_retries); 4328 break; 4329 case SD_CONF_BSET_BSY_RETRY_COUNT: 4330 values->sdt_busy_retries = data_list[i]; 4331 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4332 "sd_get_tunables_from_conf: busy_retries = %d\n", 4333 values->sdt_busy_retries); 4334 break; 4335 case SD_CONF_BSET_RST_RETRIES: 4336 values->sdt_reset_retries = data_list[i]; 4337 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4338 "sd_get_tunables_from_conf: reset_retries = %d\n", 4339 values->sdt_reset_retries); 4340 break; 4341 case SD_CONF_BSET_RSV_REL_TIME: 4342 values->sdt_reserv_rel_time = data_list[i]; 4343 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4344 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 4345 values->sdt_reserv_rel_time); 4346 break; 4347 case SD_CONF_BSET_MIN_THROTTLE: 4348 values->sdt_min_throttle = data_list[i]; 4349 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4350 "sd_get_tunables_from_conf: min_throttle = %d\n", 4351 values->sdt_min_throttle); 4352 break; 4353 case SD_CONF_BSET_DISKSORT_DISABLED: 4354 values->sdt_disk_sort_dis = data_list[i]; 4355 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4356 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 4357 
values->sdt_disk_sort_dis); 4358 break; 4359 case SD_CONF_BSET_LUN_RESET_ENABLED: 4360 values->sdt_lun_reset_enable = data_list[i]; 4361 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4362 "sd_get_tunables_from_conf: lun_reset_enable = %d" 4363 "\n", values->sdt_lun_reset_enable); 4364 break; 4365 case SD_CONF_BSET_CACHE_IS_NV: 4366 values->sdt_suppress_cache_flush = data_list[i]; 4367 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4368 "sd_get_tunables_from_conf: " 4369 "suppress_cache_flush = %d" 4370 "\n", values->sdt_suppress_cache_flush); 4371 break; 4372 case SD_CONF_BSET_PC_DISABLED: 4373 values->sdt_power_condition_dis = data_list[i]; 4374 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4375 "sd_get_tunables_from_conf: power_condition_dis = " 4376 "%d\n", values->sdt_power_condition_dis); 4377 break; 4378 } 4379 } 4380 } 4381 4382 /* 4383 * Function: sd_process_sdconf_table 4384 * 4385 * Description: Search the static configuration table for a match on the 4386 * inquiry vid/pid and update the driver soft state structure 4387 * according to the table property values for the device. 4388 * 4389 * The form of a configuration table entry is: 4390 * <vid+pid>,<flags>,<property-data> 4391 * "SEAGATE ST42400N",1,0x40000, 4392 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 4393 * 4394 * Arguments: un - driver soft state (unit) structure 4395 */ 4396 4397 static void 4398 sd_process_sdconf_table(struct sd_lun *un) 4399 { 4400 char *id = NULL; 4401 int table_index; 4402 int idlen; 4403 4404 ASSERT(un != NULL); 4405 for (table_index = 0; table_index < sd_disk_table_size; 4406 table_index++) { 4407 id = sd_disk_table[table_index].device_id; 4408 idlen = strlen(id); 4409 4410 /* 4411 * The static configuration table currently does not 4412 * implement version 10 properties. Additionally, 4413 * multiple data-property-name entries are not 4414 * implemented in the static configuration table. 4415 */ 4416 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4417 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4418 "sd_process_sdconf_table: disk %s\n", id); 4419 sd_set_vers1_properties(un, 4420 sd_disk_table[table_index].flags, 4421 sd_disk_table[table_index].properties); 4422 break; 4423 } 4424 } 4425 } 4426 4427 4428 /* 4429 * Function: sd_sdconf_id_match 4430 * 4431 * Description: This local function implements a case-insensitive vid/pid 4432 * comparison as well as the boundary cases of wild card and 4433 * multiple blanks. 4434 * 4435 * Note: An implicit assumption made here is that the scsi 4436 * inquiry structure will always keep the vid, pid and 4437 * revision strings in consecutive sequence, so they can be 4438 * read as a single string. If this assumption is not the 4439 * case, a separate string, to be used for the check, needs 4440 * to be built with these strings concatenated. 4441 * 4442 * Arguments: un - driver soft state (unit) structure 4443 * id - table or config file vid/pid 4444 * idlen - length of the vid/pid (bytes) 4445 * 4446 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4447 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4448 */ 4449 4450 static int 4451 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 4452 { 4453 struct scsi_inquiry *sd_inq; 4454 int rval = SD_SUCCESS; 4455 4456 ASSERT(un != NULL); 4457 sd_inq = un->un_sd->sd_inq; 4458 ASSERT(id != NULL); 4459 4460 /* 4461 * We use the inq_vid as a pointer to a buffer containing the 4462 * vid and pid and use the entire vid/pid length of the table 4463 * entry for the comparison.
This works because the inq_pid 4464 * data member follows inq_vid in the scsi_inquiry structure. 4465 */ 4466 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 4467 /* 4468 * The user id string is compared to the inquiry vid/pid 4469 * using a case insensitive comparison and ignoring 4470 * multiple spaces. 4471 */ 4472 rval = sd_blank_cmp(un, id, idlen); 4473 if (rval != SD_SUCCESS) { 4474 /* 4475 * User id strings that start and end with a "*" 4476 * are a special case. These do not have a 4477 * specific vendor, and the product string can 4478 * appear anywhere in the 16 byte PID portion of 4479 * the inquiry data. This is a simple strstr() 4480 * type search for the user id in the inquiry data. 4481 */ 4482 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4483 char *pidptr = &id[1]; 4484 int i; 4485 int j; 4486 int pidstrlen = idlen - 2; 4487 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4488 pidstrlen; 4489 4490 if (j < 0) { 4491 return (SD_FAILURE); 4492 } 4493 for (i = 0; i < j; i++) { 4494 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4495 pidptr, pidstrlen) == 0) { 4496 rval = SD_SUCCESS; 4497 break; 4498 } 4499 } 4500 } 4501 } 4502 } 4503 return (rval); 4504 } 4505 4506 4507 /* 4508 * Function: sd_blank_cmp 4509 * 4510 * Description: If the id string starts and ends with a space, treat 4511 * multiple consecutive spaces as equivalent to a single 4512 * space. For example, this causes a sd_disk_table entry 4513 * of " NEC CDROM " to match a device's id string of 4514 * "NEC CDROM". 4515 * 4516 * Note: The success exit condition for this routine is if 4517 * the pointer to the table entry is '\0' and the cnt of 4518 * the inquiry length is zero. This will happen if the inquiry 4519 * string returned by the device is padded with spaces to be 4520 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4521 * SCSI spec states that the inquiry string is to be padded with 4522 * spaces. 4523 * 4524 * Arguments: un - driver soft state (unit) structure 4525 * id - table or config file vid/pid 4526 * idlen - length of the vid/pid (bytes) 4527 * 4528 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4529 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4530 */ 4531 4532 static int 4533 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4534 { 4535 char *p1; 4536 char *p2; 4537 int cnt; 4538 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4539 sizeof (SD_INQUIRY(un)->inq_pid); 4540 4541 ASSERT(un != NULL); 4542 p2 = un->un_sd->sd_inq->inq_vid; 4543 ASSERT(id != NULL); 4544 p1 = id; 4545 4546 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4547 /* 4548 * Note: string p1 is terminated by a NUL but string p2 4549 * isn't. The end of p2 is determined by cnt. 4550 */ 4551 for (;;) { 4552 /* skip over any extra blanks in both strings */ 4553 while ((*p1 != '\0') && (*p1 == ' ')) { 4554 p1++; 4555 } 4556 while ((cnt != 0) && (*p2 == ' ')) { 4557 p2++; 4558 cnt--; 4559 } 4560 4561 /* compare the two strings */ 4562 if ((cnt == 0) || 4563 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4564 break; 4565 } 4566 while ((cnt > 0) && 4567 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4568 p1++; 4569 p2++; 4570 cnt--; 4571 } 4572 } 4573 } 4574 4575 /* return SD_SUCCESS if both strings match */ 4576 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE); 4577 } 4578 4579 4580 /* 4581 * Function: sd_chk_vers1_data 4582 * 4583 * Description: Verify the version 1 device properties provided by the 4584 * user via the configuration file 4585 * 4586 * Arguments: un - driver soft state (unit) structure 4587 * flags - integer mask indicating properties to be set 4588 * prop_list - integer list of property values 4589 * list_len - number of elements in prop_list 4590 * 4591 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 4592 * SD_FAILURE - Indicates the user provided data is invalid 4593 */ 4594 4595 static int 4596 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 4597 int list_len, char *dataname_ptr) 4598 { 4599 int i; 4600 int mask = 1; 4601 int index = 0; 4602 4603 ASSERT(un != NULL); 4604 4605 /* Check for a NULL property name and list */ 4606 if (dataname_ptr == NULL) { 4607 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4608 "sd_chk_vers1_data: NULL data property name."); 4609 return (SD_FAILURE); 4610 } 4611 if (prop_list == NULL) { 4612 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4613 "sd_chk_vers1_data: %s NULL data property list.", 4614 dataname_ptr); 4615 return (SD_FAILURE); 4616 } 4617 4618 /* Display a warning if undefined bits are set in the flags */ 4619 if (flags & ~SD_CONF_BIT_MASK) { 4620 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4621 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 4622 "Properties not set.", 4623 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 4624 return (SD_FAILURE); 4625 } 4626 4627 /* 4628 * Verify the length of the list by identifying the highest bit set 4629 * in the flags and validating that the property list has a length 4630 * up to the index of this bit. 4631 */ 4632 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4633 mask = 1 << i; 4634 if (flags & mask) { 4635 index++; 4636 } 4637 } 4638 if (list_len < (index + 2)) { 4639 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4640 "sd_chk_vers1_data: " 4641 "Data property list %s size is incorrect. " 4642 "Properties not set.", dataname_ptr); 4643 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 4644 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 4645 return (SD_FAILURE); 4646 } 4647 return (SD_SUCCESS); 4648 } 4649 4650 4651 /* 4652 * Function: sd_set_vers1_properties 4653 * 4654 * Description: Set version 1 device properties based on a property list 4655 * retrieved from the driver configuration file or static 4656 * configuration table. Version 1 properties have the format: 4657 * 4658 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 4659 * 4660 * where the prop0 value will be used to set prop0 if bit0 4661 * is set in the flags 4662 * 4663 * Arguments: un - driver soft state (unit) structure 4664 * flags - integer mask indicating properties to be set 4665 * prop_list - integer list of property values 4666 */ 4667 4668 static void 4669 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4670 { 4671 ASSERT(un != NULL); 4672 4673 /* 4674 * Set the flag to indicate cache is to be disabled. An attempt 4675 * to disable the cache via sd_cache_control() will be made 4676 * later during attach once the basic initialization is complete.
4677 */ 4678 if (flags & SD_CONF_BSET_NOCACHE) { 4679 un->un_f_opt_disable_cache = TRUE; 4680 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4681 "sd_set_vers1_properties: caching disabled flag set\n"); 4682 } 4683 4684 /* CD-specific configuration parameters */ 4685 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4686 un->un_f_cfg_playmsf_bcd = TRUE; 4687 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4688 "sd_set_vers1_properties: playmsf_bcd set\n"); 4689 } 4690 if (flags & SD_CONF_BSET_READSUB_BCD) { 4691 un->un_f_cfg_readsub_bcd = TRUE; 4692 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4693 "sd_set_vers1_properties: readsub_bcd set\n"); 4694 } 4695 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4696 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4697 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4698 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4699 } 4700 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4701 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4702 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4703 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4704 } 4705 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4706 un->un_f_cfg_no_read_header = TRUE; 4707 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4708 "sd_set_vers1_properties: no_read_header set\n"); 4709 } 4710 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4711 un->un_f_cfg_read_cd_xd4 = TRUE; 4712 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4713 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4714 } 4715 4716 /* Support for devices which do not have valid/unique serial numbers */ 4717 if (flags & SD_CONF_BSET_FAB_DEVID) { 4718 un->un_f_opt_fab_devid = TRUE; 4719 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4720 "sd_set_vers1_properties: fab_devid bit set\n"); 4721 } 4722 4723 /* Support for user throttle configuration */ 4724 if (flags & SD_CONF_BSET_THROTTLE) { 4725 ASSERT(prop_list != NULL); 4726 un->un_saved_throttle = un->un_throttle = 4727 prop_list->sdt_throttle; 4728 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4729 "sd_set_vers1_properties: throttle set to %d\n", 4730 prop_list->sdt_throttle); 4731 } 4732 4733 /* Set the per disk retry count according to the conf file or table. 
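 * A zero sdt_not_rdy_retries value is ignored, so the driver's default not-ready retry count is left in place in that case.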
*/ 4734 if (flags & SD_CONF_BSET_NRR_COUNT) { 4735 ASSERT(prop_list != NULL); 4736 if (prop_list->sdt_not_rdy_retries) { 4737 un->un_notready_retry_count = 4738 prop_list->sdt_not_rdy_retries; 4739 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4740 "sd_set_vers1_properties: not ready retry count" 4741 " set to %d\n", un->un_notready_retry_count); 4742 } 4743 } 4744 4745 /* The controller type is reported for generic disk driver ioctls */ 4746 if (flags & SD_CONF_BSET_CTYPE) { 4747 ASSERT(prop_list != NULL); 4748 switch (prop_list->sdt_ctype) { 4749 case CTYPE_CDROM: 4750 un->un_ctype = prop_list->sdt_ctype; 4751 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4752 "sd_set_vers1_properties: ctype set to " 4753 "CTYPE_CDROM\n"); 4754 break; 4755 case CTYPE_CCS: 4756 un->un_ctype = prop_list->sdt_ctype; 4757 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4758 "sd_set_vers1_properties: ctype set to " 4759 "CTYPE_CCS\n"); 4760 break; 4761 case CTYPE_ROD: /* RW optical */ 4762 un->un_ctype = prop_list->sdt_ctype; 4763 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4764 "sd_set_vers1_properties: ctype set to " 4765 "CTYPE_ROD\n"); 4766 break; 4767 default: 4768 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4769 "sd_set_vers1_properties: Could not set " 4770 "invalid ctype value (%d)", 4771 prop_list->sdt_ctype); 4772 } 4773 } 4774 4775 /* Purple failover timeout */ 4776 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4777 ASSERT(prop_list != NULL); 4778 un->un_busy_retry_count = 4779 prop_list->sdt_busy_retries; 4780 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4781 "sd_set_vers1_properties: " 4782 "busy retry count set to %d\n", 4783 un->un_busy_retry_count); 4784 } 4785 4786 /* Purple reset retry count */ 4787 if (flags & SD_CONF_BSET_RST_RETRIES) { 4788 ASSERT(prop_list != NULL); 4789 un->un_reset_retry_count = 4790 prop_list->sdt_reset_retries; 4791 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4792 "sd_set_vers1_properties: " 4793 "reset retry count set to %d\n", 4794 un->un_reset_retry_count); 4795 } 4796 4797 /* Purple reservation release timeout */ 4798 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4799 ASSERT(prop_list != NULL); 4800 un->un_reserve_release_time = 4801 prop_list->sdt_reserv_rel_time; 4802 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4803 "sd_set_vers1_properties: " 4804 "reservation release timeout set to %d\n", 4805 un->un_reserve_release_time); 4806 } 4807 4808 /* 4809 * Driver flag telling the driver to verify that no commands are pending 4810 * for a device before issuing a Test Unit Ready. This is a workaround 4811 * for a firmware bug in some Seagate eliteI drives. 4812 */ 4813 if (flags & SD_CONF_BSET_TUR_CHECK) { 4814 un->un_f_cfg_tur_check = TRUE; 4815 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4816 "sd_set_vers1_properties: tur queue check set\n"); 4817 } 4818 4819 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4820 un->un_min_throttle = prop_list->sdt_min_throttle; 4821 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4822 "sd_set_vers1_properties: min throttle set to %d\n", 4823 un->un_min_throttle); 4824 } 4825 4826 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4827 un->un_f_disksort_disabled = 4828 (prop_list->sdt_disk_sort_dis != 0) ? 4829 TRUE : FALSE; 4830 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4831 "sd_set_vers1_properties: disksort disabled " 4832 "flag set to %d\n", 4833 prop_list->sdt_disk_sort_dis); 4834 } 4835 4836 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4837 un->un_f_lun_reset_enabled = 4838 (prop_list->sdt_lun_reset_enable != 0) ? 
4839 TRUE : FALSE; 4840 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4841 "sd_set_vers1_properties: lun reset enabled " 4842 "flag set to %d\n", 4843 prop_list->sdt_lun_reset_enable); 4844 } 4845 4846 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4847 un->un_f_suppress_cache_flush = 4848 (prop_list->sdt_suppress_cache_flush != 0) ? 4849 TRUE : FALSE; 4850 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4851 "sd_set_vers1_properties: suppress_cache_flush " 4852 "flag set to %d\n", 4853 prop_list->sdt_suppress_cache_flush); 4854 } 4855 4856 if (flags & SD_CONF_BSET_PC_DISABLED) { 4857 un->un_f_power_condition_disabled = 4858 (prop_list->sdt_power_condition_dis != 0) ? 4859 TRUE : FALSE; 4860 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4861 "sd_set_vers1_properties: power_condition_disabled " 4862 "flag set to %d\n", 4863 prop_list->sdt_power_condition_dis); 4864 } 4865 4866 /* 4867 * Validate the throttle values. 4868 * If any of the numbers are invalid, set everything to defaults. 4869 */ 4870 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4871 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4872 (un->un_min_throttle > un->un_throttle)) { 4873 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4874 un->un_min_throttle = sd_min_throttle; 4875 } 4876 } 4877 4878 /* 4879 * Function: sd_is_lsi() 4880 * 4881 * Description: Check for LSI devices; step through the static device 4882 * table to match the vid/pid. 4883 * 4884 * Args: un - ptr to sd_lun 4885 * 4886 * Notes: When creating a new LSI property, add the new property 4887 * to this function as well. 4888 */ 4889 static void 4890 sd_is_lsi(struct sd_lun *un) 4891 { 4892 char *id = NULL; 4893 int table_index; 4894 int idlen; 4895 void *prop; 4896 4897 ASSERT(un != NULL); 4898 for (table_index = 0; table_index < sd_disk_table_size; 4899 table_index++) { 4900 id = sd_disk_table[table_index].device_id; 4901 idlen = strlen(id); 4902 if (idlen == 0) { 4903 continue; 4904 } 4905 4906 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4907 prop = sd_disk_table[table_index].properties; 4908 if (prop == &lsi_properties || 4909 prop == &lsi_oem_properties || 4910 prop == &lsi_properties_scsi || 4911 prop == &symbios_properties) { 4912 un->un_f_cfg_is_lsi = TRUE; 4913 } 4914 break; 4915 } 4916 } 4917 } 4918 4919 /* 4920 * Function: sd_get_physical_geometry 4921 * 4922 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4923 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4924 * target, and use this information to initialize the physical 4925 * geometry cache specified by pgeom_p. 4926 * 4927 * MODE SENSE is an optional command, so failure in this case 4928 * does not necessarily denote an error. We want to use the 4929 * MODE SENSE commands to derive the physical geometry of the 4930 * device, but if either command fails, the logical geometry is 4931 * used as the fallback for disk label geometry in cmlb. 4932 * 4933 * This requires that un->un_blockcount and un->un_tgt_blocksize 4934 * have already been initialized for the current target and 4935 * that the current values be passed as args so that we don't 4936 * end up ever trying to use -1 as a valid value. This could 4937 * happen if either value is reset while we're not holding 4938 * the mutex. 4939 * 4940 * Arguments: un - driver soft state (unit) structure 4941 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4942 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4943 * to use the USCSI "direct" chain and bypass the normal 4944 * command waitq.
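 * * As a worked example with hypothetical values: a drive whose page 3 reports nsect = 63 and whose page 4 reports nhead = 16 and ncyl = 1024 yields spc = 16 * 63 = 1008 sectors per cylinder and a computed MODE SENSE capacity of 1008 * 1024 = 1032192 blocks, which is compared against the READ CAPACITY value below.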
4945 * 4946 * Context: Kernel thread only (can sleep). 4947 */ 4948 4949 static int 4950 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4951 diskaddr_t capacity, int lbasize, int path_flag) 4952 { 4953 struct mode_format *page3p; 4954 struct mode_geometry *page4p; 4955 struct mode_header *headerp; 4956 int sector_size; 4957 int nsect; 4958 int nhead; 4959 int ncyl; 4960 int intrlv; 4961 int spc; 4962 diskaddr_t modesense_capacity; 4963 int rpm; 4964 int bd_len; 4965 int mode_header_length; 4966 uchar_t *p3bufp; 4967 uchar_t *p4bufp; 4968 int cdbsize; 4969 int ret = EIO; 4970 sd_ssc_t *ssc; 4971 int status; 4972 4973 ASSERT(un != NULL); 4974 4975 if (lbasize == 0) { 4976 if (ISCD(un)) { 4977 lbasize = 2048; 4978 } else { 4979 lbasize = un->un_sys_blocksize; 4980 } 4981 } 4982 pgeom_p->g_secsize = (unsigned short)lbasize; 4983 4984 /* 4985 * If the unit is a cd/dvd drive MODE SENSE page three 4986 * and MODE SENSE page four are reserved (see SBC spec 4987 * and MMC spec). To prevent soft errors just return 4988 * using the default LBA size. 4989 * 4990 * Since SATA MODE SENSE function (sata_txlt_mode_sense()) does not 4991 * implement support for mode pages 3 and 4 return here to prevent 4992 * illegal requests on SATA drives. 4993 * 4994 * These pages are also reserved in SBC-2 and later. We assume SBC-2 4995 * or later for a direct-attached block device if the SCSI version is 4996 * at least SPC-3. 4997 */ 4998 4999 if (ISCD(un) || 5000 un->un_interconnect_type == SD_INTERCONNECT_SATA || 5001 (un->un_ctype == CTYPE_CCS && SD_INQUIRY(un)->inq_ansi >= 5)) 5002 return (ret); 5003 5004 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 5005 5006 /* 5007 * Retrieve MODE SENSE page 3 - Format Device Page 5008 */ 5009 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 5010 ssc = sd_ssc_init(un); 5011 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp, 5012 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag); 5013 if (status != 0) { 5014 SD_ERROR(SD_LOG_COMMON, un, 5015 "sd_get_physical_geometry: mode sense page 3 failed\n"); 5016 goto page3_exit; 5017 } 5018 5019 /* 5020 * Determine size of Block Descriptors in order to locate the mode 5021 * page data. ATAPI devices return 0, SCSI devices should return 5022 * MODE_BLK_DESC_LENGTH. 5023 */ 5024 headerp = (struct mode_header *)p3bufp; 5025 if (un->un_f_cfg_is_atapi == TRUE) { 5026 struct mode_header_grp2 *mhp = 5027 (struct mode_header_grp2 *)headerp; 5028 mode_header_length = MODE_HEADER_LENGTH_GRP2; 5029 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 5030 } else { 5031 mode_header_length = MODE_HEADER_LENGTH; 5032 bd_len = ((struct mode_header *)headerp)->bdesc_length; 5033 } 5034 5035 if (bd_len > MODE_BLK_DESC_LENGTH) { 5036 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5037 "sd_get_physical_geometry: received unexpected bd_len " 5038 "of %d, page3\n", bd_len); 5039 status = EIO; 5040 goto page3_exit; 5041 } 5042 5043 page3p = (struct mode_format *) 5044 ((caddr_t)headerp + mode_header_length + bd_len); 5045 5046 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 5047 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5048 "sd_get_physical_geometry: mode sense pg3 code mismatch " 5049 "%d\n", page3p->mode_page.code); 5050 status = EIO; 5051 goto page3_exit; 5052 } 5053 5054 /* 5055 * Use this physical geometry data only if BOTH MODE SENSE commands 5056 * complete successfully; otherwise, revert to the logical geometry. 
5057 * So, we need to save everything in temporary variables. 5058 */ 5059 sector_size = BE_16(page3p->data_bytes_sect); 5060 5061 /* 5062 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 5063 */ 5064 if (sector_size == 0) { 5065 sector_size = un->un_sys_blocksize; 5066 } else { 5067 sector_size &= ~(un->un_sys_blocksize - 1); 5068 } 5069 5070 nsect = BE_16(page3p->sect_track); 5071 intrlv = BE_16(page3p->interleave); 5072 5073 SD_INFO(SD_LOG_COMMON, un, 5074 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 5075 SD_INFO(SD_LOG_COMMON, un, 5076 " mode page: %d; nsect: %d; sector size: %d;\n", 5077 page3p->mode_page.code, nsect, sector_size); 5078 SD_INFO(SD_LOG_COMMON, un, 5079 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 5080 BE_16(page3p->track_skew), 5081 BE_16(page3p->cylinder_skew)); 5082 5083 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 5084 5085 /* 5086 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 5087 */ 5088 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 5089 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp, 5090 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag); 5091 if (status != 0) { 5092 SD_ERROR(SD_LOG_COMMON, un, 5093 "sd_get_physical_geometry: mode sense page 4 failed\n"); 5094 goto page4_exit; 5095 } 5096 5097 /* 5098 * Determine size of Block Descriptors in order to locate the mode 5099 * page data. ATAPI devices return 0, SCSI devices should return 5100 * MODE_BLK_DESC_LENGTH. 5101 */ 5102 headerp = (struct mode_header *)p4bufp; 5103 if (un->un_f_cfg_is_atapi == TRUE) { 5104 struct mode_header_grp2 *mhp = 5105 (struct mode_header_grp2 *)headerp; 5106 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 5107 } else { 5108 bd_len = ((struct mode_header *)headerp)->bdesc_length; 5109 } 5110 5111 if (bd_len > MODE_BLK_DESC_LENGTH) { 5112 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5113 "sd_get_physical_geometry: received unexpected bd_len of " 5114 "%d, page4\n", bd_len); 5115 status = EIO; 5116 goto page4_exit; 5117 } 5118 5119 page4p = (struct mode_geometry *) 5120 ((caddr_t)headerp + mode_header_length + bd_len); 5121 5122 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 5123 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5124 "sd_get_physical_geometry: mode sense pg4 code mismatch " 5125 "%d\n", page4p->mode_page.code); 5126 status = EIO; 5127 goto page4_exit; 5128 } 5129 5130 /* 5131 * Stash the data now, after we know that both commands completed. 5132 */ 5133 5134 5135 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 5136 spc = nhead * nsect; 5137 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 5138 rpm = BE_16(page4p->rpm); 5139 5140 modesense_capacity = spc * ncyl; 5141 5142 SD_INFO(SD_LOG_COMMON, un, 5143 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 5144 SD_INFO(SD_LOG_COMMON, un, 5145 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 5146 SD_INFO(SD_LOG_COMMON, un, 5147 " computed capacity(h*s*c): %d;\n", modesense_capacity); 5148 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 5149 (void *)pgeom_p, capacity); 5150 5151 /* 5152 * Compensate if the drive's geometry is not rectangular, i.e., 5153 * the product of C * H * S returned by MODE SENSE >= that returned 5154 * by read capacity. This is an idiosyncrasy of the original x86 5155 * disk subsystem. 
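 * Continuing the hypothetical example above: with spc = 1008, a MODE SENSE capacity of 1032192 blocks and a READ CAPACITY value of 1030000 blocks, g_acyl = (1032192 - 1030000 + 1007) / 1008 = 3 alternate cylinders, and g_ncyl = 1024 - 3 = 1021.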
5156 */ 5157 if (modesense_capacity >= capacity) { 5158 SD_INFO(SD_LOG_COMMON, un, 5159 "sd_get_physical_geometry: adjusting acyl; " 5160 "old: %d; new: %d\n", pgeom_p->g_acyl, 5161 (modesense_capacity - capacity + spc - 1) / spc); 5162 if (sector_size != 0) { 5163 /* 1243403: NEC D38x7 drives don't support sec size */ 5164 pgeom_p->g_secsize = (unsigned short)sector_size; 5165 } 5166 pgeom_p->g_nsect = (unsigned short)nsect; 5167 pgeom_p->g_nhead = (unsigned short)nhead; 5168 pgeom_p->g_capacity = capacity; 5169 pgeom_p->g_acyl = 5170 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 5171 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 5172 } 5173 5174 pgeom_p->g_rpm = (unsigned short)rpm; 5175 pgeom_p->g_intrlv = (unsigned short)intrlv; 5176 ret = 0; 5177 5178 SD_INFO(SD_LOG_COMMON, un, 5179 "sd_get_physical_geometry: mode sense geometry:\n"); 5180 SD_INFO(SD_LOG_COMMON, un, 5181 " nsect: %d; sector size: %d; interlv: %d\n", 5182 nsect, sector_size, intrlv); 5183 SD_INFO(SD_LOG_COMMON, un, 5184 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 5185 nhead, ncyl, rpm, modesense_capacity); 5186 SD_INFO(SD_LOG_COMMON, un, 5187 "sd_get_physical_geometry: (cached)\n"); 5188 SD_INFO(SD_LOG_COMMON, un, 5189 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 5190 pgeom_p->g_ncyl, pgeom_p->g_acyl, 5191 pgeom_p->g_nhead, pgeom_p->g_nsect); 5192 SD_INFO(SD_LOG_COMMON, un, 5193 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 5194 pgeom_p->g_secsize, pgeom_p->g_capacity, 5195 pgeom_p->g_intrlv, pgeom_p->g_rpm); 5196 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 5197 5198 page4_exit: 5199 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 5200 5201 page3_exit: 5202 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 5203 5204 if (status != 0) { 5205 if (status == EIO) { 5206 /* 5207 * Some disks do not support mode sense(6), we 5208 * should ignore this kind of error(sense key is 5209 * 0x5 - illegal request). 5210 */ 5211 uint8_t *sensep; 5212 int senlen; 5213 5214 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 5215 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 5216 ssc->ssc_uscsi_cmd->uscsi_rqresid); 5217 5218 if (senlen > 0 && 5219 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 5220 sd_ssc_assessment(ssc, 5221 SD_FMT_IGNORE_COMPROMISE); 5222 } else { 5223 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 5224 } 5225 } else { 5226 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5227 } 5228 } 5229 sd_ssc_fini(ssc); 5230 return (ret); 5231 } 5232 5233 /* 5234 * Function: sd_get_virtual_geometry 5235 * 5236 * Description: Ask the controller to tell us about the target device. 5237 * 5238 * Arguments: un - pointer to softstate 5239 * capacity - disk capacity in #blocks 5240 * lbasize - disk block size in bytes 5241 * 5242 * Context: Kernel thread only 5243 */ 5244 5245 static int 5246 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 5247 diskaddr_t capacity, int lbasize) 5248 { 5249 uint_t geombuf; 5250 int spc; 5251 5252 ASSERT(un != NULL); 5253 5254 /* Set sector size, and total number of sectors */ 5255 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 5256 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 5257 5258 /* Let the HBA tell us its geometry */ 5259 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 5260 5261 /* A value of -1 indicates an undefined "geometry" property */ 5262 if (geombuf == (-1)) { 5263 return (EINVAL); 5264 } 5265 5266 /* Initialize the logical geometry cache. 
*/
5267 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff;
5268 lgeom_p->g_nsect = geombuf & 0xffff;
5269 lgeom_p->g_secsize = un->un_sys_blocksize;
5270
5271 spc = lgeom_p->g_nhead * lgeom_p->g_nsect;
5272
5273 /*
5274 * Note: The driver originally converted the capacity value from
5275 * target blocks to system blocks. However, the capacity value passed
5276 * to this routine is already in terms of system blocks (this scaling
5277 * is done when the READ CAPACITY command is issued and processed).
5278 * This 'error' may have gone undetected because the usage of g_ncyl
5279 * (which is based upon g_capacity) is very limited within the driver.
5280 */
5281 lgeom_p->g_capacity = capacity;
5282
5283 /*
5284 * Set ncyl to zero if the hba returned a zero nhead or nsect value. The
5285 * hba may return zero values if the device has been removed.
5286 */
5287 if (spc == 0) {
5288 lgeom_p->g_ncyl = 0;
5289 } else {
5290 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc;
5291 }
5292 lgeom_p->g_acyl = 0;
5293
5294 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n");
5295 return (0);
5296
5297 }
5298 /*
5299 * Function: sd_update_block_info
5300 *
5301 * Description: Update the soft state with the device's new target block
5302 * size and capacity, and refresh the capacity errstat.
5303 *
5304 * Arguments: un: unit struct.
5305 * lbasize: new target sector size
5306 * capacity: new target capacity, ie. block count
5307 *
5308 * Context: Kernel thread context
5309 */
5310
5311 static void
5312 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity)
5313 {
5314 if (lbasize != 0) {
5315 un->un_tgt_blocksize = lbasize;
5316 un->un_f_tgt_blocksize_is_valid = TRUE;
5317 if (!un->un_f_has_removable_media) {
5318 un->un_sys_blocksize = lbasize;
5319 }
5320 }
5321
5322 if (capacity != 0) {
5323 un->un_blockcount = capacity;
5324 un->un_f_blockcount_is_valid = TRUE;
5325
5326 /*
5327 * The capacity has changed so update the errstats.
5328 */
5329 if (un->un_errstats != NULL) {
5330 struct sd_errstats *stp;
5331
5332 capacity *= un->un_sys_blocksize;
5333 stp = (struct sd_errstats *)un->un_errstats->ks_data;
5334 if (stp->sd_capacity.value.ui64 < capacity)
5335 stp->sd_capacity.value.ui64 = capacity;
5336 }
5337 }
5338 }
5339
5340
5341 /*
5342 * Function: sd_register_devid
5343 *
5344 * Description: This routine will obtain the device id information from the
5345 * target, obtain the serial number, and register the device
5346 * id with the ddi framework.
5347 *
5348 * Arguments: devi - the system's dev_info_t for the device.
5349 * un - driver soft state (unit) structure
5350 * reservation_flag - indicates if a reservation conflict
5351 * occurred during attach
5352 *
5353 * Context: Kernel Thread
5354 */
5355 static void
5356 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag)
5357 {
5358 int rval = 0;
5359 uchar_t *inq80 = NULL;
5360 size_t inq80_len = MAX_INQUIRY_SIZE;
5361 size_t inq80_resid = 0;
5362 uchar_t *inq83 = NULL;
5363 size_t inq83_len = MAX_INQUIRY_SIZE;
5364 size_t inq83_resid = 0;
5365 int dlen, len;
5366 char *sn;
5367 struct sd_lun *un;
5368
5369 ASSERT(ssc != NULL);
5370 un = ssc->ssc_un;
5371 ASSERT(un != NULL);
5372 ASSERT(mutex_owned(SD_MUTEX(un)));
5373 ASSERT((SD_DEVINFO(un)) == devi);
5374
5375
5376 /*
5377 * We check the availability of the World Wide Name (0x83) and Unit
5378 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and,
5379 * using the un_vpd_page_mask derived there, we decide which way to
5380 * get the WWN. If 0x83 is available, that is the best choice.
Our next choice is
5381 * 0x80. If neither is available, we munge the devid from the device
5382 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
5383 * to fabricate a devid for non-Sun qualified disks.
5384 */
5385 if (sd_check_vpd_page_support(ssc) == 0) {
5386 /* collect page 80 data if available */
5387 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {
5388
5389 mutex_exit(SD_MUTEX(un));
5390 inq80 = kmem_zalloc(inq80_len, KM_SLEEP);
5391
5392 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len,
5393 0x01, 0x80, &inq80_resid);
5394
5395 if (rval != 0) {
5396 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5397 kmem_free(inq80, inq80_len);
5398 inq80 = NULL;
5399 inq80_len = 0;
5400 } else if (ddi_prop_exists(
5401 DDI_DEV_T_NONE, SD_DEVINFO(un),
5402 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
5403 INQUIRY_SERIAL_NO) == 0) {
5404 /*
5405 * If we don't already have a serial number
5406 * property, do a quick verification of the
5407 * returned data and define the property.
5408 */
5409 dlen = inq80_len - inq80_resid;
5410 len = (size_t)inq80[3];
5411 if ((dlen >= 4) && ((len + 4) <= dlen)) {
5412 /*
5413 * Ensure sn termination, skip leading
5414 * blanks, and create property
5415 * 'inquiry-serial-no'.
5416 */
5417 sn = (char *)&inq80[4];
5418 sn[len] = 0;
5419 while (*sn && (*sn == ' '))
5420 sn++;
5421 if (*sn) {
5422 (void) ddi_prop_update_string(
5423 DDI_DEV_T_NONE,
5424 SD_DEVINFO(un),
5425 INQUIRY_SERIAL_NO, sn);
5426 }
5427 }
5428 }
5429 mutex_enter(SD_MUTEX(un));
5430 }
5431
5432 /* collect page 83 data if available */
5433 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
5434 mutex_exit(SD_MUTEX(un));
5435 inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
5436
5437 rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len,
5438 0x01, 0x83, &inq83_resid);
5439
5440 if (rval != 0) {
5441 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5442 kmem_free(inq83, inq83_len);
5443 inq83 = NULL;
5444 inq83_len = 0;
5445 }
5446 mutex_enter(SD_MUTEX(un));
5447 }
5448 }
5449
5450 /*
5451 * If transport has already registered a devid for this target
5452 * then that takes precedence over the driver's determination
5453 * of the devid.
5454 *
5455 * NOTE: The reason this check is done here instead of at the beginning
5456 * of the function is to allow the code above to create the
5457 * 'inquiry-serial-no' property.
5458 */
5459 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) {
5460 ASSERT(un->un_devid);
5461 un->un_f_devid_transport_defined = TRUE;
5462 goto cleanup; /* use devid registered by the transport */
5463 }
5464
5465 /*
5466 * This is the case of antiquated Sun disk drives that have the
5467 * FAB_DEVID property set in the disk_table. These drives
5468 * manage the devids by storing them in the last 2 available sectors
5469 * on the drive and have them fabricated by the ddi layer by calling
5470 * ddi_devid_init and passing the DEVID_FAB flag.
5471 */
5472 if (un->un_f_opt_fab_devid == TRUE) {
5473 /*
5474 * Depending on EINVAL isn't reliable, since a reserved disk
5475 * may result in invalid geometry, so check to make sure a
5476 * reservation conflict did not occur during attach.
5477 */
5478 if ((sd_get_devid(ssc) == EINVAL) &&
5479 (reservation_flag != SD_TARGET_IS_RESERVED)) {
5480 /*
5481 * The devid is invalid AND there is no reservation
5482 * conflict. Fabricate a new devid.
5483 */ 5484 (void) sd_create_devid(ssc); 5485 } 5486 5487 /* Register the devid if it exists */ 5488 if (un->un_devid != NULL) { 5489 (void) ddi_devid_register(SD_DEVINFO(un), 5490 un->un_devid); 5491 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5492 "sd_register_devid: Devid Fabricated\n"); 5493 } 5494 goto cleanup; 5495 } 5496 5497 /* encode best devid possible based on data available */ 5498 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 5499 (char *)ddi_driver_name(SD_DEVINFO(un)), 5500 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 5501 inq80, inq80_len - inq80_resid, inq83, inq83_len - 5502 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 5503 5504 /* devid successfully encoded, register devid */ 5505 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 5506 5507 } else { 5508 /* 5509 * Unable to encode a devid based on data available. 5510 * This is not a Sun qualified disk. Older Sun disk 5511 * drives that have the SD_FAB_DEVID property 5512 * set in the disk_table and non Sun qualified 5513 * disks are treated in the same manner. These 5514 * drives manage the devid's by storing them in 5515 * last 2 available sectors on the drive and 5516 * have them fabricated by the ddi layer by 5517 * calling ddi_devid_init and passing the 5518 * DEVID_FAB flag. 5519 * Create a fabricate devid only if there's no 5520 * fabricate devid existed. 5521 */ 5522 if (sd_get_devid(ssc) == EINVAL) { 5523 (void) sd_create_devid(ssc); 5524 } 5525 un->un_f_opt_fab_devid = TRUE; 5526 5527 /* Register the devid if it exists */ 5528 if (un->un_devid != NULL) { 5529 (void) ddi_devid_register(SD_DEVINFO(un), 5530 un->un_devid); 5531 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5532 "sd_register_devid: devid fabricated using " 5533 "ddi framework\n"); 5534 } 5535 } 5536 5537 cleanup: 5538 /* clean up resources */ 5539 if (inq80 != NULL) { 5540 kmem_free(inq80, inq80_len); 5541 } 5542 if (inq83 != NULL) { 5543 kmem_free(inq83, inq83_len); 5544 } 5545 } 5546 5547 5548 5549 /* 5550 * Function: sd_get_devid 5551 * 5552 * Description: This routine will return 0 if a valid device id has been 5553 * obtained from the target and stored in the soft state. If a 5554 * valid device id has not been previously read and stored, a 5555 * read attempt will be made. 5556 * 5557 * Arguments: un - driver soft state (unit) structure 5558 * 5559 * Return Code: 0 if we successfully get the device id 5560 * 5561 * Context: Kernel Thread 5562 */ 5563 5564 static int 5565 sd_get_devid(sd_ssc_t *ssc) 5566 { 5567 struct dk_devid *dkdevid; 5568 ddi_devid_t tmpid; 5569 uint_t *ip; 5570 size_t sz; 5571 diskaddr_t blk; 5572 int status; 5573 int chksum; 5574 int i; 5575 size_t buffer_size; 5576 struct sd_lun *un; 5577 5578 ASSERT(ssc != NULL); 5579 un = ssc->ssc_un; 5580 ASSERT(un != NULL); 5581 ASSERT(mutex_owned(SD_MUTEX(un))); 5582 5583 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 5584 un); 5585 5586 if (un->un_devid != NULL) { 5587 return (0); 5588 } 5589 5590 mutex_exit(SD_MUTEX(un)); 5591 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5592 (void *)SD_PATH_DIRECT) != 0) { 5593 mutex_enter(SD_MUTEX(un)); 5594 return (EINVAL); 5595 } 5596 5597 /* 5598 * Read and verify device id, stored in the reserved cylinders at the 5599 * end of the disk. Backup label is on the odd sectors of the last 5600 * track of the last cylinder. Device id will be on track of the next 5601 * to last cylinder. 
5602 */ 5603 mutex_enter(SD_MUTEX(un)); 5604 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 5605 mutex_exit(SD_MUTEX(un)); 5606 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 5607 status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk, 5608 SD_PATH_DIRECT); 5609 5610 if (status != 0) { 5611 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5612 goto error; 5613 } 5614 5615 /* Validate the revision */ 5616 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 5617 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 5618 status = EINVAL; 5619 goto error; 5620 } 5621 5622 /* Calculate the checksum */ 5623 chksum = 0; 5624 ip = (uint_t *)dkdevid; 5625 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int)); 5626 i++) { 5627 chksum ^= ip[i]; 5628 } 5629 5630 /* Compare the checksums */ 5631 if (DKD_GETCHKSUM(dkdevid) != chksum) { 5632 status = EINVAL; 5633 goto error; 5634 } 5635 5636 /* Validate the device id */ 5637 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 5638 status = EINVAL; 5639 goto error; 5640 } 5641 5642 /* 5643 * Store the device id in the driver soft state 5644 */ 5645 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 5646 tmpid = kmem_alloc(sz, KM_SLEEP); 5647 5648 mutex_enter(SD_MUTEX(un)); 5649 5650 un->un_devid = tmpid; 5651 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 5652 5653 kmem_free(dkdevid, buffer_size); 5654 5655 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 5656 5657 return (status); 5658 error: 5659 mutex_enter(SD_MUTEX(un)); 5660 kmem_free(dkdevid, buffer_size); 5661 return (status); 5662 } 5663 5664 5665 /* 5666 * Function: sd_create_devid 5667 * 5668 * Description: This routine will fabricate the device id and write it 5669 * to the disk. 5670 * 5671 * Arguments: un - driver soft state (unit) structure 5672 * 5673 * Return Code: value of the fabricated device id 5674 * 5675 * Context: Kernel Thread 5676 */ 5677 5678 static ddi_devid_t 5679 sd_create_devid(sd_ssc_t *ssc) 5680 { 5681 struct sd_lun *un; 5682 5683 ASSERT(ssc != NULL); 5684 un = ssc->ssc_un; 5685 ASSERT(un != NULL); 5686 5687 /* Fabricate the devid */ 5688 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 5689 == DDI_FAILURE) { 5690 return (NULL); 5691 } 5692 5693 /* Write the devid to disk */ 5694 if (sd_write_deviceid(ssc) != 0) { 5695 ddi_devid_free(un->un_devid); 5696 un->un_devid = NULL; 5697 } 5698 5699 return (un->un_devid); 5700 } 5701 5702 5703 /* 5704 * Function: sd_write_deviceid 5705 * 5706 * Description: This routine will write the device id to the disk 5707 * reserved sector. 
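*
* A sketch of the fabricate-and-persist flow this routine completes
* (mirroring sd_create_devid() above; illustrative, not additional
* driver code):
*
*    if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL,
*        &un->un_devid) == DDI_SUCCESS &&
*        sd_write_deviceid(ssc) != 0) {
*            ddi_devid_free(un->un_devid);
*            un->un_devid = NULL;
*    }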
5708 *
5709 * Arguments: un - driver soft state (unit) structure
5710 *
5711 * Return Code: -1 if the devid block could not be located
5712 * otherwise the value returned by sd_send_scsi_WRITE()
5713 *
5714 * Context: Kernel Thread
5715 */
5716
5717 static int
5718 sd_write_deviceid(sd_ssc_t *ssc)
5719 {
5720 struct dk_devid *dkdevid;
5721 uchar_t *buf;
5722 diskaddr_t blk;
5723 uint_t *ip, chksum;
5724 int status;
5725 int i;
5726 struct sd_lun *un;
5727
5728 ASSERT(ssc != NULL);
5729 un = ssc->ssc_un;
5730 ASSERT(un != NULL);
5731 ASSERT(mutex_owned(SD_MUTEX(un)));
5732
5733 mutex_exit(SD_MUTEX(un));
5734 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
5735 (void *)SD_PATH_DIRECT) != 0) {
5736 mutex_enter(SD_MUTEX(un));
5737 return (-1);
5738 }
5739
5740
5741 /* Allocate the buffer */
5742 buf = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);
5743 dkdevid = (struct dk_devid *)buf;
5744
5745 /* Fill in the revision */
5746 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
5747 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;
5748
5749 /* Copy in the device id */
5750 mutex_enter(SD_MUTEX(un));
5751 bcopy(un->un_devid, &dkdevid->dkd_devid,
5752 ddi_devid_sizeof(un->un_devid));
5753 mutex_exit(SD_MUTEX(un));
5754
5755 /* Calculate the checksum */
5756 chksum = 0;
5757 ip = (uint_t *)dkdevid;
5758 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
5759 i++) {
5760 chksum ^= ip[i];
5761 }
5762
5763 /* Fill-in checksum */
5764 DKD_FORMCHKSUM(chksum, dkdevid);
5765
5766 /* Write the reserved sector */
5767 status = sd_send_scsi_WRITE(ssc, buf, un->un_sys_blocksize, blk,
5768 SD_PATH_DIRECT);
5769 if (status != 0)
5770 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5771
5772 kmem_free(buf, un->un_sys_blocksize);
5773
5774 mutex_enter(SD_MUTEX(un));
5775 return (status);
5776 }
5777
5778
5779 /*
5780 * Function: sd_check_vpd_page_support
5781 *
5782 * Description: This routine sends an inquiry command with the EVPD bit set and
5783 * a page code of 0x00 to the device. It is used to determine which
5784 * vital product pages are available to find the devid. We are
5785 * looking for pages 0x83, 0x80, or 0xB1. A return of -1 means the
5786 * device does not support that command.
5787 *
5788 * Arguments: un - driver soft state (unit) structure
5789 *
5790 * Return Code: 0 - success
5791 * -1 - check condition, or VPD pages not supported
5792 *
5793 * Context: This routine can sleep.
5794 */
5795
5796 static int
5797 sd_check_vpd_page_support(sd_ssc_t *ssc)
5798 {
5799 uchar_t *page_list = NULL;
5800 uchar_t page_length = 0xff; /* Use max possible length */
5801 uchar_t evpd = 0x01; /* Set the EVPD bit */
5802 uchar_t page_code = 0x00; /* Supported VPD Pages */
5803 int rval = 0;
5804 int counter;
5805 struct sd_lun *un;
5806
5807 ASSERT(ssc != NULL);
5808 un = ssc->ssc_un;
5809 ASSERT(un != NULL);
5810 ASSERT(mutex_owned(SD_MUTEX(un)));
5811
5812 mutex_exit(SD_MUTEX(un));
5813
5814 /*
5815 * We'll set the page length to the maximum to save figuring it out
5816 * with an additional call.
5817 */
5818 page_list = kmem_zalloc(page_length, KM_SLEEP);
5819
5820 rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd,
5821 page_code, NULL);
5822
5823 if (rval != 0)
5824 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5825
5826 mutex_enter(SD_MUTEX(un));
5827
5828 /*
5829 * Now we must validate that the device accepted the command, as some
5830 * drives do not support it. If the drive does support it, we will
5831 * return 0, and the supported pages will be in un_vpd_page_mask. If
5832 * not, we return -1.
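*
* For illustration, a hypothetical Supported VPD Pages response from
* a disk exposing serial-number and devid pages could look like:
*
*    byte 0:  peripheral qualifier / device type
*    byte 1:  page code (0x00)
*    byte 3:  page length n
*    byte 4+: supported page codes in ascending order,
*             e.g. 0x00 0x80 0x83
*
* for which the loop below would set SD_VPD_SUPPORTED_PG,
* SD_VPD_UNIT_SERIAL_PG, and SD_VPD_DEVID_WWN_PG in un_vpd_page_mask.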
5833 */ 5834 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5835 /* Loop to find one of the 2 pages we need */ 5836 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5837 5838 /* 5839 * Pages are returned in ascending order, and 0x83 is what we 5840 * are hoping for. 5841 */ 5842 while ((page_list[counter] <= 0xB1) && 5843 (counter <= (page_list[VPD_PAGE_LENGTH] + 5844 VPD_HEAD_OFFSET))) { 5845 /* 5846 * Add 3 because page_list[3] is the number of 5847 * pages minus 3 5848 */ 5849 5850 switch (page_list[counter]) { 5851 case 0x00: 5852 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5853 break; 5854 case 0x80: 5855 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5856 break; 5857 case 0x81: 5858 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5859 break; 5860 case 0x82: 5861 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5862 break; 5863 case 0x83: 5864 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5865 break; 5866 case 0x86: 5867 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5868 break; 5869 case 0xB1: 5870 un->un_vpd_page_mask |= SD_VPD_DEV_CHARACTER_PG; 5871 break; 5872 } 5873 counter++; 5874 } 5875 5876 } else { 5877 rval = -1; 5878 5879 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5880 "sd_check_vpd_page_support: This drive does not implement " 5881 "VPD pages.\n"); 5882 } 5883 5884 kmem_free(page_list, page_length); 5885 5886 return (rval); 5887 } 5888 5889 5890 /* 5891 * Function: sd_setup_pm 5892 * 5893 * Description: Initialize Power Management on the device 5894 * 5895 * Context: Kernel Thread 5896 */ 5897 5898 static void 5899 sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi) 5900 { 5901 uint_t log_page_size; 5902 uchar_t *log_page_data; 5903 int rval = 0; 5904 struct sd_lun *un; 5905 5906 ASSERT(ssc != NULL); 5907 un = ssc->ssc_un; 5908 ASSERT(un != NULL); 5909 5910 /* 5911 * Since we are called from attach, holding a mutex for 5912 * un is unnecessary. Because some of the routines called 5913 * from here require SD_MUTEX to not be held, assert this 5914 * right up front. 5915 */ 5916 ASSERT(!mutex_owned(SD_MUTEX(un))); 5917 /* 5918 * Since the sd device does not have the 'reg' property, 5919 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5920 * The following code is to tell cpr that this device 5921 * DOES need to be suspended and resumed. 5922 */ 5923 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5924 "pm-hardware-state", "needs-suspend-resume"); 5925 5926 /* 5927 * This complies with the new power management framework 5928 * for certain desktop machines. Create the pm_components 5929 * property as a string array property. 5930 * If un_f_pm_supported is TRUE, that means the disk 5931 * attached HBA has set the "pm-capable" property and 5932 * the value of this property is bigger than 0. 5933 */ 5934 if (un->un_f_pm_supported) { 5935 /* 5936 * not all devices have a motor, try it first. 5937 * some devices may return ILLEGAL REQUEST, some 5938 * will hang 5939 * The following START_STOP_UNIT is used to check if target 5940 * device has a motor. 
5941 */
5942 un->un_f_start_stop_supported = TRUE;
5943
5944 if (un->un_f_power_condition_supported) {
5945 rval = sd_send_scsi_START_STOP_UNIT(ssc,
5946 SD_POWER_CONDITION, SD_TARGET_ACTIVE,
5947 SD_PATH_DIRECT);
5948 if (rval != 0) {
5949 un->un_f_power_condition_supported = FALSE;
5950 }
5951 }
5952 if (!un->un_f_power_condition_supported) {
5953 rval = sd_send_scsi_START_STOP_UNIT(ssc,
5954 SD_START_STOP, SD_TARGET_START, SD_PATH_DIRECT);
5955 }
5956 if (rval != 0) {
5957 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5958 un->un_f_start_stop_supported = FALSE;
5959 }
5960
5961 /*
5962 * Create the PM properties anyway; otherwise the parent
5963 * can't go to sleep.
5964 */
5965 un->un_f_pm_is_enabled = TRUE;
5966 (void) sd_create_pm_components(devi, un);
5967
5968 /*
5969 * If it claims that log sense is supported, check it out.
5970 */
5971 if (un->un_f_log_sense_supported) {
5972 rval = sd_log_page_supported(ssc,
5973 START_STOP_CYCLE_PAGE);
5974 if (rval == 1) {
5975 /* Page found, use it. */
5976 un->un_start_stop_cycle_page =
5977 START_STOP_CYCLE_PAGE;
5978 } else {
5979 /*
5980 * Page not found or log sense is not
5981 * supported.
5982 * Notice we do not check the old style
5983 * START_STOP_CYCLE_VU_PAGE because this
5984 * code path does not apply to old disks.
5985 */
5986 un->un_f_log_sense_supported = FALSE;
5987 un->un_f_pm_log_sense_smart = FALSE;
5988 }
5989 }
5990
5991 return;
5992 }
5993
5994 /*
5995 * For a disk whose HBA has not set the "pm-capable" property,
5996 * check whether it supports power management.
5997 */
5998 if (!un->un_f_log_sense_supported) {
5999 un->un_power_level = SD_SPINDLE_ON;
6000 un->un_f_pm_is_enabled = FALSE;
6001 return;
6002 }
6003
6004 rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE);
6005
6006 #ifdef SDDEBUG
6007 if (sd_force_pm_supported) {
6008 /* Force a successful result */
6009 rval = 1;
6010 }
6011 #endif
6012
6013 /*
6014 * If the start-stop cycle counter log page is not supported
6015 * or if the pm-capable property is set to be false (0),
6016 * then we should not create the pm_components property.
6017 */
6018 if (rval == -1) {
6019 /*
6020 * Error.
6021 * Reading log sense failed, most likely this is
6022 * an older drive that does not support log sense.
6023 * If this fails, auto-pm is not supported.
6024 */
6025 un->un_power_level = SD_SPINDLE_ON;
6026 un->un_f_pm_is_enabled = FALSE;
6027
6028 } else if (rval == 0) {
6029 /*
6030 * Page not found.
6031 * The start stop cycle counter is implemented as page
6032 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For
6033 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE).
6034 */
6035 if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) {
6036 /*
6037 * Page found, use this one.
6038 */
6039 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
6040 un->un_f_pm_is_enabled = TRUE;
6041 } else {
6042 /*
6043 * Error or page not found.
6044 * auto-pm is not supported for this device.
6045 */
6046 un->un_power_level = SD_SPINDLE_ON;
6047 un->un_f_pm_is_enabled = FALSE;
6048 }
6049 } else {
6050 /*
6051 * Page found, use it.
6052 */ 6053 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 6054 un->un_f_pm_is_enabled = TRUE; 6055 } 6056 6057 6058 if (un->un_f_pm_is_enabled == TRUE) { 6059 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 6060 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 6061 6062 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 6063 log_page_size, un->un_start_stop_cycle_page, 6064 0x01, 0, SD_PATH_DIRECT); 6065 6066 if (rval != 0) { 6067 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6068 } 6069 6070 #ifdef SDDEBUG 6071 if (sd_force_pm_supported) { 6072 /* Force a successful result */ 6073 rval = 0; 6074 } 6075 #endif 6076 6077 /* 6078 * If the Log sense for Page( Start/stop cycle counter page) 6079 * succeeds, then power management is supported and we can 6080 * enable auto-pm. 6081 */ 6082 if (rval == 0) { 6083 (void) sd_create_pm_components(devi, un); 6084 } else { 6085 un->un_power_level = SD_SPINDLE_ON; 6086 un->un_f_pm_is_enabled = FALSE; 6087 } 6088 6089 kmem_free(log_page_data, log_page_size); 6090 } 6091 } 6092 6093 6094 /* 6095 * Function: sd_create_pm_components 6096 * 6097 * Description: Initialize PM property. 6098 * 6099 * Context: Kernel thread context 6100 */ 6101 6102 static void 6103 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 6104 { 6105 ASSERT(!mutex_owned(SD_MUTEX(un))); 6106 6107 if (un->un_f_power_condition_supported) { 6108 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 6109 "pm-components", sd_pwr_pc.pm_comp, 5) 6110 != DDI_PROP_SUCCESS) { 6111 un->un_power_level = SD_SPINDLE_ACTIVE; 6112 un->un_f_pm_is_enabled = FALSE; 6113 return; 6114 } 6115 } else { 6116 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 6117 "pm-components", sd_pwr_ss.pm_comp, 3) 6118 != DDI_PROP_SUCCESS) { 6119 un->un_power_level = SD_SPINDLE_ON; 6120 un->un_f_pm_is_enabled = FALSE; 6121 return; 6122 } 6123 } 6124 /* 6125 * When components are initially created they are idle, 6126 * power up any non-removables. 6127 * Note: the return value of pm_raise_power can't be used 6128 * for determining if PM should be enabled for this device. 6129 * Even if you check the return values and remove this 6130 * property created above, the PM framework will not honor the 6131 * change after the first call to pm_raise_power. Hence, 6132 * removal of that property does not help if pm_raise_power 6133 * fails. In the case of removable media, the start/stop 6134 * will fail if the media is not present. 6135 */ 6136 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 6137 SD_PM_STATE_ACTIVE(un)) == DDI_SUCCESS)) { 6138 mutex_enter(SD_MUTEX(un)); 6139 un->un_power_level = SD_PM_STATE_ACTIVE(un); 6140 mutex_enter(&un->un_pm_mutex); 6141 /* Set to on and not busy. */ 6142 un->un_pm_count = 0; 6143 } else { 6144 mutex_enter(SD_MUTEX(un)); 6145 un->un_power_level = SD_PM_STATE_STOPPED(un); 6146 mutex_enter(&un->un_pm_mutex); 6147 /* Set to off. */ 6148 un->un_pm_count = -1; 6149 } 6150 mutex_exit(&un->un_pm_mutex); 6151 mutex_exit(SD_MUTEX(un)); 6152 } 6153 6154 6155 /* 6156 * Function: sd_ddi_suspend 6157 * 6158 * Description: Performs system power-down operations. This includes 6159 * setting the drive state to indicate its suspended so 6160 * that no new commands will be accepted. Also, wait for 6161 * all commands that are in transport or queued to a timer 6162 * for retry to complete. All timeout threads are cancelled. 
6163 *
6164 * Return Code: DDI_FAILURE or DDI_SUCCESS
6165 *
6166 * Context: Kernel thread context
6167 */
6168
6169 static int
6170 sd_ddi_suspend(dev_info_t *devi)
6171 {
6172 struct sd_lun *un;
6173 clock_t wait_cmds_complete;
6174
6175 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
6176 if (un == NULL) {
6177 return (DDI_FAILURE);
6178 }
6179
6180 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");
6181
6182 mutex_enter(SD_MUTEX(un));
6183
6184 /* Return success if the device is already suspended. */
6185 if (un->un_state == SD_STATE_SUSPENDED) {
6186 mutex_exit(SD_MUTEX(un));
6187 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6188 "device already suspended, exiting\n");
6189 return (DDI_SUCCESS);
6190 }
6191
6192 /* Return failure if the device is being used by HA */
6193 if (un->un_resvd_status &
6194 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
6195 mutex_exit(SD_MUTEX(un));
6196 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6197 "device in use by HA, exiting\n");
6198 return (DDI_FAILURE);
6199 }
6200
6201 /*
6202 * Return failure if the device is in a resource wait
6203 * or power changing state.
6204 */
6205 if ((un->un_state == SD_STATE_RWAIT) ||
6206 (un->un_state == SD_STATE_PM_CHANGING)) {
6207 mutex_exit(SD_MUTEX(un));
6208 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6209 "device in resource wait state, exiting\n");
6210 return (DDI_FAILURE);
6211 }
6212
6213
6214 un->un_save_state = un->un_last_state;
6215 New_state(un, SD_STATE_SUSPENDED);
6216
6217 /*
6218 * Wait for all commands that are in transport or queued to a timer
6219 * for retry to complete.
6220 *
6221 * While waiting, no new commands will be accepted or sent because of
6222 * the new state we set above.
6223 *
6224 * Wait till current operation has completed. If we are in the resource
6225 * wait state (with an intr outstanding) then we need to wait till the
6226 * intr completes and starts the next cmd. We want to wait for
6227 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND.
6228 */
6229 wait_cmds_complete = ddi_get_lbolt() +
6230 (sd_wait_cmds_complete * drv_usectohz(1000000));
6231
6232 while (un->un_ncmds_in_transport != 0) {
6233 /*
6234 * Fail if commands do not finish in the specified time.
6235 */
6236 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
6237 wait_cmds_complete) == -1) {
6238 /*
6239 * Undo the state changes made above. Everything
6240 * must go back to its original value.
6241 */
6242 Restore_state(un);
6243 un->un_last_state = un->un_save_state;
6244 /* Wake up any threads that might be waiting.
*/
6245 cv_broadcast(&un->un_suspend_cv);
6246 mutex_exit(SD_MUTEX(un));
6247 SD_ERROR(SD_LOG_IO_PM, un,
6248 "sd_ddi_suspend: failed due to outstanding cmds\n");
6249 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n");
6250 return (DDI_FAILURE);
6251 }
6252 }
6253
6254 /*
6255 * Cancel SCSI watch thread and timeouts, if any are active
6256 */
6257
6258 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
6259 opaque_t temp_token = un->un_swr_token;
6260 mutex_exit(SD_MUTEX(un));
6261 scsi_watch_suspend(temp_token);
6262 mutex_enter(SD_MUTEX(un));
6263 }
6264
6265 if (un->un_reset_throttle_timeid != NULL) {
6266 timeout_id_t temp_id = un->un_reset_throttle_timeid;
6267 un->un_reset_throttle_timeid = NULL;
6268 mutex_exit(SD_MUTEX(un));
6269 (void) untimeout(temp_id);
6270 mutex_enter(SD_MUTEX(un));
6271 }
6272
6273 if (un->un_dcvb_timeid != NULL) {
6274 timeout_id_t temp_id = un->un_dcvb_timeid;
6275 un->un_dcvb_timeid = NULL;
6276 mutex_exit(SD_MUTEX(un));
6277 (void) untimeout(temp_id);
6278 mutex_enter(SD_MUTEX(un));
6279 }
6280
6281 mutex_enter(&un->un_pm_mutex);
6282 if (un->un_pm_timeid != NULL) {
6283 timeout_id_t temp_id = un->un_pm_timeid;
6284 un->un_pm_timeid = NULL;
6285 mutex_exit(&un->un_pm_mutex);
6286 mutex_exit(SD_MUTEX(un));
6287 (void) untimeout(temp_id);
6288 mutex_enter(SD_MUTEX(un));
6289 } else {
6290 mutex_exit(&un->un_pm_mutex);
6291 }
6292
6293 if (un->un_rmw_msg_timeid != NULL) {
6294 timeout_id_t temp_id = un->un_rmw_msg_timeid;
6295 un->un_rmw_msg_timeid = NULL;
6296 mutex_exit(SD_MUTEX(un));
6297 (void) untimeout(temp_id);
6298 mutex_enter(SD_MUTEX(un));
6299 }
6300
6301 if (un->un_retry_timeid != NULL) {
6302 timeout_id_t temp_id = un->un_retry_timeid;
6303 un->un_retry_timeid = NULL;
6304 mutex_exit(SD_MUTEX(un));
6305 (void) untimeout(temp_id);
6306 mutex_enter(SD_MUTEX(un));
6307
6308 if (un->un_retry_bp != NULL) {
6309 un->un_retry_bp->av_forw = un->un_waitq_headp;
6310 un->un_waitq_headp = un->un_retry_bp;
6311 if (un->un_waitq_tailp == NULL) {
6312 un->un_waitq_tailp = un->un_retry_bp;
6313 }
6314 un->un_retry_bp = NULL;
6315 un->un_retry_statp = NULL;
6316 }
6317 }
6318
6319 if (un->un_direct_priority_timeid != NULL) {
6320 timeout_id_t temp_id = un->un_direct_priority_timeid;
6321 un->un_direct_priority_timeid = NULL;
6322 mutex_exit(SD_MUTEX(un));
6323 (void) untimeout(temp_id);
6324 mutex_enter(SD_MUTEX(un));
6325 }
6326
6327 if (un->un_f_is_fibre == TRUE) {
6328 /*
6329 * Remove callbacks for insert and remove events
6330 */
6331 if (un->un_insert_event != NULL) {
6332 mutex_exit(SD_MUTEX(un));
6333 (void) ddi_remove_event_handler(un->un_insert_cb_id);
6334 mutex_enter(SD_MUTEX(un));
6335 un->un_insert_event = NULL;
6336 }
6337
6338 if (un->un_remove_event != NULL) {
6339 mutex_exit(SD_MUTEX(un));
6340 (void) ddi_remove_event_handler(un->un_remove_cb_id);
6341 mutex_enter(SD_MUTEX(un));
6342 un->un_remove_event = NULL;
6343 }
6344 }
6345
6346 mutex_exit(SD_MUTEX(un));
6347
6348 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");
6349
6350 return (DDI_SUCCESS);
6351 }
6352
6353
6354 /*
6355 * Function: sd_ddi_resume
6356 *
6357 * Description: Performs system power-up operations.
6358 *
6359 * Return Code: DDI_SUCCESS
6360 * DDI_FAILURE
6361 *
6362 * Context: Kernel thread context
6363 */
6364
6365 static int
6366 sd_ddi_resume(dev_info_t *devi)
6367 {
6368 struct sd_lun *un;
6369
6370 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
6371 if (un == NULL) {
6372 return (DDI_FAILURE);
6373 }
6374
6375 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");
6376
6377 mutex_enter(SD_MUTEX(un));
6378 Restore_state(un);
6379
6380 /*
6381 * Restore the state which was saved so that un_last_state
6382 * holds the right state.
6383 */
6384 un->un_last_state = un->un_save_state;
6385 /*
6386 * Note: throttle comes back at full.
6387 * Also note: this MUST be done before calling pm_raise_power,
6388 * otherwise the system can get hung in biowait. The scenario where
6389 * this'll happen is under cpr suspend. Writing of the system
6390 * state goes through sddump, which writes 0 to un_throttle. If
6391 * writing the system state then fails, for example if the partition
6392 * is too small, then cpr attempts a resume. If throttle isn't
6393 * restored from the saved value until after calling pm_raise_power,
6394 * then cmds sent in sdpower are not transported and sd_send_scsi_cmd
6395 * hangs in biowait.
6396 */
6397 un->un_throttle = un->un_saved_throttle;
6398
6399 /*
6400 * The chance of failure is very rare as the only command done in the
6401 * power entry point is the START command when you transition from
6402 * 0->1 or unknown->1. Put it into the SPINDLE ON state irrespective
6403 * of the state at which suspend was done. Ignore the return value as
6404 * the resume should not fail. In the case of removable media the
6405 * media need not be inserted and hence there is a chance that raise
6406 * power will fail with media not present.
6407 */
6408 if (un->un_f_attach_spinup) {
6409 mutex_exit(SD_MUTEX(un));
6410 (void) pm_raise_power(SD_DEVINFO(un), 0,
6411 SD_PM_STATE_ACTIVE(un));
6412 mutex_enter(SD_MUTEX(un));
6413 }
6414
6415 /*
6416 * Don't broadcast to the suspend cv and therefore possibly
6417 * start I/O until after power has been restored.
6418 */
6419 cv_broadcast(&un->un_suspend_cv);
6420 cv_broadcast(&un->un_state_cv);
6421
6422 /* restart thread */
6423 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
6424 scsi_watch_resume(un->un_swr_token);
6425 }
6426
6427 #if (defined(__fibre))
6428 if (un->un_f_is_fibre == TRUE) {
6429 /*
6430 * Add callbacks for insert and remove events
6431 */
6432 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
6433 sd_init_event_callbacks(un);
6434 }
6435 }
6436 #endif
6437
6438 /*
6439 * Transport any pending commands to the target.
6440 *
6441 * If this is a low-activity device commands in queue will have to wait
6442 * until new commands come in, which may take a while. Also, we
6443 * specifically don't check un_ncmds_in_transport because we know that
6444 * there really are no commands in progress after the unit was
6445 * suspended and we could have reached the throttle level, been
6446 * suspended, and have no new commands coming in for a while. Highly
6447 * unlikely, but so is the low-activity disk scenario.
6448 */
6449 ddi_xbuf_dispatch(un->un_xbuf_attr);
6450
6451 sd_start_cmds(un, NULL);
6452 mutex_exit(SD_MUTEX(un));
6453
6454 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");
6455
6456 return (DDI_SUCCESS);
6457 }
6458
6459
6460 /*
6461 * Function: sd_pm_state_change
6462 *
6463 * Description: Change the driver power state.
6464 * Someone else is required to actually change the driver
6465 * power level.
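*
* Callers pair this bookkeeping with the actual device
* command; sdpower() below does, in sketch form:
*
*    (void) sd_pm_state_change(un, level,
*        SD_PM_STATE_CHANGE);
*    ... send START STOP UNIT to the device ...
*    (void) sd_pm_state_change(un, last_power_level,
*        SD_PM_STATE_ROLLBACK);    (only if the stop failed)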
6466 * 6467 * Arguments: un - driver soft state (unit) structure 6468 * level - the power level that is changed to 6469 * flag - to decide how to change the power state 6470 * 6471 * Return Code: DDI_SUCCESS 6472 * 6473 * Context: Kernel thread context 6474 */ 6475 static int 6476 sd_pm_state_change(struct sd_lun *un, int level, int flag) 6477 { 6478 ASSERT(un != NULL); 6479 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: entry\n"); 6480 6481 ASSERT(!mutex_owned(SD_MUTEX(un))); 6482 mutex_enter(SD_MUTEX(un)); 6483 6484 if (flag == SD_PM_STATE_ROLLBACK || SD_PM_IS_IO_CAPABLE(un, level)) { 6485 un->un_power_level = level; 6486 ASSERT(!mutex_owned(&un->un_pm_mutex)); 6487 mutex_enter(&un->un_pm_mutex); 6488 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 6489 un->un_pm_count++; 6490 ASSERT(un->un_pm_count == 0); 6491 } 6492 mutex_exit(&un->un_pm_mutex); 6493 } else { 6494 /* 6495 * Exit if power management is not enabled for this device, 6496 * or if the device is being used by HA. 6497 */ 6498 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 6499 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 6500 mutex_exit(SD_MUTEX(un)); 6501 SD_TRACE(SD_LOG_POWER, un, 6502 "sd_pm_state_change: exiting\n"); 6503 return (DDI_FAILURE); 6504 } 6505 6506 SD_INFO(SD_LOG_POWER, un, "sd_pm_state_change: " 6507 "un_ncmds_in_driver=%ld\n", un->un_ncmds_in_driver); 6508 6509 /* 6510 * See if the device is not busy, ie.: 6511 * - we have no commands in the driver for this device 6512 * - not waiting for resources 6513 */ 6514 if ((un->un_ncmds_in_driver == 0) && 6515 (un->un_state != SD_STATE_RWAIT)) { 6516 /* 6517 * The device is not busy, so it is OK to go to low 6518 * power state. Indicate low power, but rely on someone 6519 * else to actually change it. 6520 */ 6521 mutex_enter(&un->un_pm_mutex); 6522 un->un_pm_count = -1; 6523 mutex_exit(&un->un_pm_mutex); 6524 un->un_power_level = level; 6525 } 6526 } 6527 6528 mutex_exit(SD_MUTEX(un)); 6529 6530 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: exit\n"); 6531 6532 return (DDI_SUCCESS); 6533 } 6534 6535 6536 /* 6537 * Function: sd_pm_idletimeout_handler 6538 * 6539 * Description: A timer routine that's active only while a device is busy. 6540 * The purpose is to extend slightly the pm framework's busy 6541 * view of the device to prevent busy/idle thrashing for 6542 * back-to-back commands. Do this by comparing the current time 6543 * to the time at which the last command completed and when the 6544 * difference is greater than sd_pm_idletime, call 6545 * pm_idle_component. In addition to indicating idle to the pm 6546 * framework, update the chain type to again use the internal pm 6547 * layers of the driver. 6548 * 6549 * Arguments: arg - driver soft state (unit) structure 6550 * 6551 * Context: Executes in a timeout(9F) thread context 6552 */ 6553 6554 static void 6555 sd_pm_idletimeout_handler(void *arg) 6556 { 6557 const hrtime_t idletime = sd_pm_idletime * NANOSEC; 6558 struct sd_lun *un = arg; 6559 6560 mutex_enter(&sd_detach_mutex); 6561 if (un->un_detach_count != 0) { 6562 /* Abort if the instance is detaching */ 6563 mutex_exit(&sd_detach_mutex); 6564 return; 6565 } 6566 mutex_exit(&sd_detach_mutex); 6567 6568 /* 6569 * Grab both mutexes, in the proper order, since we're accessing 6570 * both PM and softstate variables. 
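* The order is SD_MUTEX(un) first, then un_pm_mutex (the same order
* used by sd_create_pm_components() and sd_ddi_suspend() above):
*
*    mutex_enter(SD_MUTEX(un));
*    mutex_enter(&un->un_pm_mutex);
*    ...
*    mutex_exit(&un->un_pm_mutex);
*    mutex_exit(SD_MUTEX(un));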
6571 */ 6572 mutex_enter(SD_MUTEX(un)); 6573 mutex_enter(&un->un_pm_mutex); 6574 if (((gethrtime() - un->un_pm_idle_time) > idletime) && 6575 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 6576 /* 6577 * Update the chain types. 6578 * This takes affect on the next new command received. 6579 */ 6580 if (un->un_f_non_devbsize_supported) { 6581 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 6582 } else { 6583 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 6584 } 6585 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 6586 6587 SD_TRACE(SD_LOG_IO_PM, un, 6588 "sd_pm_idletimeout_handler: idling device\n"); 6589 (void) pm_idle_component(SD_DEVINFO(un), 0); 6590 un->un_pm_idle_timeid = NULL; 6591 } else { 6592 un->un_pm_idle_timeid = 6593 timeout(sd_pm_idletimeout_handler, un, 6594 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 6595 } 6596 mutex_exit(&un->un_pm_mutex); 6597 mutex_exit(SD_MUTEX(un)); 6598 } 6599 6600 6601 /* 6602 * Function: sd_pm_timeout_handler 6603 * 6604 * Description: Callback to tell framework we are idle. 6605 * 6606 * Context: timeout(9f) thread context. 6607 */ 6608 6609 static void 6610 sd_pm_timeout_handler(void *arg) 6611 { 6612 struct sd_lun *un = arg; 6613 6614 (void) pm_idle_component(SD_DEVINFO(un), 0); 6615 mutex_enter(&un->un_pm_mutex); 6616 un->un_pm_timeid = NULL; 6617 mutex_exit(&un->un_pm_mutex); 6618 } 6619 6620 6621 /* 6622 * Function: sdpower 6623 * 6624 * Description: PM entry point. 6625 * 6626 * Return Code: DDI_SUCCESS 6627 * DDI_FAILURE 6628 * 6629 * Context: Kernel thread context 6630 */ 6631 6632 static int 6633 sdpower(dev_info_t *devi, int component, int level) 6634 { 6635 struct sd_lun *un; 6636 int instance; 6637 int rval = DDI_SUCCESS; 6638 uint_t i, log_page_size, maxcycles, ncycles; 6639 uchar_t *log_page_data; 6640 int log_sense_page; 6641 int medium_present; 6642 time_t intvlp; 6643 struct pm_trans_data sd_pm_tran_data; 6644 uchar_t save_state = SD_STATE_NORMAL; 6645 int sval; 6646 uchar_t state_before_pm; 6647 int got_semaphore_here; 6648 sd_ssc_t *ssc; 6649 int last_power_level = SD_SPINDLE_UNINIT; 6650 6651 instance = ddi_get_instance(devi); 6652 6653 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 6654 !SD_PM_IS_LEVEL_VALID(un, level) || component != 0) { 6655 return (DDI_FAILURE); 6656 } 6657 6658 ssc = sd_ssc_init(un); 6659 6660 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 6661 6662 /* 6663 * Must synchronize power down with close. 6664 * Attempt to decrement/acquire the open/close semaphore, 6665 * but do NOT wait on it. If it's not greater than zero, 6666 * ie. it can't be decremented without waiting, then 6667 * someone else, either open or close, already has it 6668 * and the try returns 0. Use that knowledge here to determine 6669 * if it's OK to change the device power level. 6670 * Also, only increment it on exit if it was decremented, ie. gotten, 6671 * here. 6672 */ 6673 got_semaphore_here = sema_tryp(&un->un_semoclose); 6674 6675 mutex_enter(SD_MUTEX(un)); 6676 6677 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n", 6678 un->un_ncmds_in_driver); 6679 6680 /* 6681 * If un_ncmds_in_driver is non-zero it indicates commands are 6682 * already being processed in the driver, or if the semaphore was 6683 * not gotten here it indicates an open or close is being processed. 6684 * At the same time somebody is requesting to go to a lower power 6685 * that can't perform I/O, which can't happen, therefore we need to 6686 * return failure. 
6687 */ 6688 if ((!SD_PM_IS_IO_CAPABLE(un, level)) && 6689 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) { 6690 mutex_exit(SD_MUTEX(un)); 6691 6692 if (got_semaphore_here != 0) { 6693 sema_v(&un->un_semoclose); 6694 } 6695 SD_TRACE(SD_LOG_IO_PM, un, 6696 "sdpower: exit, device has queued cmds.\n"); 6697 6698 goto sdpower_failed; 6699 } 6700 6701 /* 6702 * if it is OFFLINE that means the disk is completely dead 6703 * in our case we have to put the disk in on or off by sending commands 6704 * Of course that will fail anyway so return back here. 6705 * 6706 * Power changes to a device that's OFFLINE or SUSPENDED 6707 * are not allowed. 6708 */ 6709 if ((un->un_state == SD_STATE_OFFLINE) || 6710 (un->un_state == SD_STATE_SUSPENDED)) { 6711 mutex_exit(SD_MUTEX(un)); 6712 6713 if (got_semaphore_here != 0) { 6714 sema_v(&un->un_semoclose); 6715 } 6716 SD_TRACE(SD_LOG_IO_PM, un, 6717 "sdpower: exit, device is off-line.\n"); 6718 6719 goto sdpower_failed; 6720 } 6721 6722 /* 6723 * Change the device's state to indicate it's power level 6724 * is being changed. Do this to prevent a power off in the 6725 * middle of commands, which is especially bad on devices 6726 * that are really powered off instead of just spun down. 6727 */ 6728 state_before_pm = un->un_state; 6729 un->un_state = SD_STATE_PM_CHANGING; 6730 6731 mutex_exit(SD_MUTEX(un)); 6732 6733 /* 6734 * If log sense command is not supported, bypass the 6735 * following checking, otherwise, check the log sense 6736 * information for this device. 6737 */ 6738 if (SD_PM_STOP_MOTOR_NEEDED(un, level) && 6739 un->un_f_log_sense_supported) { 6740 /* 6741 * Get the log sense information to understand whether the 6742 * the powercycle counts have gone beyond the threshhold. 6743 */ 6744 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 6745 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 6746 6747 mutex_enter(SD_MUTEX(un)); 6748 log_sense_page = un->un_start_stop_cycle_page; 6749 mutex_exit(SD_MUTEX(un)); 6750 6751 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 6752 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT); 6753 6754 if (rval != 0) { 6755 if (rval == EIO) 6756 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 6757 else 6758 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6759 } 6760 6761 #ifdef SDDEBUG 6762 if (sd_force_pm_supported) { 6763 /* Force a successful result */ 6764 rval = 0; 6765 } 6766 #endif 6767 if (rval != 0) { 6768 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 6769 "Log Sense Failed\n"); 6770 6771 kmem_free(log_page_data, log_page_size); 6772 /* Cannot support power management on those drives */ 6773 6774 if (got_semaphore_here != 0) { 6775 sema_v(&un->un_semoclose); 6776 } 6777 /* 6778 * On exit put the state back to it's original value 6779 * and broadcast to anyone waiting for the power 6780 * change completion. 
6781 */ 6782 mutex_enter(SD_MUTEX(un)); 6783 un->un_state = state_before_pm; 6784 cv_broadcast(&un->un_suspend_cv); 6785 mutex_exit(SD_MUTEX(un)); 6786 SD_TRACE(SD_LOG_IO_PM, un, 6787 "sdpower: exit, Log Sense Failed.\n"); 6788 6789 goto sdpower_failed; 6790 } 6791 6792 /* 6793 * From the page data - Convert the essential information to 6794 * pm_trans_data 6795 */ 6796 maxcycles = 6797 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) | 6798 (log_page_data[0x1E] << 8) | log_page_data[0x1F]; 6799 6800 ncycles = 6801 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) | 6802 (log_page_data[0x26] << 8) | log_page_data[0x27]; 6803 6804 if (un->un_f_pm_log_sense_smart) { 6805 sd_pm_tran_data.un.smart_count.allowed = maxcycles; 6806 sd_pm_tran_data.un.smart_count.consumed = ncycles; 6807 sd_pm_tran_data.un.smart_count.flag = 0; 6808 sd_pm_tran_data.format = DC_SMART_FORMAT; 6809 } else { 6810 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles; 6811 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles; 6812 for (i = 0; i < DC_SCSI_MFR_LEN; i++) { 6813 sd_pm_tran_data.un.scsi_cycles.svc_date[i] = 6814 log_page_data[8+i]; 6815 } 6816 sd_pm_tran_data.un.scsi_cycles.flag = 0; 6817 sd_pm_tran_data.format = DC_SCSI_FORMAT; 6818 } 6819 6820 kmem_free(log_page_data, log_page_size); 6821 6822 /* 6823 * Call pm_trans_check routine to get the Ok from 6824 * the global policy 6825 */ 6826 rval = pm_trans_check(&sd_pm_tran_data, &intvlp); 6827 #ifdef SDDEBUG 6828 if (sd_force_pm_supported) { 6829 /* Force a successful result */ 6830 rval = 1; 6831 } 6832 #endif 6833 switch (rval) { 6834 case 0: 6835 /* 6836 * Not Ok to Power cycle or error in parameters passed 6837 * Would have given the advised time to consider power 6838 * cycle. Based on the new intvlp parameter we are 6839 * supposed to pretend we are busy so that pm framework 6840 * will never call our power entry point. Because of 6841 * that install a timeout handler and wait for the 6842 * recommended time to elapse so that power management 6843 * can be effective again. 6844 * 6845 * To effect this behavior, call pm_busy_component to 6846 * indicate to the framework this device is busy. 6847 * By not adjusting un_pm_count the rest of PM in 6848 * the driver will function normally, and independent 6849 * of this but because the framework is told the device 6850 * is busy it won't attempt powering down until it gets 6851 * a matching idle. The timeout handler sends this. 6852 * Note: sd_pm_entry can't be called here to do this 6853 * because sdpower may have been called as a result 6854 * of a call to pm_raise_power from within sd_pm_entry. 6855 * 6856 * If a timeout handler is already active then 6857 * don't install another. 6858 */ 6859 mutex_enter(&un->un_pm_mutex); 6860 if (un->un_pm_timeid == NULL) { 6861 un->un_pm_timeid = 6862 timeout(sd_pm_timeout_handler, 6863 un, intvlp * drv_usectohz(1000000)); 6864 mutex_exit(&un->un_pm_mutex); 6865 (void) pm_busy_component(SD_DEVINFO(un), 0); 6866 } else { 6867 mutex_exit(&un->un_pm_mutex); 6868 } 6869 if (got_semaphore_here != 0) { 6870 sema_v(&un->un_semoclose); 6871 } 6872 /* 6873 * On exit put the state back to it's original value 6874 * and broadcast to anyone waiting for the power 6875 * change completion. 
6876 */ 6877 mutex_enter(SD_MUTEX(un)); 6878 un->un_state = state_before_pm; 6879 cv_broadcast(&un->un_suspend_cv); 6880 mutex_exit(SD_MUTEX(un)); 6881 6882 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, " 6883 "trans check Failed, not ok to power cycle.\n"); 6884 6885 goto sdpower_failed; 6886 case -1: 6887 if (got_semaphore_here != 0) { 6888 sema_v(&un->un_semoclose); 6889 } 6890 /* 6891 * On exit put the state back to it's original value 6892 * and broadcast to anyone waiting for the power 6893 * change completion. 6894 */ 6895 mutex_enter(SD_MUTEX(un)); 6896 un->un_state = state_before_pm; 6897 cv_broadcast(&un->un_suspend_cv); 6898 mutex_exit(SD_MUTEX(un)); 6899 SD_TRACE(SD_LOG_IO_PM, un, 6900 "sdpower: exit, trans check command Failed.\n"); 6901 6902 goto sdpower_failed; 6903 } 6904 } 6905 6906 if (!SD_PM_IS_IO_CAPABLE(un, level)) { 6907 /* 6908 * Save the last state... if the STOP FAILS we need it 6909 * for restoring 6910 */ 6911 mutex_enter(SD_MUTEX(un)); 6912 save_state = un->un_last_state; 6913 last_power_level = un->un_power_level; 6914 /* 6915 * There must not be any cmds. getting processed 6916 * in the driver when we get here. Power to the 6917 * device is potentially going off. 6918 */ 6919 ASSERT(un->un_ncmds_in_driver == 0); 6920 mutex_exit(SD_MUTEX(un)); 6921 6922 /* 6923 * For now PM suspend the device completely before spindle is 6924 * turned off 6925 */ 6926 if ((rval = sd_pm_state_change(un, level, SD_PM_STATE_CHANGE)) 6927 == DDI_FAILURE) { 6928 if (got_semaphore_here != 0) { 6929 sema_v(&un->un_semoclose); 6930 } 6931 /* 6932 * On exit put the state back to it's original value 6933 * and broadcast to anyone waiting for the power 6934 * change completion. 6935 */ 6936 mutex_enter(SD_MUTEX(un)); 6937 un->un_state = state_before_pm; 6938 un->un_power_level = last_power_level; 6939 cv_broadcast(&un->un_suspend_cv); 6940 mutex_exit(SD_MUTEX(un)); 6941 SD_TRACE(SD_LOG_IO_PM, un, 6942 "sdpower: exit, PM suspend Failed.\n"); 6943 6944 goto sdpower_failed; 6945 } 6946 } 6947 6948 /* 6949 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open, 6950 * close, or strategy. Dump no long uses this routine, it uses it's 6951 * own code so it can be done in polled mode. 6952 */ 6953 6954 medium_present = TRUE; 6955 6956 /* 6957 * When powering up, issue a TUR in case the device is at unit 6958 * attention. Don't do retries. Bypass the PM layer, otherwise 6959 * a deadlock on un_pm_busy_cv will occur. 6960 */ 6961 if (SD_PM_IS_IO_CAPABLE(un, level)) { 6962 sval = sd_send_scsi_TEST_UNIT_READY(ssc, 6963 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 6964 if (sval != 0) 6965 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6966 } 6967 6968 if (un->un_f_power_condition_supported) { 6969 char *pm_condition_name[] = {"STOPPED", "STANDBY", 6970 "IDLE", "ACTIVE"}; 6971 SD_TRACE(SD_LOG_IO_PM, un, 6972 "sdpower: sending \'%s\' power condition", 6973 pm_condition_name[level]); 6974 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION, 6975 sd_pl2pc[level], SD_PATH_DIRECT); 6976 } else { 6977 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 6978 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 6979 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 6980 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : 6981 SD_TARGET_STOP), SD_PATH_DIRECT); 6982 } 6983 if (sval != 0) { 6984 if (sval == EIO) 6985 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 6986 else 6987 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6988 } 6989 6990 /* Command failed, check for media present. 
*/
6991 if ((sval == ENXIO) && un->un_f_has_removable_media) {
6992 medium_present = FALSE;
6993 }
6994
6995 /*
6996 * The conditions of interest here are:
6997 * if a spindle off with media present fails,
6998 * then restore the state and return an error.
6999 * else if a spindle on fails,
7000 * then return an error (there's no state to restore).
7001 * In all other cases we setup for the new state
7002 * and return success.
7003 */
7004 if (!SD_PM_IS_IO_CAPABLE(un, level)) {
7005 if ((medium_present == TRUE) && (sval != 0)) {
7006 /* The stop command from above failed */
7007 rval = DDI_FAILURE;
7008 /*
7009 * The stop command failed, and we have media
7010 * present. Put the level back by calling
7011 * sd_pm_state_change() with SD_PM_STATE_ROLLBACK
7012 * and set the state back to its previous value.
7013 */
7014 (void) sd_pm_state_change(un, last_power_level,
7015 SD_PM_STATE_ROLLBACK);
7016 mutex_enter(SD_MUTEX(un));
7017 un->un_last_state = save_state;
7018 mutex_exit(SD_MUTEX(un));
7019 } else if (un->un_f_monitor_media_state) {
7020 /*
7021 * The stop command from above succeeded.
7022 * Terminate watch thread in case of removable media
7023 * devices going into low power state. This is as per
7024 * the requirements of the pm framework, otherwise
7025 * commands will be generated for the device (through
7026 * watch thread), even when the device is in low power
7027 * state.
7028 */
7029 mutex_enter(SD_MUTEX(un));
7030 un->un_f_watcht_stopped = FALSE;
7031 if (un->un_swr_token != NULL) {
7032 opaque_t temp_token = un->un_swr_token;
7033 un->un_f_watcht_stopped = TRUE;
7034 un->un_swr_token = NULL;
7035 mutex_exit(SD_MUTEX(un));
7036 (void) scsi_watch_request_terminate(temp_token,
7037 SCSI_WATCH_TERMINATE_ALL_WAIT);
7038 } else {
7039 mutex_exit(SD_MUTEX(un));
7040 }
7041 }
7042 } else {
7043 /*
7044 * The level requested is I/O capable.
7045 * Legacy behavior: return success on a failed spinup
7046 * if there is no media in the drive.
7047 * Do this by looking at medium_present here.
7048 */
7049 if ((sval != 0) && medium_present) {
7050 /* The start command from above failed */
7051 rval = DDI_FAILURE;
7052 } else {
7053 /*
7054 * The start command from above succeeded;
7055 * PM resume the devices now that we have
7056 * started the disks.
7057 */
7058 (void) sd_pm_state_change(un, level,
7059 SD_PM_STATE_CHANGE);
7060
7061 /*
7062 * Resume the watch thread since it was suspended
7063 * when the device went into low power mode.
7064 */
7065 if (un->un_f_monitor_media_state) {
7066 mutex_enter(SD_MUTEX(un));
7067 if (un->un_f_watcht_stopped == TRUE) {
7068 opaque_t temp_token;
7069
7070 un->un_f_watcht_stopped = FALSE;
7071 mutex_exit(SD_MUTEX(un));
7072 temp_token =
7073 sd_watch_request_submit(un);
7074 mutex_enter(SD_MUTEX(un));
7075 un->un_swr_token = temp_token;
7076 }
7077 mutex_exit(SD_MUTEX(un));
7078 }
7079 }
7080 }
7081
7082 if (got_semaphore_here != 0) {
7083 sema_v(&un->un_semoclose);
7084 }
7085 /*
7086 * On exit put the state back to its original value
7087 * and broadcast to anyone waiting for the power
7088 * change completion.
7089 */
7090 mutex_enter(SD_MUTEX(un));
7091 un->un_state = state_before_pm;
7092 cv_broadcast(&un->un_suspend_cv);
7093 mutex_exit(SD_MUTEX(un));
7094
7095 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);
7096
7097 sd_ssc_fini(ssc);
7098 return (rval);
7099
7100 sdpower_failed:
7101
7102 sd_ssc_fini(ssc);
7103 return (DDI_FAILURE);
7104 }
7105
7106
7107
7108 /*
7109 * Function: sdattach
7110 *
7111 * Description: Driver's attach(9e) entry point function.
7111 * 7112 * Arguments: devi - opaque device info handle 7113 * cmd - attach type 7114 * 7115 * Return Code: DDI_SUCCESS 7116 * DDI_FAILURE 7117 * 7118 * Context: Kernel thread context 7119 */ 7120 7121 static int 7122 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 7123 { 7124 switch (cmd) { 7125 case DDI_ATTACH: 7126 return (sd_unit_attach(devi)); 7127 case DDI_RESUME: 7128 return (sd_ddi_resume(devi)); 7129 default: 7130 break; 7131 } 7132 return (DDI_FAILURE); 7133 } 7134 7135 7136 /* 7137 * Function: sddetach 7138 * 7139 * Description: Driver's detach(9E) entry point function. 7140 * 7141 * Arguments: devi - opaque device info handle 7142 * cmd - detach type 7143 * 7144 * Return Code: DDI_SUCCESS 7145 * DDI_FAILURE 7146 * 7147 * Context: Kernel thread context 7148 */ 7149 7150 static int 7151 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 7152 { 7153 switch (cmd) { 7154 case DDI_DETACH: 7155 return (sd_unit_detach(devi)); 7156 case DDI_SUSPEND: 7157 return (sd_ddi_suspend(devi)); 7158 default: 7159 break; 7160 } 7161 return (DDI_FAILURE); 7162 } 7163 7164 7165 /* 7166 * Function: sd_sync_with_callback 7167 * 7168 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 7169 * state while the callback routine is active. 7170 * 7171 * Arguments: un: softstate structure for the instance 7172 * 7173 * Context: Kernel thread context 7174 */ 7175 7176 static void 7177 sd_sync_with_callback(struct sd_lun *un) 7178 { 7179 ASSERT(un != NULL); 7180 7181 mutex_enter(SD_MUTEX(un)); 7182 7183 ASSERT(un->un_in_callback >= 0); 7184 7185 while (un->un_in_callback > 0) { 7186 mutex_exit(SD_MUTEX(un)); 7187 delay(2); 7188 mutex_enter(SD_MUTEX(un)); 7189 } 7190 7191 mutex_exit(SD_MUTEX(un)); 7192 } 7193 7194 /* 7195 * Function: sd_unit_attach 7196 * 7197 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 7198 * the soft state structure for the device and performs 7199 * all necessary structure and device initializations. 7200 * 7201 * Arguments: devi: the system's dev_info_t for the device. 7202 * 7203 * Return Code: DDI_SUCCESS if attach is successful. 7204 * DDI_FAILURE if any part of the attach fails. 7205 * 7206 * Context: Called at attach(9E) time for the DDI_ATTACH flag. 7207 * Kernel thread context only. Can sleep. 7208 */ 7209 7210 static int 7211 sd_unit_attach(dev_info_t *devi) 7212 { 7213 struct scsi_device *devp; 7214 struct sd_lun *un; 7215 char *variantp; 7216 char name_str[48]; 7217 int reservation_flag = SD_TARGET_IS_UNRESERVED; 7218 int instance; 7219 int rval; 7220 int wc_enabled; 7221 int wc_changeable; 7222 int tgt; 7223 uint64_t capacity; 7224 uint_t lbasize = 0; 7225 dev_info_t *pdip = ddi_get_parent(devi); 7226 int offbyone = 0; 7227 int geom_label_valid = 0; 7228 sd_ssc_t *ssc; 7229 int status; 7230 struct sd_fm_internal *sfip = NULL; 7231 int max_xfer_size; 7232 7233 /* 7234 * Retrieve the target driver's private data area. This was set 7235 * up by the HBA. 7236 */ 7237 devp = ddi_get_driver_private(devi); 7238 7239 /* 7240 * Retrieve the target ID of the device. 7241 */ 7242 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7243 SCSI_ADDR_PROP_TARGET, -1); 7244 7245 /* 7246 * Since we have no idea what state things were left in by the last 7247 * user of the device, set up some 'default' settings, i.e., turn 7248 * them off. The scsi_ifsetcap calls force re-negotiations with the 7249 * drive. Do this before the scsi_probe, which sends an inquiry. 7250 * This is a fix for bug (4430280).
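 * (The capabilities reset below are "lun-reset", "wide-xfer" and
 * "auto-rqsense"; each scsi_ifsetcap() call turns the capability off.)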
7251 * Of special importance is wide-xfer. The drive could have been left 7252 * in wide transfer mode by the last driver to communicate with it 7253 * (this includes us). If that's the case, and if the following is not 7254 * set up properly or we don't re-negotiate with the drive prior to 7255 * transferring data to/from the drive, it causes bus parity errors, 7256 * data overruns, and unexpected interrupts. This first occurred when 7257 * the fix for bug (4378686) was made. 7258 */ 7259 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 7260 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 7261 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 7262 7263 /* 7264 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 7265 * on a target. Setting it per lun instance actually sets the 7266 * capability of this target, which affects those luns already 7267 * attached on the same target. So during attach, we can only disable 7268 * this capability when no other lun has been attached on this 7269 * target. By doing this, we assume a target has the same tagged-qing 7270 * capability for every lun. The condition can be removed when the HBA 7271 * is changed to support per-lun tagged-qing capability. 7272 */ 7273 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7274 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 7275 } 7276 7277 /* 7278 * Use scsi_probe() to issue an INQUIRY command to the device. 7279 * This call will allocate and fill in the scsi_inquiry structure 7280 * and point the sd_inq member of the scsi_device structure to it. 7281 * If the attach succeeds, then this memory will not be de-allocated 7282 * (via scsi_unprobe()) until the instance is detached. 7283 */ 7284 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 7285 goto probe_failed; 7286 } 7287 7288 /* 7289 * Check the device type as specified in the inquiry data and 7290 * claim it if it is of a type that we support. 7291 */ 7292 switch (devp->sd_inq->inq_dtype) { 7293 case DTYPE_DIRECT: 7294 break; 7295 case DTYPE_RODIRECT: 7296 break; 7297 case DTYPE_OPTICAL: 7298 break; 7299 case DTYPE_NOTPRESENT: 7300 default: 7301 /* Unsupported device type; fail the attach. */ 7302 goto probe_failed; 7303 } 7304 7305 /* 7306 * Allocate the soft state structure for this unit. 7307 * 7308 * We rely upon this memory being set to all zeroes by 7309 * ddi_soft_state_zalloc(). We assume that any member of the 7310 * soft state structure that is not explicitly initialized by 7311 * this routine will have a value of zero. 7312 */ 7313 instance = ddi_get_instance(devp->sd_dev); 7314 #ifndef XPV_HVM_DRIVER 7315 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 7316 goto probe_failed; 7317 } 7318 #endif /* !XPV_HVM_DRIVER */ 7319 7320 /* 7321 * Retrieve a pointer to the newly-allocated soft state. 7322 * 7323 * This should NEVER fail if the ddi_soft_state_zalloc() call above 7324 * was successful, unless something has gone horribly wrong and the 7325 * ddi's soft state internals are corrupt (in which case it is 7326 * probably better to halt here than just fail the attach....) 7327 */ 7328 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 7329 panic("sd_unit_attach: NULL soft state on instance:0x%x", 7330 instance); 7331 /*NOTREACHED*/ 7332 } 7333 7334 /* 7335 * Link the back ptr of the driver soft state to the scsi_device 7336 * struct for this lun. 7337 * Save a pointer to the softstate in the driver-private area of 7338 * the scsi_device struct.
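 * This two-way linkage lets code holding either structure reach the
 * other: devp->sd_private yields the per-lun soft state, and
 * un->un_sd yields the scsi_device.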
7339 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 7340 * we first set un->un_sd below. 7341 */ 7342 un->un_sd = devp; 7343 devp->sd_private = (opaque_t)un; 7344 7345 /* 7346 * The following must be after devp is stored in the soft state struct. 7347 */ 7348 #ifdef SDDEBUG 7349 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7350 "%s_unit_attach: un:0x%p instance:%d\n", 7351 ddi_driver_name(devi), un, instance); 7352 #endif 7353 7354 /* 7355 * Set up the device type and node type (for the minor nodes). 7356 * By default we assume that the device can at least support the 7357 * Common Command Set. Call it a CD-ROM if it reports itself 7358 * as a RODIRECT device. 7359 */ 7360 switch (devp->sd_inq->inq_dtype) { 7361 case DTYPE_RODIRECT: 7362 un->un_node_type = DDI_NT_CD_CHAN; 7363 un->un_ctype = CTYPE_CDROM; 7364 break; 7365 case DTYPE_OPTICAL: 7366 un->un_node_type = DDI_NT_BLOCK_CHAN; 7367 un->un_ctype = CTYPE_ROD; 7368 break; 7369 default: 7370 un->un_node_type = DDI_NT_BLOCK_CHAN; 7371 un->un_ctype = CTYPE_CCS; 7372 break; 7373 } 7374 7375 /* 7376 * Try to read the interconnect type from the HBA. 7377 * 7378 * Note: This driver is currently compiled as two binaries, a parallel 7379 * scsi version (sd) and a fibre channel version (ssd). All functional 7380 * differences are determined at compile time. In the future a single 7381 * binary will be provided and the interconnect type will be used to 7382 * differentiate between fibre and parallel scsi behaviors. At that time 7383 * it will be necessary for all fibre channel HBAs to support this 7384 * property. 7385 * 7386 * Set un_f_is_fibre to TRUE (default fibre). 7387 */ 7388 un->un_f_is_fibre = TRUE; 7389 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 7390 case INTERCONNECT_SSA: 7391 un->un_interconnect_type = SD_INTERCONNECT_SSA; 7392 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7393 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 7394 break; 7395 case INTERCONNECT_PARALLEL: 7396 un->un_f_is_fibre = FALSE; 7397 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7398 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7399 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 7400 break; 7401 case INTERCONNECT_SAS: 7402 un->un_f_is_fibre = FALSE; 7403 un->un_interconnect_type = SD_INTERCONNECT_SAS; 7404 un->un_node_type = DDI_NT_BLOCK_SAS; 7405 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7406 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un); 7407 break; 7408 case INTERCONNECT_SATA: 7409 un->un_f_is_fibre = FALSE; 7410 un->un_interconnect_type = SD_INTERCONNECT_SATA; 7411 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7412 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 7413 break; 7414 case INTERCONNECT_FIBRE: 7415 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 7416 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7417 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 7418 break; 7419 case INTERCONNECT_FABRIC: 7420 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 7421 un->un_node_type = DDI_NT_BLOCK_FABRIC; 7422 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7423 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 7424 break; 7425 default: 7426 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 7427 /* 7428 * The HBA does not support the "interconnect-type" property 7429 * (or did not provide a recognized type). 7430 * 7431 * Note: This will be obsoleted when a single fibre channel 7432 * and parallel scsi driver is delivered.
In the meantime the 7433 * interconnect type will be set to the platform default. If that 7434 * type is not parallel SCSI, it means that we should be 7435 * assuming "ssd" semantics. However, here this also means that 7436 * the FC HBA is not supporting the "interconnect-type" property 7437 * like we expect it to, so log this occurrence. 7438 */ 7439 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 7440 if (!SD_IS_PARALLEL_SCSI(un)) { 7441 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7442 "sd_unit_attach: un:0x%p Assuming " 7443 "INTERCONNECT_FIBRE\n", un); 7444 } else { 7445 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7446 "sd_unit_attach: un:0x%p Assuming " 7447 "INTERCONNECT_PARALLEL\n", un); 7448 un->un_f_is_fibre = FALSE; 7449 } 7450 #else 7451 /* 7452 * Note: This source will be implemented when a single fibre 7453 * channel and parallel scsi driver is delivered. The default 7454 * will be to assume that if a device does not support the 7455 * "interconnect-type" property it is a parallel SCSI HBA and 7456 * we will set the interconnect type for parallel scsi. 7457 */ 7458 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7459 un->un_f_is_fibre = FALSE; 7460 #endif 7461 break; 7462 } 7463 7464 if (un->un_f_is_fibre == TRUE) { 7465 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 7466 SCSI_VERSION_3) { 7467 switch (un->un_interconnect_type) { 7468 case SD_INTERCONNECT_FIBRE: 7469 case SD_INTERCONNECT_SSA: 7470 un->un_node_type = DDI_NT_BLOCK_WWN; 7471 break; 7472 default: 7473 break; 7474 } 7475 } 7476 } 7477 7478 /* 7479 * Initialize the Request Sense command for the target 7480 */ 7481 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 7482 goto alloc_rqs_failed; 7483 } 7484 7485 /* 7486 * Set un_retry_count to SD_RETRY_COUNT; this is OK for SPARC 7487 * with separate binaries for sd and ssd. 7488 * 7489 * x86 has one binary, so un_retry_count is set based on connection 7490 * type. The hardcoded values will go away when SPARC uses one binary 7491 * for sd and ssd. These hardcoded values need to match 7492 * SD_RETRY_COUNT in sddef.h. 7493 * The value used is based on interconnect type: 7494 * fibre = 3, parallel = 5. 7495 */ 7496 #if defined(__i386) || defined(__amd64) 7497 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 7498 #else 7499 un->un_retry_count = SD_RETRY_COUNT; 7500 #endif 7501 7502 /* 7503 * Set the per disk retry count to the default number of retries 7504 * for disks and CDROMs. This value can be overridden by the 7505 * disk property list or an entry in sd.conf. 7506 */ 7507 un->un_notready_retry_count = 7508 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 7509 : DISK_NOT_READY_RETRY_COUNT(un); 7510 7511 /* 7512 * Set the busy retry count to the default value of un_retry_count. 7513 * This can be overridden by entries in sd.conf or the device 7514 * config table. 7515 */ 7516 un->un_busy_retry_count = un->un_retry_count; 7517 7518 /* 7519 * Init the reset threshold for retries. This number determines 7520 * how many retries must be performed before a reset can be issued 7521 * (for certain error conditions). This can be overridden by entries 7522 * in sd.conf or the device config table. 7523 */ 7524 un->un_reset_retry_count = (un->un_retry_count / 2); 7525 7526 /* 7527 * Set un_victim_retry_count to twice the default un_retry_count. 7528 */ 7529 un->un_victim_retry_count = (2 * un->un_retry_count); 7530 7531 /* 7532 * Set the reservation release timeout to the default value of 7533 * 5 seconds. This can be overridden by entries in ssd.conf or the 7534 * device config table.
7535 */ 7536 un->un_reserve_release_time = 5; 7537 7538 /* 7539 * Set up the default maximum transfer size. Note that this may 7540 * get updated later in the attach, when setting up default wide 7541 * operations for disks. 7542 */ 7543 #if defined(__i386) || defined(__amd64) 7544 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 7545 un->un_partial_dma_supported = 1; 7546 #else 7547 un->un_max_xfer_size = (uint_t)maxphys; 7548 #endif 7549 7550 /* 7551 * Get "allow bus device reset" property (defaults to "enabled" if 7552 * the property was not defined). This is to disable bus resets for 7553 * certain kinds of error recovery. Note: In the future when a run-time 7554 * fibre check is available the soft state flag should default to 7555 * enabled. 7556 */ 7557 if (un->un_f_is_fibre == TRUE) { 7558 un->un_f_allow_bus_device_reset = TRUE; 7559 } else { 7560 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7561 "allow-bus-device-reset", 1) != 0) { 7562 un->un_f_allow_bus_device_reset = TRUE; 7563 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7564 "sd_unit_attach: un:0x%p Bus device reset " 7565 "enabled\n", un); 7566 } else { 7567 un->un_f_allow_bus_device_reset = FALSE; 7568 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7569 "sd_unit_attach: un:0x%p Bus device reset " 7570 "disabled\n", un); 7571 } 7572 } 7573 7574 /* 7575 * Check if this is an ATAPI device. ATAPI devices use Group 1 7576 * Read/Write commands and Group 2 Mode Sense/Select commands. 7577 * 7578 * Note: The "obsolete" way of doing this is to check for the "atapi" 7579 * property. The new "variant" property with a value of "atapi" has been 7580 * introduced so that future 'variants' of standard SCSI behavior (like 7581 * atapi) could be specified by the underlying HBA drivers by supplying 7582 * a new value for the "variant" property, instead of having to define a 7583 * new property. 7584 */ 7585 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7586 un->un_f_cfg_is_atapi = TRUE; 7587 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7588 "sd_unit_attach: un:0x%p Atapi device\n", un); 7589 } 7590 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7591 &variantp) == DDI_PROP_SUCCESS) { 7592 if (strcmp(variantp, "atapi") == 0) { 7593 un->un_f_cfg_is_atapi = TRUE; 7594 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7595 "sd_unit_attach: un:0x%p Atapi device\n", un); 7596 } 7597 ddi_prop_free(variantp); 7598 } 7599 7600 un->un_cmd_timeout = SD_IO_TIME; 7601 7602 un->un_busy_timeout = SD_BSY_TIMEOUT; 7603 7604 /* Info on current states, statuses, etc. (Updated frequently) */ 7605 un->un_state = SD_STATE_NORMAL; 7606 un->un_last_state = SD_STATE_NORMAL; 7607 7608 /* Control & status info for command throttling */ 7609 un->un_throttle = sd_max_throttle; 7610 un->un_saved_throttle = sd_max_throttle; 7611 un->un_min_throttle = sd_min_throttle; 7612 7613 if (un->un_f_is_fibre == TRUE) { 7614 un->un_f_use_adaptive_throttle = TRUE; 7615 } else { 7616 un->un_f_use_adaptive_throttle = FALSE; 7617 } 7618 7619 /* Removable media support. */ 7620 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7621 un->un_mediastate = DKIO_NONE; 7622 un->un_specified_mediastate = DKIO_NONE; 7623 7624 /* CVs for suspend/resume (PM or DR) */ 7625 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7626 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7627 7628 /* Power management support. 
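 * un_power_level starts out as SD_SPINDLE_UNINIT; PM for the unit is
 * set up later in this routine by sd_setup_pm().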
*/ 7629 un->un_power_level = SD_SPINDLE_UNINIT; 7630 7631 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 7632 un->un_f_wcc_inprog = 0; 7633 7634 /* 7635 * The open/close semaphore is used to serialize threads executing 7636 * in the driver's open & close entry point routines for a given 7637 * instance. 7638 */ 7639 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7640 7641 /* 7642 * The conf file entry and softstate variable are a forceful override, 7643 * meaning a non-zero value must be entered to change the default. 7644 */ 7645 un->un_f_disksort_disabled = FALSE; 7646 un->un_f_rmw_type = SD_RMW_TYPE_DEFAULT; 7647 un->un_f_enable_rmw = FALSE; 7648 7649 /* 7650 * GET EVENT STATUS NOTIFICATION media polling enabled by default, but 7651 * can be overridden via [s]sd-config-list "mmc-gesn-polling" property. 7652 */ 7653 un->un_f_mmc_gesn_polling = TRUE; 7654 7655 /* 7656 * The physical sector size currently defaults to DEV_BSIZE. We can 7657 * override this value via the driver configuration file so we must 7658 * set it before calling sd_read_unit_properties(). 7659 */ 7660 un->un_phy_blocksize = DEV_BSIZE; 7661 7662 /* 7663 * Retrieve the properties from the static driver table or the driver 7664 * configuration file (.conf) for this unit and update the soft state 7665 * for the device as needed for the indicated properties. 7666 * Note: the property configuration needs to occur here as some of the 7667 * following routines may have dependencies on soft state flags set 7668 * as part of the driver property configuration. 7669 */ 7670 sd_read_unit_properties(un); 7671 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7672 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 7673 7674 /* 7675 * A device is treated as hotpluggable only if it has the 7676 * "hotpluggable" property. Otherwise, it is regarded as 7677 * non-hotpluggable. 7678 */ 7679 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 7680 -1) != -1) { 7681 un->un_f_is_hotpluggable = TRUE; 7682 } 7683 7684 /* 7685 * Set the unit's attributes (flags) according to "hotpluggable" and 7686 * the RMB bit in the INQUIRY data. 7687 */ 7688 sd_set_unit_attributes(un, devi); 7689 7690 /* 7691 * By default, we mark the capacity, lbasize, and geometry 7692 * as invalid. Only if we successfully read a valid capacity 7693 * will we update the un_blockcount and un_tgt_blocksize with the 7694 * valid values (the geometry will be validated later). 7695 */ 7696 un->un_f_blockcount_is_valid = FALSE; 7697 un->un_f_tgt_blocksize_is_valid = FALSE; 7698 7699 /* 7700 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7701 * otherwise. 7702 */ 7703 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7704 un->un_blockcount = 0; 7705 7706 /* 7707 * Set up the per-instance info needed to determine the correct 7708 * CDBs and other info for issuing commands to the target. 7709 */ 7710 sd_init_cdb_limits(un); 7711 7712 /* 7713 * Set up the IO chains to use, based upon the target type.
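 * Targets that support non-DEV_BSIZE block sizes (removable media)
 * get the RMMEDIA buf chain; all other targets use the plain disk
 * chain. The uscsi, direct, and priority chains are the same for
 * every target type.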
7714 */ 7715 if (un->un_f_non_devbsize_supported) { 7716 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7717 } else { 7718 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7719 } 7720 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7721 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7722 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7723 7724 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7725 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7726 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7727 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7728 7729 7730 if (ISCD(un)) { 7731 un->un_additional_codes = sd_additional_codes; 7732 } else { 7733 un->un_additional_codes = NULL; 7734 } 7735 7736 /* 7737 * Create the kstats here so they can be available for attach-time 7738 * routines that send commands to the unit (either polled or via 7739 * sd_send_scsi_cmd). 7740 * 7741 * Note: This is a critical sequence that needs to be maintained: 7742 * 1) Instantiate the kstats here, before any routines using the 7743 * iopath (i.e. sd_send_scsi_cmd). 7744 * 2) Instantiate and initialize the partition stats 7745 * (sd_set_pstats). 7746 * 3) Initialize the error stats (sd_set_errstats), following 7747 * sd_validate_geometry(),sd_register_devid(), 7748 * and sd_cache_control(). 7749 */ 7750 7751 un->un_stats = kstat_create(sd_label, instance, 7752 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7753 if (un->un_stats != NULL) { 7754 un->un_stats->ks_lock = SD_MUTEX(un); 7755 kstat_install(un->un_stats); 7756 } 7757 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7758 "sd_unit_attach: un:0x%p un_stats created\n", un); 7759 7760 sd_create_errstats(un, instance); 7761 if (un->un_errstats == NULL) { 7762 goto create_errstats_failed; 7763 } 7764 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7765 "sd_unit_attach: un:0x%p errstats created\n", un); 7766 7767 /* 7768 * The following if/else code was relocated here from below as part 7769 * of the fix for bug (4430280). However with the default setup added 7770 * on entry to this routine, it's no longer absolutely necessary for 7771 * this to be before the call to sd_spin_up_unit. 7772 */ 7773 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7774 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7775 (devp->sd_inq->inq_ansi == 5)) && 7776 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7777 7778 /* 7779 * If tagged queueing is supported by the target 7780 * and by the host adapter then we will enable it 7781 */ 7782 un->un_tagflags = 0; 7783 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7784 (un->un_f_arq_enabled == TRUE)) { 7785 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7786 1, 1) == 1) { 7787 un->un_tagflags = FLAG_STAG; 7788 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7789 "sd_unit_attach: un:0x%p tag queueing " 7790 "enabled\n", un); 7791 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7792 "untagged-qing", 0) == 1) { 7793 un->un_f_opt_queueing = TRUE; 7794 un->un_saved_throttle = un->un_throttle = 7795 min(un->un_throttle, 3); 7796 } else { 7797 un->un_f_opt_queueing = FALSE; 7798 un->un_saved_throttle = un->un_throttle = 1; 7799 } 7800 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7801 == 1) && (un->un_f_arq_enabled == TRUE)) { 7802 /* The Host Adapter supports internal queueing. 
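 * As in the untagged-qing sub-case above, cap the throttle at 3
 * outstanding commands since the queueing is done internally by the
 * host adapter rather than by the target.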
*/ 7803 un->un_f_opt_queueing = TRUE; 7804 un->un_saved_throttle = un->un_throttle = 7805 min(un->un_throttle, 3); 7806 } else { 7807 un->un_f_opt_queueing = FALSE; 7808 un->un_saved_throttle = un->un_throttle = 1; 7809 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7810 "sd_unit_attach: un:0x%p no tag queueing\n", un); 7811 } 7812 7813 /* 7814 * Enable large transfers for SATA/SAS drives 7815 */ 7816 if (SD_IS_SERIAL(un)) { 7817 un->un_max_xfer_size = 7818 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7819 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7820 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7821 "sd_unit_attach: un:0x%p max transfer " 7822 "size=0x%x\n", un, un->un_max_xfer_size); 7823 7824 } 7825 7826 /* Setup or tear down default wide operations for disks */ 7827 7828 /* 7829 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 7830 * and "ssd_max_xfer_size" to exist simultaneously on the same 7831 * system and be set to different values. In the future this 7832 * code may need to be updated when the ssd module is 7833 * obsoleted and removed from the system. (4299588) 7834 */ 7835 if (SD_IS_PARALLEL_SCSI(un) && 7836 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 7837 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 7838 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7839 1, 1) == 1) { 7840 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7841 "sd_unit_attach: un:0x%p Wide Transfer " 7842 "enabled\n", un); 7843 } 7844 7845 /* 7846 * If tagged queuing has also been enabled, then 7847 * enable large xfers 7848 */ 7849 if (un->un_saved_throttle == sd_max_throttle) { 7850 un->un_max_xfer_size = 7851 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7852 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7853 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7854 "sd_unit_attach: un:0x%p max transfer " 7855 "size=0x%x\n", un, un->un_max_xfer_size); 7856 } 7857 } else { 7858 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7859 0, 1) == 1) { 7860 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7861 "sd_unit_attach: un:0x%p " 7862 "Wide Transfer disabled\n", un); 7863 } 7864 } 7865 } else { 7866 un->un_tagflags = FLAG_STAG; 7867 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7868 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7869 } 7870 7871 /* 7872 * If this target supports LUN reset, try to enable it. 7873 */ 7874 if (un->un_f_lun_reset_enabled) { 7875 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7876 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7877 "un:0x%p lun_reset capability set\n", un); 7878 } else { 7879 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7880 "un:0x%p lun-reset capability not set\n", un); 7881 } 7882 } 7883 7884 /* 7885 * Adjust the maximum transfer size. This is to fix 7886 * the problem of partial DMA support on SPARC. Some 7887 * HBA drivers, such as aac, have a very small dma_attr_maxxfer 7888 * size, which requires partial DMA support on SPARC. 7889 * In the future the SPARC pci nexus driver may solve 7890 * the problem instead of this fix.
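 * The "dma-max" capability queried below reports the HBA's DMA
 * limit; un_max_xfer_size is clamped to it so that no single command
 * exceeds what the HBA can map.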
7891 */ 7892 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7893 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7894 /* We need DMA partial even on sparc to ensure sddump() works */ 7895 un->un_max_xfer_size = max_xfer_size; 7896 if (un->un_partial_dma_supported == 0) 7897 un->un_partial_dma_supported = 1; 7898 } 7899 if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 7900 DDI_PROP_DONTPASS, "buf_break", 0) == 1) { 7901 if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr, 7902 un->un_max_xfer_size) == 1) { 7903 un->un_buf_breakup_supported = 1; 7904 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7905 "un:0x%p Buf breakup enabled\n", un); 7906 } 7907 } 7908 7909 /* 7910 * Set PKT_DMA_PARTIAL flag. 7911 */ 7912 if (un->un_partial_dma_supported == 1) { 7913 un->un_pkt_flags = PKT_DMA_PARTIAL; 7914 } else { 7915 un->un_pkt_flags = 0; 7916 } 7917 7918 /* Initialize sd_ssc_t for internal uscsi commands */ 7919 ssc = sd_ssc_init(un); 7920 scsi_fm_init(devp); 7921 7922 /* 7923 * Allocate memory for SCSI FMA support. 7924 */ 7925 un->un_fm_private = 7926 kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP); 7927 sfip = (struct sd_fm_internal *)un->un_fm_private; 7928 sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd; 7929 sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo; 7930 sfip->fm_ssc.ssc_un = un; 7931 7932 if (ISCD(un) || 7933 un->un_f_has_removable_media || 7934 devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) { 7935 /* 7936 * We don't touch CD-ROMs or DDI_FM_NOT_CAPABLE devices; 7937 * their logging behavior is unchanged. 7938 */ 7939 sfip->fm_log_level = SD_FM_LOG_NSUP; 7940 } else { 7941 /* 7942 * If we get here, this should be a non-CDROM, FM-capable 7943 * device; it will not keep the old scsi_log output as 7944 * before in /var/adm/messages. Instead, the property 7945 * "fm-scsi-log" controls whether the FM telemetry will 7946 * be logged in /var/adm/messages. 7947 */ 7948 int fm_scsi_log; 7949 fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 7950 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0); 7951 7952 if (fm_scsi_log) 7953 sfip->fm_log_level = SD_FM_LOG_EREPORT; 7954 else 7955 sfip->fm_log_level = SD_FM_LOG_SILENT; 7956 } 7957 7958 /* 7959 * At this point in the attach, we have enough info in the 7960 * soft state to be able to issue commands to the target. 7961 * 7962 * All command paths used below MUST issue their commands as 7963 * SD_PATH_DIRECT. This is important as intermediate layers 7964 * are not all initialized yet (such as PM). 7965 */ 7966 7967 /* 7968 * Send a TEST UNIT READY command to the device. This should clear 7969 * any outstanding UNIT ATTENTION that may be present. 7970 * 7971 * Note: Don't check for success, just track if there is a reservation, 7972 * this is a throw away command to clear any unit attentions. 7973 * 7974 * Note: This MUST be the first command issued to the target during 7975 * attach to ensure power on UNIT ATTENTIONS are cleared. 7976 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7977 * with attempts at spinning up a device with no media. 7978 */ 7979 status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 7980 if (status != 0) { 7981 if (status == EACCES) 7982 reservation_flag = SD_TARGET_IS_RESERVED; 7983 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7984 } 7985 7986 /* 7987 * If the device is NOT a removable media device, attempt to spin 7988 * it up (using the START_STOP_UNIT command) and read its capacity 7989 * (using the READ CAPACITY command).
Note, however, that either 7990 * of these could fail and in some cases we would continue with 7991 * the attach despite the failure (see below). 7992 */ 7993 if (un->un_f_descr_format_supported) { 7994 7995 switch (sd_spin_up_unit(ssc)) { 7996 case 0: 7997 /* 7998 * Spin-up was successful; now try to read the 7999 * capacity. If successful then save the results 8000 * and mark the capacity & lbasize as valid. 8001 */ 8002 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8003 "sd_unit_attach: un:0x%p spin-up successful\n", un); 8004 8005 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 8006 &lbasize, SD_PATH_DIRECT); 8007 8008 switch (status) { 8009 case 0: { 8010 if (capacity > DK_MAX_BLOCKS) { 8011 #ifdef _LP64 8012 if ((capacity + 1) > 8013 SD_GROUP1_MAX_ADDRESS) { 8014 /* 8015 * Enable descriptor format 8016 * sense data so that we can 8017 * get 64 bit sense data 8018 * fields. 8019 */ 8020 sd_enable_descr_sense(ssc); 8021 } 8022 #else 8023 /* 32-bit kernels can't handle this */ 8024 scsi_log(SD_DEVINFO(un), 8025 sd_label, CE_WARN, 8026 "disk has %llu blocks, which " 8027 "is too large for a 32-bit " 8028 "kernel", capacity); 8029 8030 #if defined(__i386) || defined(__amd64) 8031 /* 8032 * A 1TB disk was treated as (1T - 512)B 8033 * in the past, so it might have a 8034 * valid VTOC and Solaris partitions; 8035 * we have to allow it to continue to 8036 * work. 8037 */ 8038 if (capacity - 1 > DK_MAX_BLOCKS) 8039 #endif 8040 goto spinup_failed; 8041 #endif 8042 } 8043 8044 /* 8045 * Here it's not necessary to check whether 8046 * the capacity of the device is bigger than 8047 * what the max hba cdb can support, because 8048 * sd_send_scsi_READ_CAPACITY will retrieve 8049 * the capacity by sending a USCSI command, 8050 * which is constrained by the max hba cdb. 8051 * In fact, sd_send_scsi_READ_CAPACITY will 8052 * return EINVAL when a bigger cdb than the 8053 * required cdb length would be needed; that 8054 * case is handled in "case EINVAL" below. 8055 */ 8056 8057 /* 8058 * The following relies on 8059 * sd_send_scsi_READ_CAPACITY never 8060 * returning 0 for capacity and/or lbasize. 8061 */ 8062 sd_update_block_info(un, lbasize, capacity); 8063 8064 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8065 "sd_unit_attach: un:0x%p capacity = %ld " 8066 "blocks; lbasize= %ld.\n", un, 8067 un->un_blockcount, un->un_tgt_blocksize); 8068 8069 break; 8070 } 8071 case EINVAL: 8072 /* 8073 * In the case where the max-cdb-length property 8074 * is smaller than the required CDB length for 8075 * a SCSI device, a target driver can fail to 8076 * attach to that device. 8077 */ 8078 scsi_log(SD_DEVINFO(un), 8079 sd_label, CE_WARN, 8080 "disk capacity is too large " 8081 "for current cdb length"); 8082 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8083 8084 goto spinup_failed; 8085 case EACCES: 8086 /* 8087 * Should never get here if the spin-up 8088 * succeeded, but code it in anyway. 8089 * From here, just continue with the attach... 8090 */ 8091 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8092 "sd_unit_attach: un:0x%p " 8093 "sd_send_scsi_READ_CAPACITY " 8094 "returned reservation conflict\n", un); 8095 reservation_flag = SD_TARGET_IS_RESERVED; 8096 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8097 break; 8098 default: 8099 /* 8100 * Likewise, should never get here if the 8101 * spin-up succeeded. Just continue with 8102 * the attach...
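 * An EIO here is assessed as SD_FMT_STATUS_CHECK so the sense data
 * reaches FMA; any other errno is assessed as SD_FMT_IGNORE.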
8103 */ 8104 if (status == EIO) 8105 sd_ssc_assessment(ssc, 8106 SD_FMT_STATUS_CHECK); 8107 else 8108 sd_ssc_assessment(ssc, 8109 SD_FMT_IGNORE); 8110 break; 8111 } 8112 break; 8113 case EACCES: 8114 /* 8115 * Device is reserved by another host. In this case 8116 * we could not spin it up or read the capacity, but 8117 * we continue with the attach anyway. 8118 */ 8119 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8120 "sd_unit_attach: un:0x%p spin-up reservation " 8121 "conflict.\n", un); 8122 reservation_flag = SD_TARGET_IS_RESERVED; 8123 break; 8124 default: 8125 /* Fail the attach if the spin-up failed. */ 8126 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8127 "sd_unit_attach: un:0x%p spin-up failed.", un); 8128 goto spinup_failed; 8129 } 8130 8131 } 8132 8133 /* 8134 * Check to see if this is a MMC drive 8135 */ 8136 if (ISCD(un)) { 8137 sd_set_mmc_caps(ssc); 8138 } 8139 8140 /* 8141 * Add a zero-length attribute to tell the world we support 8142 * kernel ioctls (for layered drivers) 8143 */ 8144 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8145 DDI_KERNEL_IOCTL, NULL, 0); 8146 8147 /* 8148 * Add a boolean property to tell the world we support 8149 * the B_FAILFAST flag (for layered drivers) 8150 */ 8151 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8152 "ddi-failfast-supported", NULL, 0); 8153 8154 /* 8155 * Initialize power management 8156 */ 8157 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 8158 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 8159 sd_setup_pm(ssc, devi); 8160 if (un->un_f_pm_is_enabled == FALSE) { 8161 /* 8162 * For performance, point to a jump table that does 8163 * not include pm. 8164 * The direct and priority chains don't change with PM. 8165 * 8166 * Note: this is currently done based on individual device 8167 * capabilities. When an interface for determining system 8168 * power enabled state becomes available, or when additional 8169 * layers are added to the command chain, these values will 8170 * have to be re-evaluated for correctness. 8171 */ 8172 if (un->un_f_non_devbsize_supported) { 8173 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 8174 } else { 8175 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 8176 } 8177 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8178 } 8179 8180 /* 8181 * This property is set to 0 by HA software to avoid retries 8182 * on a reserved disk. (The preferred property name is 8183 * "retry-on-reservation-conflict") (1189689) 8184 * 8185 * Note: The use of a global here can have unintended consequences. A 8186 * per instance variable is preferable to match the capabilities of 8187 * different underlying hba's (4402600) 8188 */ 8189 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 8190 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 8191 sd_retry_on_reservation_conflict); 8192 if (sd_retry_on_reservation_conflict != 0) { 8193 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 8194 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 8195 sd_retry_on_reservation_conflict); 8196 } 8197 8198 /* Set up options for QFULL handling. 
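 * If the "qfull-retries" or "qfull-retry-interval" properties are
 * defined, pass the values through to the HBA via scsi_ifsetcap().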
*/ 8199 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8200 "qfull-retries", -1)) != -1) { 8201 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 8202 rval, 1); 8203 } 8204 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8205 "qfull-retry-interval", -1)) != -1) { 8206 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 8207 rval, 1); 8208 } 8209 8210 /* 8211 * This just prints a message that announces the existence of the 8212 * device. The message is always printed in the system logfile, but 8213 * only appears on the console if the system is booted with the 8214 * -v (verbose) argument. 8215 */ 8216 ddi_report_dev(devi); 8217 8218 un->un_mediastate = DKIO_NONE; 8219 8220 /* 8221 * Check Block Device Characteristics VPD. 8222 */ 8223 sd_check_bdc_vpd(ssc); 8224 8225 /* 8226 * Check whether the drive is in emulation mode. 8227 */ 8228 sd_check_emulation_mode(ssc); 8229 8230 cmlb_alloc_handle(&un->un_cmlbhandle); 8231 8232 #if defined(__i386) || defined(__amd64) 8233 /* 8234 * On x86, compensate for off-by-1 legacy error 8235 */ 8236 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 8237 (lbasize == un->un_sys_blocksize)) 8238 offbyone = CMLB_OFF_BY_ONE; 8239 #endif 8240 8241 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 8242 VOID2BOOLEAN(un->un_f_has_removable_media != 0), 8243 VOID2BOOLEAN(un->un_f_is_hotpluggable != 0), 8244 un->un_node_type, offbyone, un->un_cmlbhandle, 8245 (void *)SD_PATH_DIRECT) != 0) { 8246 goto cmlb_attach_failed; 8247 } 8248 8249 8250 /* 8251 * Read and validate the device's geometry (ie, disk label) 8252 * A new unformatted drive will not have a valid geometry, but 8253 * the driver needs to successfully attach to this device so 8254 * the drive can be formatted via ioctls. 8255 */ 8256 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 8257 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 8258 8259 mutex_enter(SD_MUTEX(un)); 8260 8261 /* 8262 * Read and initialize the devid for the unit. 8263 */ 8264 if (un->un_f_devid_supported) { 8265 sd_register_devid(ssc, devi, reservation_flag); 8266 } 8267 mutex_exit(SD_MUTEX(un)); 8268 8269 #if (defined(__fibre)) 8270 /* 8271 * Register callbacks for fibre only. You can't do this solely 8272 * on the basis of the devid_type because this is hba specific. 8273 * We need to query our hba capabilities to find out whether to 8274 * register or not. 8275 */ 8276 if (un->un_f_is_fibre) { 8277 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 8278 sd_init_event_callbacks(un); 8279 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8280 "sd_unit_attach: un:0x%p event callbacks inserted", 8281 un); 8282 } 8283 } 8284 #endif 8285 8286 if (un->un_f_opt_disable_cache == TRUE) { 8287 /* 8288 * Disable both read cache and write cache. This is 8289 * the historic behavior of the keywords in the config file. 8290 */ 8291 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 8292 0) { 8293 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8294 "sd_unit_attach: un:0x%p Could not disable " 8295 "caching", un); 8296 goto devid_failed; 8297 } 8298 } 8299 8300 /* 8301 * Check the value of the WCE bit and if it's allowed to be changed, 8302 * set un_f_write_cache_enabled and un_f_cache_mode_changeable 8303 * accordingly. 
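 * un_f_write_cache_enabled reflects the current WCE setting;
 * un_f_cache_mode_changeable records whether the WCE bit may be
 * toggled at all.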
8304 */ 8305 (void) sd_get_write_cache_enabled(ssc, &wc_enabled); 8306 sd_get_write_cache_changeable(ssc, &wc_changeable); 8307 mutex_enter(SD_MUTEX(un)); 8308 un->un_f_write_cache_enabled = (wc_enabled != 0); 8309 un->un_f_cache_mode_changeable = (wc_changeable != 0); 8310 mutex_exit(SD_MUTEX(un)); 8311 8312 if ((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR && 8313 un->un_tgt_blocksize != DEV_BSIZE) || 8314 un->un_f_enable_rmw) { 8315 if (!(un->un_wm_cache)) { 8316 (void) snprintf(name_str, sizeof (name_str), 8317 "%s%d_cache", 8318 ddi_driver_name(SD_DEVINFO(un)), 8319 ddi_get_instance(SD_DEVINFO(un))); 8320 un->un_wm_cache = kmem_cache_create( 8321 name_str, sizeof (struct sd_w_map), 8322 8, sd_wm_cache_constructor, 8323 sd_wm_cache_destructor, NULL, 8324 (void *)un, NULL, 0); 8325 if (!(un->un_wm_cache)) { 8326 goto wm_cache_failed; 8327 } 8328 } 8329 } 8330 8331 /* 8332 * Check the value of the NV_SUP bit and set 8333 * un_f_suppress_cache_flush accordingly. 8334 */ 8335 sd_get_nv_sup(ssc); 8336 8337 /* 8338 * Find out what type of reservation this disk supports. 8339 */ 8340 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL); 8341 8342 switch (status) { 8343 case 0: 8344 /* 8345 * SCSI-3 reservations are supported. 8346 */ 8347 un->un_reservation_type = SD_SCSI3_RESERVATION; 8348 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8349 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 8350 break; 8351 case ENOTSUP: 8352 /* 8353 * The PERSISTENT RESERVE IN command would not be recognized by 8354 * a SCSI-2 device, so assume the reservation type is SCSI-2. 8355 */ 8356 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8357 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 8358 un->un_reservation_type = SD_SCSI2_RESERVATION; 8359 8360 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8361 break; 8362 default: 8363 /* 8364 * default to SCSI-3 reservations 8365 */ 8366 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8367 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 8368 un->un_reservation_type = SD_SCSI3_RESERVATION; 8369 8370 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8371 break; 8372 } 8373 8374 /* 8375 * Set the pstat and error stat values here, so data obtained during the 8376 * previous attach-time routines is available. 8377 * 8378 * Note: This is a critical sequence that needs to be maintained: 8379 * 1) Instantiate the kstats before any routines using the iopath 8380 * (i.e. sd_send_scsi_cmd). 8381 * 2) Initialize the error stats (sd_set_errstats) and partition 8382 * stats (sd_set_pstats)here, following 8383 * cmlb_validate_geometry(), sd_register_devid(), and 8384 * sd_cache_control(). 8385 */ 8386 8387 if (un->un_f_pkstats_enabled && geom_label_valid) { 8388 sd_set_pstats(un); 8389 SD_TRACE(SD_LOG_IO_PARTITION, un, 8390 "sd_unit_attach: un:0x%p pstats created and set\n", un); 8391 } 8392 8393 sd_set_errstats(un); 8394 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8395 "sd_unit_attach: un:0x%p errstats set\n", un); 8396 8397 8398 /* 8399 * After successfully attaching an instance, we record the information 8400 * of how many luns have been attached on the relative target and 8401 * controller for parallel SCSI. This information is used when sd tries 8402 * to set the tagged queuing capability in HBA. 
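 * The matching SD_SCSI_LUN_DETACH update is made at the end of
 * sd_unit_detach().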
8403 */ 8404 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8405 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 8406 } 8407 8408 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8409 "sd_unit_attach: un:0x%p exit success\n", un); 8410 8411 /* Uninitialize sd_ssc_t pointer */ 8412 sd_ssc_fini(ssc); 8413 8414 return (DDI_SUCCESS); 8415 8416 /* 8417 * An error occurred during the attach; clean up & return failure. 8418 */ 8419 wm_cache_failed: 8420 devid_failed: 8421 ddi_remove_minor_node(devi, NULL); 8422 8423 cmlb_attach_failed: 8424 /* 8425 * Cleanup from the scsi_ifsetcap() calls (437868) 8426 */ 8427 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8428 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8429 8430 /* 8431 * Refer to the comments of setting tagged-qing in the beginning of 8432 * sd_unit_attach. We can only disable tagged queuing when there is 8433 * no lun attached on the target. 8434 */ 8435 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 8436 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8437 } 8438 8439 if (un->un_f_is_fibre == FALSE) { 8440 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8441 } 8442 8443 spinup_failed: 8444 8445 /* Uninitialize sd_ssc_t pointer */ 8446 sd_ssc_fini(ssc); 8447 8448 mutex_enter(SD_MUTEX(un)); 8449 8450 /* Deallocate SCSI FMA memory spaces */ 8451 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8452 8453 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */ 8454 if (un->un_direct_priority_timeid != NULL) { 8455 timeout_id_t temp_id = un->un_direct_priority_timeid; 8456 un->un_direct_priority_timeid = NULL; 8457 mutex_exit(SD_MUTEX(un)); 8458 (void) untimeout(temp_id); 8459 mutex_enter(SD_MUTEX(un)); 8460 } 8461 8462 /* Cancel any pending start/stop timeouts */ 8463 if (un->un_startstop_timeid != NULL) { 8464 timeout_id_t temp_id = un->un_startstop_timeid; 8465 un->un_startstop_timeid = NULL; 8466 mutex_exit(SD_MUTEX(un)); 8467 (void) untimeout(temp_id); 8468 mutex_enter(SD_MUTEX(un)); 8469 } 8470 8471 /* Cancel any pending reset-throttle timeouts */ 8472 if (un->un_reset_throttle_timeid != NULL) { 8473 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8474 un->un_reset_throttle_timeid = NULL; 8475 mutex_exit(SD_MUTEX(un)); 8476 (void) untimeout(temp_id); 8477 mutex_enter(SD_MUTEX(un)); 8478 } 8479 8480 /* Cancel rmw warning message timeouts */ 8481 if (un->un_rmw_msg_timeid != NULL) { 8482 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8483 un->un_rmw_msg_timeid = NULL; 8484 mutex_exit(SD_MUTEX(un)); 8485 (void) untimeout(temp_id); 8486 mutex_enter(SD_MUTEX(un)); 8487 } 8488 8489 /* Cancel any pending retry timeouts */ 8490 if (un->un_retry_timeid != NULL) { 8491 timeout_id_t temp_id = un->un_retry_timeid; 8492 un->un_retry_timeid = NULL; 8493 mutex_exit(SD_MUTEX(un)); 8494 (void) untimeout(temp_id); 8495 mutex_enter(SD_MUTEX(un)); 8496 } 8497 8498 /* Cancel any pending delayed cv broadcast timeouts */ 8499 if (un->un_dcvb_timeid != NULL) { 8500 timeout_id_t temp_id = un->un_dcvb_timeid; 8501 un->un_dcvb_timeid = NULL; 8502 mutex_exit(SD_MUTEX(un)); 8503 (void) untimeout(temp_id); 8504 mutex_enter(SD_MUTEX(un)); 8505 } 8506 8507 mutex_exit(SD_MUTEX(un)); 8508 8509 /* There should not be any in-progress I/O so ASSERT this check */ 8510 ASSERT(un->un_ncmds_in_transport == 0); 8511 ASSERT(un->un_ncmds_in_driver == 0); 8512 8513 /* Do not free the softstate if the callback routine is active */ 8514 sd_sync_with_callback(un); 8515 8516 /* 8517 * Partition 
stats apparently are not used with removables. These would 8518 * not have been created during attach, so no need to clean them up... 8519 */ 8520 if (un->un_errstats != NULL) { 8521 kstat_delete(un->un_errstats); 8522 un->un_errstats = NULL; 8523 } 8524 8525 create_errstats_failed: 8526 8527 if (un->un_stats != NULL) { 8528 kstat_delete(un->un_stats); 8529 un->un_stats = NULL; 8530 } 8531 8532 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8533 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8534 8535 ddi_prop_remove_all(devi); 8536 sema_destroy(&un->un_semoclose); 8537 cv_destroy(&un->un_state_cv); 8538 8539 sd_free_rqs(un); 8540 8541 alloc_rqs_failed: 8542 8543 devp->sd_private = NULL; 8544 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8545 8546 /* 8547 * Note: the man pages are unclear as to whether or not doing a 8548 * ddi_soft_state_free(sd_state, instance) is the right way to 8549 * clean up after the ddi_soft_state_zalloc() if the subsequent 8550 * ddi_get_soft_state() fails. The implication seems to be 8551 * that the get_soft_state cannot fail if the zalloc succeeds. 8552 */ 8553 #ifndef XPV_HVM_DRIVER 8554 ddi_soft_state_free(sd_state, instance); 8555 #endif /* !XPV_HVM_DRIVER */ 8556 8557 probe_failed: 8558 scsi_unprobe(devp); 8559 8560 return (DDI_FAILURE); 8561 } 8562 8563 8564 /* 8565 * Function: sd_unit_detach 8566 * 8567 * Description: Performs DDI_DETACH processing for sddetach(). 8568 * 8569 * Return Code: DDI_SUCCESS 8570 * DDI_FAILURE 8571 * 8572 * Context: Kernel thread context 8573 */ 8574 8575 static int 8576 sd_unit_detach(dev_info_t *devi) 8577 { 8578 struct scsi_device *devp; 8579 struct sd_lun *un; 8580 int i; 8581 int tgt; 8582 dev_t dev; 8583 dev_info_t *pdip = ddi_get_parent(devi); 8584 #ifndef XPV_HVM_DRIVER 8585 int instance = ddi_get_instance(devi); 8586 #endif /* !XPV_HVM_DRIVER */ 8587 8588 mutex_enter(&sd_detach_mutex); 8589 8590 /* 8591 * Fail the detach for any of the following: 8592 * - Unable to get the sd_lun struct for the instance 8593 * - A layered driver has an outstanding open on the instance 8594 * - Another thread is already detaching this instance 8595 * - Another thread is currently performing an open 8596 */ 8597 devp = ddi_get_driver_private(devi); 8598 if ((devp == NULL) || 8599 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8600 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8601 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8602 mutex_exit(&sd_detach_mutex); 8603 return (DDI_FAILURE); 8604 } 8605 8606 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8607 8608 /* 8609 * Mark this instance as currently in a detach, to inhibit any 8610 * opens from a layered driver. 8611 */ 8612 un->un_detach_count++; 8613 mutex_exit(&sd_detach_mutex); 8614 8615 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 8616 SCSI_ADDR_PROP_TARGET, -1); 8617 8618 dev = sd_make_device(SD_DEVINFO(un)); 8619 8620 #ifndef lint 8621 _NOTE(COMPETING_THREADS_NOW); 8622 #endif 8623 8624 mutex_enter(SD_MUTEX(un)); 8625 8626 /* 8627 * Fail the detach if there are any outstanding layered 8628 * opens on this device. 8629 */ 8630 for (i = 0; i < NDKMAP; i++) { 8631 if (un->un_ocmap.lyropen[i] != 0) { 8632 goto err_notclosed; 8633 } 8634 } 8635 8636 /* 8637 * Verify there are NO outstanding commands issued to this device. 8638 * ie, un_ncmds_in_transport == 0. 8639 * It's possible to have outstanding commands through the physio 8640 * code path, even though everything's closed. 
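 * A pending retry or direct-priority timeout, or the RWAIT state,
 * also counts as outstanding activity and fails the detach.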
8641 */ 8642 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8643 (un->un_direct_priority_timeid != NULL) || 8644 (un->un_state == SD_STATE_RWAIT)) { 8645 mutex_exit(SD_MUTEX(un)); 8646 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8647 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8648 goto err_stillbusy; 8649 } 8650 8651 /* 8652 * If we have the device reserved, release the reservation. 8653 */ 8654 if ((un->un_resvd_status & SD_RESERVE) && 8655 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8656 mutex_exit(SD_MUTEX(un)); 8657 /* 8658 * Note: sd_reserve_release sends a command to the device 8659 * via the sd_ioctlcmd() path, and can sleep. 8660 */ 8661 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8662 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8663 "sd_dr_detach: Cannot release reservation \n"); 8664 } 8665 } else { 8666 mutex_exit(SD_MUTEX(un)); 8667 } 8668 8669 /* 8670 * Untimeout any reserve recover, throttle reset, restart unit 8671 * and delayed broadcast timeout threads. Protect the timeout pointer 8672 * from getting nulled by their callback functions. 8673 */ 8674 mutex_enter(SD_MUTEX(un)); 8675 if (un->un_resvd_timeid != NULL) { 8676 timeout_id_t temp_id = un->un_resvd_timeid; 8677 un->un_resvd_timeid = NULL; 8678 mutex_exit(SD_MUTEX(un)); 8679 (void) untimeout(temp_id); 8680 mutex_enter(SD_MUTEX(un)); 8681 } 8682 8683 if (un->un_reset_throttle_timeid != NULL) { 8684 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8685 un->un_reset_throttle_timeid = NULL; 8686 mutex_exit(SD_MUTEX(un)); 8687 (void) untimeout(temp_id); 8688 mutex_enter(SD_MUTEX(un)); 8689 } 8690 8691 if (un->un_startstop_timeid != NULL) { 8692 timeout_id_t temp_id = un->un_startstop_timeid; 8693 un->un_startstop_timeid = NULL; 8694 mutex_exit(SD_MUTEX(un)); 8695 (void) untimeout(temp_id); 8696 mutex_enter(SD_MUTEX(un)); 8697 } 8698 8699 if (un->un_rmw_msg_timeid != NULL) { 8700 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8701 un->un_rmw_msg_timeid = NULL; 8702 mutex_exit(SD_MUTEX(un)); 8703 (void) untimeout(temp_id); 8704 mutex_enter(SD_MUTEX(un)); 8705 } 8706 8707 if (un->un_dcvb_timeid != NULL) { 8708 timeout_id_t temp_id = un->un_dcvb_timeid; 8709 un->un_dcvb_timeid = NULL; 8710 mutex_exit(SD_MUTEX(un)); 8711 (void) untimeout(temp_id); 8712 } else { 8713 mutex_exit(SD_MUTEX(un)); 8714 } 8715 8716 /* Remove any pending reservation reclaim requests for this device */ 8717 sd_rmv_resv_reclaim_req(dev); 8718 8719 mutex_enter(SD_MUTEX(un)); 8720 8721 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8722 if (un->un_direct_priority_timeid != NULL) { 8723 timeout_id_t temp_id = un->un_direct_priority_timeid; 8724 un->un_direct_priority_timeid = NULL; 8725 mutex_exit(SD_MUTEX(un)); 8726 (void) untimeout(temp_id); 8727 mutex_enter(SD_MUTEX(un)); 8728 } 8729 8730 /* Cancel any active multi-host disk watch thread requests */ 8731 if (un->un_mhd_token != NULL) { 8732 mutex_exit(SD_MUTEX(un)); 8733 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8734 if (scsi_watch_request_terminate(un->un_mhd_token, 8735 SCSI_WATCH_TERMINATE_NOWAIT)) { 8736 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8737 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8738 /* 8739 * Note: We are returning here after having removed 8740 * some driver timeouts above. This is consistent with 8741 * the legacy implementation but perhaps the watch 8742 * terminate call should be made with the wait flag set. 
8743 */ 8744 goto err_stillbusy; 8745 } 8746 mutex_enter(SD_MUTEX(un)); 8747 un->un_mhd_token = NULL; 8748 } 8749 8750 if (un->un_swr_token != NULL) { 8751 mutex_exit(SD_MUTEX(un)); 8752 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8753 if (scsi_watch_request_terminate(un->un_swr_token, 8754 SCSI_WATCH_TERMINATE_NOWAIT)) { 8755 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8756 "sd_dr_detach: Cannot cancel swr watch request\n"); 8757 /* 8758 * Note: We are returning here after having removed 8759 * some driver timeouts above. This is consistent with 8760 * the legacy implementation but perhaps the watch 8761 * terminate call should be made with the wait flag set. 8762 */ 8763 goto err_stillbusy; 8764 } 8765 mutex_enter(SD_MUTEX(un)); 8766 un->un_swr_token = NULL; 8767 } 8768 8769 mutex_exit(SD_MUTEX(un)); 8770 8771 /* 8772 * Clear any scsi_reset_notifies. We clear the reset notifies 8773 * if we have not registered one. 8774 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 8775 */ 8776 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 8777 sd_mhd_reset_notify_cb, (caddr_t)un); 8778 8779 /* 8780 * Protect the timeout pointers from getting nulled by 8781 * their callback functions during the cancellation process. 8782 * In such a scenario untimeout can be invoked with a null value. 8783 */ 8784 _NOTE(NO_COMPETING_THREADS_NOW); 8785 8786 mutex_enter(&un->un_pm_mutex); 8787 if (un->un_pm_idle_timeid != NULL) { 8788 timeout_id_t temp_id = un->un_pm_idle_timeid; 8789 un->un_pm_idle_timeid = NULL; 8790 mutex_exit(&un->un_pm_mutex); 8791 8792 /* 8793 * Timeout is active; cancel it. 8794 * Note that it'll never be active on a device 8795 * that does not support PM therefore we don't 8796 * have to check before calling pm_idle_component. 8797 */ 8798 (void) untimeout(temp_id); 8799 (void) pm_idle_component(SD_DEVINFO(un), 0); 8800 mutex_enter(&un->un_pm_mutex); 8801 } 8802 8803 /* 8804 * Check whether there is already a timeout scheduled for power 8805 * management. If yes then don't lower the power here, that's 8806 * the timeout handler's job. 8807 */ 8808 if (un->un_pm_timeid != NULL) { 8809 timeout_id_t temp_id = un->un_pm_timeid; 8810 un->un_pm_timeid = NULL; 8811 mutex_exit(&un->un_pm_mutex); 8812 /* 8813 * Timeout is active; cancel it. 8814 * Note that it'll never be active on a device 8815 * that does not support PM therefore we don't 8816 * have to check before calling pm_idle_component. 8817 */ 8818 (void) untimeout(temp_id); 8819 (void) pm_idle_component(SD_DEVINFO(un), 0); 8820 8821 } else { 8822 mutex_exit(&un->un_pm_mutex); 8823 if ((un->un_f_pm_is_enabled == TRUE) && 8824 (pm_lower_power(SD_DEVINFO(un), 0, SD_PM_STATE_STOPPED(un)) 8825 != DDI_SUCCESS)) { 8826 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8827 "sd_dr_detach: Lower power request failed, ignoring.\n"); 8828 /* 8829 * Fix for bug: 4297749, item # 13 8830 * The above test now includes a check to see if PM is 8831 * supported by this device before calling 8832 * pm_lower_power(). 8833 * Note, the following is not dead code. The call to 8834 * pm_lower_power above will generate a call back into 8835 * our sdpower routine which might result in a timeout 8836 * handler getting activated. Therefore the following 8837 * code is valid and necessary.
8838 */
8839 mutex_enter(&un->un_pm_mutex);
8840 if (un->un_pm_timeid != NULL) {
8841 timeout_id_t temp_id = un->un_pm_timeid;
8842 un->un_pm_timeid = NULL;
8843 mutex_exit(&un->un_pm_mutex);
8844 (void) untimeout(temp_id);
8845 (void) pm_idle_component(SD_DEVINFO(un), 0);
8846 } else {
8847 mutex_exit(&un->un_pm_mutex);
8848 }
8849 }
8850 }
8851
8852 /*
8853 * Cleanup from the scsi_ifsetcap() calls (437868)
8854 * Relocated here from above to be after the call to
8855 * pm_lower_power, which was getting errors.
8856 */
8857 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
8858 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
8859
8860 /*
8861 * Currently, tagged queuing is supported per target by the HBA.
8862 * Setting this per lun instance actually sets the capability of the
8863 * whole target, which affects those luns already attached on the
8864 * same target. So during detach, we can only disable this capability
8865 * when this is the only lun left on the target. By doing
8866 * this, we assume a target has the same tagged queuing capability
8867 * for every lun. The condition can be removed when the HBA is changed
8868 * to support per-lun tagged queuing.
8869 */
8870 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) {
8871 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
8872 }
8873
8874 if (un->un_f_is_fibre == FALSE) {
8875 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
8876 }
8877
8878 /*
8879 * Remove any event callbacks, fibre only.
8880 */
8881 if (un->un_f_is_fibre == TRUE) {
8882 if ((un->un_insert_event != NULL) &&
8883 (ddi_remove_event_handler(un->un_insert_cb_id) !=
8884 DDI_SUCCESS)) {
8885 /*
8886 * Note: We are returning here after having done
8887 * substantial cleanup above. This is consistent
8888 * with the legacy implementation but this may not
8889 * be the right thing to do.
8890 */
8891 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8892 "sd_dr_detach: Cannot cancel insert event\n");
8893 goto err_remove_event;
8894 }
8895 un->un_insert_event = NULL;
8896
8897 if ((un->un_remove_event != NULL) &&
8898 (ddi_remove_event_handler(un->un_remove_cb_id) !=
8899 DDI_SUCCESS)) {
8900 /*
8901 * Note: We are returning here after having done
8902 * substantial cleanup above. This is consistent
8903 * with the legacy implementation but this may not
8904 * be the right thing to do.
8905 */
8906 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8907 "sd_dr_detach: Cannot cancel remove event\n");
8908 goto err_remove_event;
8909 }
8910 un->un_remove_event = NULL;
8911 }
8912
8913 /* Do not free the softstate if the callback routine is active */
8914 sd_sync_with_callback(un);
8915
8916 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
8917 cmlb_free_handle(&un->un_cmlbhandle);
8918
8919 /*
8920 * Hold the detach mutex here, to make sure that no other threads ever
8921 * can access a (partially) freed soft state structure.
8922 */
8923 mutex_enter(&sd_detach_mutex);
8924
8925 /*
8926 * Clean up the soft state struct.
8927 * Cleanup is done in reverse order of allocs/inits.
8928 * At this point there should be no competing threads anymore.
8929 */
8930
8931 scsi_fm_fini(devp);
8932
8933 /*
8934 * Deallocate memory for SCSI FMA.
8935 */
8936 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));
8937
8938 /*
8939 * Unregister and free device id if it was not registered
8940 * by the transport.
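 *
 * For illustration, the devid teardown below mirrors the
 * attach-time setup (a sketch; the attach-side calls live
 * elsewhere in this file):
 *
 *	attach:	(void) ddi_devid_register(devi, un->un_devid);
 *	detach:	ddi_devid_unregister(devi);
 *		ddi_devid_free(un->un_devid);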
8941 */ 8942 if (un->un_f_devid_transport_defined == FALSE) 8943 ddi_devid_unregister(devi); 8944 8945 /* 8946 * free the devid structure if allocated before (by ddi_devid_init() 8947 * or ddi_devid_get()). 8948 */ 8949 if (un->un_devid) { 8950 ddi_devid_free(un->un_devid); 8951 un->un_devid = NULL; 8952 } 8953 8954 /* 8955 * Destroy wmap cache if it exists. 8956 */ 8957 if (un->un_wm_cache != NULL) { 8958 kmem_cache_destroy(un->un_wm_cache); 8959 un->un_wm_cache = NULL; 8960 } 8961 8962 /* 8963 * kstat cleanup is done in detach for all device types (4363169). 8964 * We do not want to fail detach if the device kstats are not deleted 8965 * since there is a confusion about the devo_refcnt for the device. 8966 * We just delete the kstats and let detach complete successfully. 8967 */ 8968 if (un->un_stats != NULL) { 8969 kstat_delete(un->un_stats); 8970 un->un_stats = NULL; 8971 } 8972 if (un->un_errstats != NULL) { 8973 kstat_delete(un->un_errstats); 8974 un->un_errstats = NULL; 8975 } 8976 8977 /* Remove partition stats */ 8978 if (un->un_f_pkstats_enabled) { 8979 for (i = 0; i < NSDMAP; i++) { 8980 if (un->un_pstats[i] != NULL) { 8981 kstat_delete(un->un_pstats[i]); 8982 un->un_pstats[i] = NULL; 8983 } 8984 } 8985 } 8986 8987 /* Remove xbuf registration */ 8988 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8989 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8990 8991 /* Remove driver properties */ 8992 ddi_prop_remove_all(devi); 8993 8994 mutex_destroy(&un->un_pm_mutex); 8995 cv_destroy(&un->un_pm_busy_cv); 8996 8997 cv_destroy(&un->un_wcc_cv); 8998 8999 /* Open/close semaphore */ 9000 sema_destroy(&un->un_semoclose); 9001 9002 /* Removable media condvar. */ 9003 cv_destroy(&un->un_state_cv); 9004 9005 /* Suspend/resume condvar. */ 9006 cv_destroy(&un->un_suspend_cv); 9007 cv_destroy(&un->un_disk_busy_cv); 9008 9009 sd_free_rqs(un); 9010 9011 /* Free up soft state */ 9012 devp->sd_private = NULL; 9013 9014 bzero(un, sizeof (struct sd_lun)); 9015 #ifndef XPV_HVM_DRIVER 9016 ddi_soft_state_free(sd_state, instance); 9017 #endif /* !XPV_HVM_DRIVER */ 9018 9019 mutex_exit(&sd_detach_mutex); 9020 9021 /* This frees up the INQUIRY data associated with the device. */ 9022 scsi_unprobe(devp); 9023 9024 /* 9025 * After successfully detaching an instance, we update the information 9026 * of how many luns have been attached in the relative target and 9027 * controller for parallel SCSI. This information is used when sd tries 9028 * to set the tagged queuing capability in HBA. 9029 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 9030 * check if the device is parallel SCSI. However, we don't need to 9031 * check here because we've already checked during attach. No device 9032 * that is not parallel SCSI is in the chain. 9033 */ 9034 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 9035 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 9036 } 9037 9038 return (DDI_SUCCESS); 9039 9040 err_notclosed: 9041 mutex_exit(SD_MUTEX(un)); 9042 9043 err_stillbusy: 9044 _NOTE(NO_COMPETING_THREADS_NOW); 9045 9046 err_remove_event: 9047 mutex_enter(&sd_detach_mutex); 9048 un->un_detach_count--; 9049 mutex_exit(&sd_detach_mutex); 9050 9051 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 9052 return (DDI_FAILURE); 9053 } 9054 9055 9056 /* 9057 * Function: sd_create_errstats 9058 * 9059 * Description: This routine instantiates the device error stats. 
9060 * 9061 * Note: During attach the stats are instantiated first so they are 9062 * available for attach-time routines that utilize the driver 9063 * iopath to send commands to the device. The stats are initialized 9064 * separately so data obtained during some attach-time routines is 9065 * available. (4362483) 9066 * 9067 * Arguments: un - driver soft state (unit) structure 9068 * instance - driver instance 9069 * 9070 * Context: Kernel thread context 9071 */ 9072 9073 static void 9074 sd_create_errstats(struct sd_lun *un, int instance) 9075 { 9076 struct sd_errstats *stp; 9077 char kstatmodule_err[KSTAT_STRLEN]; 9078 char kstatname[KSTAT_STRLEN]; 9079 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 9080 9081 ASSERT(un != NULL); 9082 9083 if (un->un_errstats != NULL) { 9084 return; 9085 } 9086 9087 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 9088 "%serr", sd_label); 9089 (void) snprintf(kstatname, sizeof (kstatname), 9090 "%s%d,err", sd_label, instance); 9091 9092 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 9093 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 9094 9095 if (un->un_errstats == NULL) { 9096 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 9097 "sd_create_errstats: Failed kstat_create\n"); 9098 return; 9099 } 9100 9101 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9102 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 9103 KSTAT_DATA_UINT32); 9104 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 9105 KSTAT_DATA_UINT32); 9106 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 9107 KSTAT_DATA_UINT32); 9108 kstat_named_init(&stp->sd_vid, "Vendor", 9109 KSTAT_DATA_CHAR); 9110 kstat_named_init(&stp->sd_pid, "Product", 9111 KSTAT_DATA_CHAR); 9112 kstat_named_init(&stp->sd_revision, "Revision", 9113 KSTAT_DATA_CHAR); 9114 kstat_named_init(&stp->sd_serial, "Serial No", 9115 KSTAT_DATA_CHAR); 9116 kstat_named_init(&stp->sd_capacity, "Size", 9117 KSTAT_DATA_ULONGLONG); 9118 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 9119 KSTAT_DATA_UINT32); 9120 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 9121 KSTAT_DATA_UINT32); 9122 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 9123 KSTAT_DATA_UINT32); 9124 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 9125 KSTAT_DATA_UINT32); 9126 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 9127 KSTAT_DATA_UINT32); 9128 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 9129 KSTAT_DATA_UINT32); 9130 9131 un->un_errstats->ks_private = un; 9132 un->un_errstats->ks_update = nulldev; 9133 9134 kstat_install(un->un_errstats); 9135 } 9136 9137 9138 /* 9139 * Function: sd_set_errstats 9140 * 9141 * Description: This routine sets the value of the vendor id, product id, 9142 * revision, serial number, and capacity device error stats. 9143 * 9144 * Note: During attach the stats are instantiated first so they are 9145 * available for attach-time routines that utilize the driver 9146 * iopath to send commands to the device. The stats are initialized 9147 * separately so data obtained during some attach-time routines is 9148 * available. 
(4362483)
9149 *
9150 * Arguments: un - driver soft state (unit) structure
9151 *
9152 * Context: Kernel thread context
9153 */
9154
9155 static void
9156 sd_set_errstats(struct sd_lun *un)
9157 {
9158 struct sd_errstats *stp;
9159 char *sn;
9160
9161 ASSERT(un != NULL);
9162 ASSERT(un->un_errstats != NULL);
9163 stp = (struct sd_errstats *)un->un_errstats->ks_data;
9164 ASSERT(stp != NULL);
9165 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
9166 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
9167 (void) strncpy(stp->sd_revision.value.c,
9168 un->un_sd->sd_inq->inq_revision, 4);
9169
9170 /*
9171 * All the errstats are persistent across detach/attach,
9172 * so reset them all here to handle hot replacement of disk
9173 * drives, except when the same Sun-qualified drive is
9174 * still present.
9175 */
9176 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
9177 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
9178 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
9179 stp->sd_softerrs.value.ui32 = 0;
9180 stp->sd_harderrs.value.ui32 = 0;
9181 stp->sd_transerrs.value.ui32 = 0;
9182 stp->sd_rq_media_err.value.ui32 = 0;
9183 stp->sd_rq_ntrdy_err.value.ui32 = 0;
9184 stp->sd_rq_nodev_err.value.ui32 = 0;
9185 stp->sd_rq_recov_err.value.ui32 = 0;
9186 stp->sd_rq_illrq_err.value.ui32 = 0;
9187 stp->sd_rq_pfa_err.value.ui32 = 0;
9188 }
9189
9190 /*
9191 * Set the "Serial No" kstat for Sun qualified drives (indicated by
9192 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid)
9193 * (4376302))
9194 */
9195 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
9196 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
9197 sizeof (SD_INQUIRY(un)->inq_serial));
9198 } else {
9199 /*
9200 * Set the "Serial No" kstat for non-Sun qualified drives
9201 */
9202 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, SD_DEVINFO(un),
9203 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9204 INQUIRY_SERIAL_NO, &sn) == DDI_SUCCESS) {
9205 (void) strlcpy(stp->sd_serial.value.c, sn,
9206 sizeof (stp->sd_serial.value.c));
9207 ddi_prop_free(sn);
9208 }
9209 }
9210
9211 if (un->un_f_blockcount_is_valid != TRUE) {
9212 /*
9213 * Set capacity error stat to 0 for no media. This ensures
9214 * a valid capacity is displayed in response to 'iostat -E'
9215 * when no media is present in the device.
9216 */
9217 stp->sd_capacity.value.ui64 = 0;
9218 } else {
9219 /*
9220 * Multiply un_blockcount by un->un_sys_blocksize to get
9221 * capacity.
9222 *
9223 * Note: for non-512 blocksize devices "un_blockcount" has been
9224 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
9225 * (un_tgt_blocksize / un->un_sys_blocksize).
9226 */
9227 stp->sd_capacity.value.ui64 = (uint64_t)
9228 ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
9229 }
9230 }
9231
9232
9233 /*
9234 * Function: sd_set_pstats
9235 *
9236 * Description: This routine instantiates and initializes the partition
9237 * stats for each partition with more than zero blocks.
9238 * (4363169)
9239 *
9240 * Arguments: un - driver soft state (unit) structure
9241 *
9242 * Context: Kernel thread context
9243 */
9244
9245 static void
9246 sd_set_pstats(struct sd_lun *un)
9247 {
9248 char kstatname[KSTAT_STRLEN];
9249 int instance;
9250 int i;
9251 diskaddr_t nblks = 0;
9252 char *partname = NULL;
9253
9254 ASSERT(un != NULL);
9255
9256 instance = ddi_get_instance(SD_DEVINFO(un));
9257
9258 /* Note:x86: is this a VTOC8/VTOC16 difference?
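 *
 * For illustration: with sd_label "sd", instance 0 and a
 * partition named "a", the snprintf() below yields the kstat
 * name "sd0,a" (a hypothetical example; the actual partition
 * names depend on the label type).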
*/
9259 for (i = 0; i < NSDMAP; i++) {
9260
9261 if (cmlb_partinfo(un->un_cmlbhandle, i,
9262 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
9263 continue;
9264 mutex_enter(SD_MUTEX(un));
9265
9266 if ((un->un_pstats[i] == NULL) &&
9267 (nblks != 0)) {
9268
9269 (void) snprintf(kstatname, sizeof (kstatname),
9270 "%s%d,%s", sd_label, instance,
9271 partname);
9272
9273 un->un_pstats[i] = kstat_create(sd_label,
9274 instance, kstatname, "partition", KSTAT_TYPE_IO,
9275 1, KSTAT_FLAG_PERSISTENT);
9276 if (un->un_pstats[i] != NULL) {
9277 un->un_pstats[i]->ks_lock = SD_MUTEX(un);
9278 kstat_install(un->un_pstats[i]);
9279 }
9280 }
9281 mutex_exit(SD_MUTEX(un));
9282 }
9283 }
9284
9285
9286 #if (defined(__fibre))
9287 /*
9288 * Function: sd_init_event_callbacks
9289 *
9290 * Description: This routine initializes the insertion and removal event
9291 * callbacks. (fibre only)
9292 *
9293 * Arguments: un - driver soft state (unit) structure
9294 *
9295 * Context: Kernel thread context
9296 */
9297
9298 static void
9299 sd_init_event_callbacks(struct sd_lun *un)
9300 {
9301 ASSERT(un != NULL);
9302
9303 if ((un->un_insert_event == NULL) &&
9304 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
9305 &un->un_insert_event) == DDI_SUCCESS)) {
9306 /*
9307 * Add the callback for an insertion event
9308 */
9309 (void) ddi_add_event_handler(SD_DEVINFO(un),
9310 un->un_insert_event, sd_event_callback, (void *)un,
9311 &(un->un_insert_cb_id));
9312 }
9313
9314 if ((un->un_remove_event == NULL) &&
9315 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
9316 &un->un_remove_event) == DDI_SUCCESS)) {
9317 /*
9318 * Add the callback for a removal event
9319 */
9320 (void) ddi_add_event_handler(SD_DEVINFO(un),
9321 un->un_remove_event, sd_event_callback, (void *)un,
9322 &(un->un_remove_cb_id));
9323 }
9324 }
9325
9326
9327 /*
9328 * Function: sd_event_callback
9329 *
9330 * Description: This routine handles insert/remove events (photon). The
9331 * state is changed to OFFLINE which can be used to suppress
9332 * error msgs. (fibre only)
9333 *
9334 * Arguments: un - driver soft state (unit) structure
9335 *
9336 * Context: Callout thread context
9337 */
9338 /* ARGSUSED */
9339 static void
9340 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
9341 void *bus_impldata)
9342 {
9343 struct sd_lun *un = (struct sd_lun *)arg;
9344
9345 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
9346 if (event == un->un_insert_event) {
9347 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
9348 mutex_enter(SD_MUTEX(un));
9349 if (un->un_state == SD_STATE_OFFLINE) {
9350 if (un->un_last_state != SD_STATE_SUSPENDED) {
9351 un->un_state = un->un_last_state;
9352 } else {
9353 /*
9354 * We have gone through SUSPEND/RESUME while
9355 * we were offline. Restore the last state.
9356 */
9357 un->un_state = un->un_save_state;
9358 }
9359 }
9360 mutex_exit(SD_MUTEX(un));
9361
9362 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
9363 } else if (event == un->un_remove_event) {
9364 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
9365 mutex_enter(SD_MUTEX(un));
9366 /*
9367 * We need to handle an event callback that occurs during
9368 * the suspend operation, since we don't prevent it.
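 *
 * Summary of the remove-event transitions below (a sketch of
 * the code that follows):
 *
 *	un_state == SD_STATE_OFFLINE	-> unchanged
 *	un_state == SD_STATE_SUSPENDED	-> un_last_state = SD_STATE_OFFLINE
 *	any other state			-> New_state(un, SD_STATE_OFFLINE)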
9369 */ 9370 if (un->un_state != SD_STATE_OFFLINE) { 9371 if (un->un_state != SD_STATE_SUSPENDED) { 9372 New_state(un, SD_STATE_OFFLINE); 9373 } else { 9374 un->un_last_state = SD_STATE_OFFLINE; 9375 } 9376 } 9377 mutex_exit(SD_MUTEX(un)); 9378 } else { 9379 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 9380 "!Unknown event\n"); 9381 } 9382 9383 } 9384 #endif 9385 9386 /* 9387 * Values related to caching mode page depending on whether the unit is ATAPI. 9388 */ 9389 #define SDC_CDB_GROUP(un) ((un->un_f_cfg_is_atapi == TRUE) ? \ 9390 CDB_GROUP1 : CDB_GROUP0) 9391 #define SDC_HDRLEN(un) ((un->un_f_cfg_is_atapi == TRUE) ? \ 9392 MODE_HEADER_LENGTH_GRP2 : MODE_HEADER_LENGTH) 9393 /* 9394 * Use mode_cache_scsi3 to ensure we get all of the mode sense data, otherwise 9395 * the mode select will fail (mode_cache_scsi3 is a superset of mode_caching). 9396 */ 9397 #define SDC_BUFLEN(un) (SDC_HDRLEN(un) + MODE_BLK_DESC_LENGTH + \ 9398 sizeof (struct mode_cache_scsi3)) 9399 9400 static int 9401 sd_get_caching_mode_page(sd_ssc_t *ssc, uchar_t page_control, uchar_t **header, 9402 int *bdlen) 9403 { 9404 struct sd_lun *un = ssc->ssc_un; 9405 struct mode_caching *mode_caching_page; 9406 size_t buflen = SDC_BUFLEN(un); 9407 int hdrlen = SDC_HDRLEN(un); 9408 int rval; 9409 9410 /* 9411 * Do a test unit ready, otherwise a mode sense may not work if this 9412 * is the first command sent to the device after boot. 9413 */ 9414 if (sd_send_scsi_TEST_UNIT_READY(ssc, 0) != 0) 9415 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9416 9417 /* 9418 * Allocate memory for the retrieved mode page and its headers. Set 9419 * a pointer to the page itself. 9420 */ 9421 *header = kmem_zalloc(buflen, KM_SLEEP); 9422 9423 /* Get the information from the device */ 9424 rval = sd_send_scsi_MODE_SENSE(ssc, SDC_CDB_GROUP(un), *header, buflen, 9425 page_control | MODEPAGE_CACHING, SD_PATH_DIRECT); 9426 if (rval != 0) { 9427 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, "%s: Mode Sense Failed\n", 9428 __func__); 9429 goto mode_sense_failed; 9430 } 9431 9432 /* 9433 * Determine size of Block Descriptors in order to locate 9434 * the mode page data. ATAPI devices return 0, SCSI devices 9435 * should return MODE_BLK_DESC_LENGTH. 9436 */ 9437 if (un->un_f_cfg_is_atapi == TRUE) { 9438 struct mode_header_grp2 *mhp = 9439 (struct mode_header_grp2 *)(*header); 9440 *bdlen = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9441 } else { 9442 *bdlen = ((struct mode_header *)(*header))->bdesc_length; 9443 } 9444 9445 if (*bdlen > MODE_BLK_DESC_LENGTH) { 9446 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0, 9447 "%s: Mode Sense returned invalid block descriptor length\n", 9448 __func__); 9449 rval = EIO; 9450 goto mode_sense_failed; 9451 } 9452 9453 mode_caching_page = (struct mode_caching *)(*header + hdrlen + *bdlen); 9454 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9455 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 9456 "%s: Mode Sense caching page code mismatch %d\n", 9457 __func__, mode_caching_page->mode_page.code); 9458 rval = EIO; 9459 } 9460 9461 mode_sense_failed: 9462 if (rval != 0) { 9463 kmem_free(*header, buflen); 9464 *header = NULL; 9465 *bdlen = 0; 9466 } 9467 return (rval); 9468 } 9469 9470 /* 9471 * Function: sd_cache_control() 9472 * 9473 * Description: This routine is the driver entry point for setting 9474 * read and write caching by modifying the WCE (write cache 9475 * enable) and RCD (read cache disable) bits of mode 9476 * page 8 (MODEPAGE_CACHING). 
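 *
 * For example, a caller that wants to enable the write cache
 * while leaving the read cache setting untouched would issue
 * something like the following (a sketch, assuming the usual
 * SD_CACHE_* flag values from sddef.h):
 *
 *	rval = sd_cache_control(ssc, SD_CACHE_NOACTION,
 *	    SD_CACHE_ENABLE);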
9477 * 9478 * Arguments: ssc - ssc contains pointer to driver soft state 9479 * (unit) structure for this target. 9480 * rcd_flag - flag for controlling the read cache 9481 * wce_flag - flag for controlling the write cache 9482 * 9483 * Return Code: EIO 9484 * code returned by sd_send_scsi_MODE_SENSE and 9485 * sd_send_scsi_MODE_SELECT 9486 * 9487 * Context: Kernel Thread 9488 */ 9489 9490 static int 9491 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag) 9492 { 9493 struct sd_lun *un = ssc->ssc_un; 9494 struct mode_caching *mode_caching_page; 9495 uchar_t *header; 9496 size_t buflen = SDC_BUFLEN(un); 9497 int hdrlen = SDC_HDRLEN(un); 9498 int bdlen; 9499 int rval; 9500 9501 rval = sd_get_caching_mode_page(ssc, MODEPAGE_CURRENT, &header, &bdlen); 9502 switch (rval) { 9503 case 0: 9504 /* Check the relevant bits on successful mode sense */ 9505 mode_caching_page = (struct mode_caching *)(header + hdrlen + 9506 bdlen); 9507 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 9508 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 9509 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 9510 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 9511 size_t sbuflen; 9512 uchar_t save_pg; 9513 9514 /* 9515 * Construct select buffer length based on the 9516 * length of the sense data returned. 9517 */ 9518 sbuflen = hdrlen + bdlen + sizeof (struct mode_page) + 9519 (int)mode_caching_page->mode_page.length; 9520 9521 /* Set the caching bits as requested */ 9522 if (rcd_flag == SD_CACHE_ENABLE) 9523 mode_caching_page->rcd = 0; 9524 else if (rcd_flag == SD_CACHE_DISABLE) 9525 mode_caching_page->rcd = 1; 9526 9527 if (wce_flag == SD_CACHE_ENABLE) 9528 mode_caching_page->wce = 1; 9529 else if (wce_flag == SD_CACHE_DISABLE) 9530 mode_caching_page->wce = 0; 9531 9532 /* 9533 * Save the page if the mode sense says the 9534 * drive supports it. 9535 */ 9536 save_pg = mode_caching_page->mode_page.ps ? 9537 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 9538 9539 /* Clear reserved bits before mode select */ 9540 mode_caching_page->mode_page.ps = 0; 9541 9542 /* 9543 * Clear out mode header for mode select. 9544 * The rest of the retrieved page will be reused. 9545 */ 9546 bzero(header, hdrlen); 9547 9548 if (un->un_f_cfg_is_atapi == TRUE) { 9549 struct mode_header_grp2 *mhp = 9550 (struct mode_header_grp2 *)header; 9551 mhp->bdesc_length_hi = bdlen >> 8; 9552 mhp->bdesc_length_lo = (uchar_t)bdlen & 0xff; 9553 } else { 9554 ((struct mode_header *)header)->bdesc_length = 9555 bdlen; 9556 } 9557 9558 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9559 9560 /* Issue mode select to change the cache settings */ 9561 rval = sd_send_scsi_MODE_SELECT(ssc, SDC_CDB_GROUP(un), 9562 header, sbuflen, save_pg, SD_PATH_DIRECT); 9563 } 9564 kmem_free(header, buflen); 9565 break; 9566 case EIO: 9567 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9568 break; 9569 default: 9570 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9571 break; 9572 } 9573 9574 return (rval); 9575 } 9576 9577 9578 /* 9579 * Function: sd_get_write_cache_enabled() 9580 * 9581 * Description: This routine is the driver entry point for determining if write 9582 * caching is enabled. It examines the WCE (write cache enable) 9583 * bits of mode page 8 (MODEPAGE_CACHING) with Page Control field 9584 * bits set to MODEPAGE_CURRENT. 9585 * 9586 * Arguments: ssc - ssc contains pointer to driver soft state 9587 * (unit) structure for this target. 
9588 * is_enabled - pointer to int where write cache enabled state 9589 * is returned (non-zero -> write cache enabled) 9590 * 9591 * Return Code: EIO 9592 * code returned by sd_send_scsi_MODE_SENSE 9593 * 9594 * Context: Kernel Thread 9595 * 9596 * NOTE: If ioctl is added to disable write cache, this sequence should 9597 * be followed so that no locking is required for accesses to 9598 * un->un_f_write_cache_enabled: 9599 * do mode select to clear wce 9600 * do synchronize cache to flush cache 9601 * set un->un_f_write_cache_enabled = FALSE 9602 * 9603 * Conversely, an ioctl to enable the write cache should be done 9604 * in this order: 9605 * set un->un_f_write_cache_enabled = TRUE 9606 * do mode select to set wce 9607 */ 9608 9609 static int 9610 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled) 9611 { 9612 struct sd_lun *un = ssc->ssc_un; 9613 struct mode_caching *mode_caching_page; 9614 uchar_t *header; 9615 size_t buflen = SDC_BUFLEN(un); 9616 int hdrlen = SDC_HDRLEN(un); 9617 int bdlen; 9618 int rval; 9619 9620 /* In case of error, flag as enabled */ 9621 *is_enabled = TRUE; 9622 9623 rval = sd_get_caching_mode_page(ssc, MODEPAGE_CURRENT, &header, &bdlen); 9624 switch (rval) { 9625 case 0: 9626 mode_caching_page = (struct mode_caching *)(header + hdrlen + 9627 bdlen); 9628 *is_enabled = mode_caching_page->wce; 9629 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 9630 kmem_free(header, buflen); 9631 break; 9632 case EIO: { 9633 /* 9634 * Some disks do not support Mode Sense(6), we 9635 * should ignore this kind of error (sense key is 9636 * 0x5 - illegal request). 9637 */ 9638 uint8_t *sensep; 9639 int senlen; 9640 9641 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 9642 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 9643 ssc->ssc_uscsi_cmd->uscsi_rqresid); 9644 9645 if (senlen > 0 && 9646 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 9647 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 9648 } else { 9649 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9650 } 9651 break; 9652 } 9653 default: 9654 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9655 break; 9656 } 9657 9658 return (rval); 9659 } 9660 9661 /* 9662 * Function: sd_get_write_cache_changeable() 9663 * 9664 * Description: This routine is the driver entry point for determining if write 9665 * caching is changeable. It examines the WCE (write cache enable) 9666 * bits of mode page 8 (MODEPAGE_CACHING) with Page Control field 9667 * bits set to MODEPAGE_CHANGEABLE. 9668 * 9669 * Arguments: ssc - ssc contains pointer to driver soft state 9670 * (unit) structure for this target. 
9671 * is_changeable - pointer to int where write cache changeable 9672 * state is returned (non-zero -> write cache 9673 * changeable) 9674 * 9675 * Context: Kernel Thread 9676 */ 9677 9678 static void 9679 sd_get_write_cache_changeable(sd_ssc_t *ssc, int *is_changeable) 9680 { 9681 struct sd_lun *un = ssc->ssc_un; 9682 struct mode_caching *mode_caching_page; 9683 uchar_t *header; 9684 size_t buflen = SDC_BUFLEN(un); 9685 int hdrlen = SDC_HDRLEN(un); 9686 int bdlen; 9687 int rval; 9688 9689 /* In case of error, flag as enabled */ 9690 *is_changeable = TRUE; 9691 9692 rval = sd_get_caching_mode_page(ssc, MODEPAGE_CHANGEABLE, &header, 9693 &bdlen); 9694 switch (rval) { 9695 case 0: 9696 mode_caching_page = (struct mode_caching *)(header + hdrlen + 9697 bdlen); 9698 *is_changeable = mode_caching_page->wce; 9699 kmem_free(header, buflen); 9700 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 9701 break; 9702 case EIO: 9703 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9704 break; 9705 default: 9706 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9707 break; 9708 } 9709 } 9710 9711 /* 9712 * Function: sd_get_nv_sup() 9713 * 9714 * Description: This routine is the driver entry point for 9715 * determining whether non-volatile cache is supported. This 9716 * determination process works as follows: 9717 * 9718 * 1. sd first queries sd.conf on whether 9719 * suppress_cache_flush bit is set for this device. 9720 * 9721 * 2. if not there, then queries the internal disk table. 9722 * 9723 * 3. if either sd.conf or internal disk table specifies 9724 * cache flush be suppressed, we don't bother checking 9725 * NV_SUP bit. 9726 * 9727 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 9728 * the optional INQUIRY VPD page 0x86. If the device 9729 * supports VPD page 0x86, sd examines the NV_SUP 9730 * (non-volatile cache support) bit in the INQUIRY VPD page 9731 * 0x86: 9732 * o If NV_SUP bit is set, sd assumes the device has a 9733 * non-volatile cache and set the 9734 * un_f_sync_nv_supported to TRUE. 9735 * o Otherwise cache is not non-volatile, 9736 * un_f_sync_nv_supported is set to FALSE. 9737 * 9738 * Arguments: un - driver soft state (unit) structure 9739 * 9740 * Return Code: 9741 * 9742 * Context: Kernel Thread 9743 */ 9744 9745 static void 9746 sd_get_nv_sup(sd_ssc_t *ssc) 9747 { 9748 int rval = 0; 9749 uchar_t *inq86 = NULL; 9750 size_t inq86_len = MAX_INQUIRY_SIZE; 9751 size_t inq86_resid = 0; 9752 struct dk_callback *dkc; 9753 struct sd_lun *un; 9754 9755 ASSERT(ssc != NULL); 9756 un = ssc->ssc_un; 9757 ASSERT(un != NULL); 9758 9759 mutex_enter(SD_MUTEX(un)); 9760 9761 /* 9762 * Be conservative on the device's support of 9763 * SYNC_NV bit: un_f_sync_nv_supported is 9764 * initialized to be false. 9765 */ 9766 un->un_f_sync_nv_supported = FALSE; 9767 9768 /* 9769 * If either sd.conf or internal disk table 9770 * specifies cache flush be suppressed, then 9771 * we don't bother checking NV_SUP bit. 
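 *
 * For reference, byte 6 of the Extended INQUIRY Data VPD
 * page (0x86) is laid out as follows in SPC-3 (sketch):
 *
 *	bit 0	V_SUP	volatile cache supported
 *	bit 1	NV_SUP	non-volatile cache supported
 *
 * The SD_VPD_NV_SUP mask applied to inq86[6] below selects
 * the NV_SUP bit.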
9772 */
9773 if (un->un_f_suppress_cache_flush == TRUE) {
9774 mutex_exit(SD_MUTEX(un));
9775 return;
9776 }
9777
9778 if (sd_check_vpd_page_support(ssc) == 0 &&
9779 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
9780 mutex_exit(SD_MUTEX(un));
9781 /* collect page 86 data if available */
9782 inq86 = kmem_zalloc(inq86_len, KM_SLEEP);
9783
9784 rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len,
9785 0x01, 0x86, &inq86_resid);
9786
9787 if (rval == 0 && (inq86_len - inq86_resid > 6)) {
9788 SD_TRACE(SD_LOG_COMMON, un,
9789 "sd_get_nv_sup: \
9790 successfully get VPD page: %x \
9791 PAGE LENGTH: %x BYTE 6: %x\n",
9792 inq86[1], inq86[3], inq86[6]);
9793
9794 mutex_enter(SD_MUTEX(un));
9795 /*
9796 * Check the NV_SUP bit: un_f_sync_nv_supported is
9797 * set to TRUE only if the device reports the
9798 * NV_SUP bit as 1.
9799 */
9800 if (inq86[6] & SD_VPD_NV_SUP) {
9801 un->un_f_sync_nv_supported = TRUE;
9802 }
9803 mutex_exit(SD_MUTEX(un));
9804 } else if (rval != 0) {
9805 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9806 }
9807
9808 kmem_free(inq86, inq86_len);
9809 } else {
9810 mutex_exit(SD_MUTEX(un));
9811 }
9812
9813 /*
9814 * Send a SYNC CACHE command to check whether the
9815 * SYNC_NV bit is supported. By this point
9816 * un_f_sync_nv_supported should be set to the correct value.
9817 */
9818 mutex_enter(SD_MUTEX(un));
9819 if (un->un_f_sync_nv_supported) {
9820 mutex_exit(SD_MUTEX(un));
9821 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
9822 dkc->dkc_flag = FLUSH_VOLATILE;
9823 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
9824
9825 /*
9826 * Send a TEST UNIT READY command to the device. This should
9827 * clear any outstanding UNIT ATTENTION that may be present.
9828 */
9829 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
9830 if (rval != 0)
9831 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9832
9833 kmem_free(dkc, sizeof (struct dk_callback));
9834 } else {
9835 mutex_exit(SD_MUTEX(un));
9836 }
9837
9838 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \
9839 un_f_suppress_cache_flush is set to %d\n",
9840 un->un_f_suppress_cache_flush);
9841 }
9842
9843 /*
9844 * Function: sd_make_device
9845 *
9846 * Description: Utility routine to return the Solaris device number from
9847 * the data in the device's dev_info structure.
9848 *
9849 * Return Code: The Solaris device number
9850 *
9851 * Context: Any
9852 */
9853
9854 static dev_t
9855 sd_make_device(dev_info_t *devi)
9856 {
9857 return (makedevice(ddi_driver_major(devi),
9858 ddi_get_instance(devi) << SDUNIT_SHIFT));
9859 }
9860
9861
9862 /*
9863 * Function: sd_pm_entry
9864 *
9865 * Description: Called at the start of a new command to manage power
9866 * and busy status of a device. This includes determining whether
9867 * the current power state of the device is sufficient for
9868 * performing the command or whether it must be changed.
9869 * The PM framework is notified appropriately.
9870 * Only with a return status of DDI_SUCCESS will the
9871 * component be marked busy to the framework.
9872 *
9873 * All callers of sd_pm_entry must check the return status
9874 * and only call sd_pm_exit if it was DDI_SUCCESS. A status
9875 * of DDI_FAILURE indicates the device failed to power up.
9876 * In this case un_pm_count has been adjusted so the result
9877 * on exit is still powered down, i.e. count is less than 0.
9878 * Calling sd_pm_exit with this count value hits an ASSERT.
9879 *
9880 * Return Code: DDI_SUCCESS or DDI_FAILURE
9881 *
9882 * Context: Kernel thread context.
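 *
 * Typical caller pattern (a sketch, mirroring sdopen() and
 * sdclose() below):
 *
 *	if (sd_pm_entry(un) == DDI_SUCCESS) {
 *		... issue the command(s) ...
 *		sd_pm_exit(un);
 *	} else {
 *		... fail the request; do not call sd_pm_exit() ...
 *	}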
9883 */
9884
9885 static int
9886 sd_pm_entry(struct sd_lun *un)
9887 {
9888 int return_status = DDI_SUCCESS;
9889
9890 ASSERT(!mutex_owned(SD_MUTEX(un)));
9891 ASSERT(!mutex_owned(&un->un_pm_mutex));
9892
9893 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");
9894
9895 if (un->un_f_pm_is_enabled == FALSE) {
9896 SD_TRACE(SD_LOG_IO_PM, un,
9897 "sd_pm_entry: exiting, PM not enabled\n");
9898 return (return_status);
9899 }
9900
9901 /*
9902 * Just increment a counter if PM is enabled. On the transition from
9903 * 0 ==> 1, mark the device as busy. The iodone side will decrement
9904 * the count with each IO and mark the device as idle when the count
9905 * hits 0.
9906 *
9907 * If the count is less than 0 the device is powered down. If a powered
9908 * down device is successfully powered up then the count must be
9909 * incremented to reflect the power up. Note that it'll get incremented
9910 * a second time to become busy.
9911 *
9912 * Because the following has the potential to change the device state
9913 * and must release the un_pm_mutex to do so, only one thread can be
9914 * allowed through at a time.
9915 */
9916
9917 mutex_enter(&un->un_pm_mutex);
9918 while (un->un_pm_busy == TRUE) {
9919 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
9920 }
9921 un->un_pm_busy = TRUE;
9922
9923 if (un->un_pm_count < 1) {
9924
9925 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");
9926
9927 /*
9928 * Indicate we are now busy so the framework won't attempt to
9929 * power down the device. This call will only fail if either
9930 * we passed a bad component number or the device has no
9931 * components. Neither of these should ever happen.
9932 */
9933 mutex_exit(&un->un_pm_mutex);
9934 return_status = pm_busy_component(SD_DEVINFO(un), 0);
9935 ASSERT(return_status == DDI_SUCCESS);
9936
9937 mutex_enter(&un->un_pm_mutex);
9938
9939 if (un->un_pm_count < 0) {
9940 mutex_exit(&un->un_pm_mutex);
9941
9942 SD_TRACE(SD_LOG_IO_PM, un,
9943 "sd_pm_entry: power up component\n");
9944
9945 /*
9946 * pm_raise_power will cause sdpower to be called
9947 * which brings the device power level to the
9948 * desired state. If successful, un_pm_count and
9949 * un_power_level will be updated appropriately.
9950 */
9951 return_status = pm_raise_power(SD_DEVINFO(un), 0,
9952 SD_PM_STATE_ACTIVE(un));
9953
9954 mutex_enter(&un->un_pm_mutex);
9955
9956 if (return_status != DDI_SUCCESS) {
9957 /*
9958 * Power up failed.
9959 * Idle the device and adjust the count
9960 * so the result on exit is that we're
9961 * still powered down, i.e. count is less than 0.
9962 */
9963 SD_TRACE(SD_LOG_IO_PM, un,
9964 "sd_pm_entry: power up failed,"
9965 " idle the component\n");
9966
9967 (void) pm_idle_component(SD_DEVINFO(un), 0);
9968 un->un_pm_count--;
9969 } else {
9970 /*
9971 * Device is powered up, verify the
9972 * count is non-negative.
9973 * This is debug only.
9974 */
9975 ASSERT(un->un_pm_count == 0);
9976 }
9977 }
9978
9979 if (return_status == DDI_SUCCESS) {
9980 /*
9981 * For performance, now that the device has been tagged
9982 * as busy, and it's known to be powered up, update the
9983 * chain types to use jump tables that do not include
9984 * pm. This significantly lowers the overhead and
9985 * therefore improves performance.
9986 */ 9987 9988 mutex_exit(&un->un_pm_mutex); 9989 mutex_enter(SD_MUTEX(un)); 9990 SD_TRACE(SD_LOG_IO_PM, un, 9991 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9992 un->un_uscsi_chain_type); 9993 9994 if (un->un_f_non_devbsize_supported) { 9995 un->un_buf_chain_type = 9996 SD_CHAIN_INFO_RMMEDIA_NO_PM; 9997 } else { 9998 un->un_buf_chain_type = 9999 SD_CHAIN_INFO_DISK_NO_PM; 10000 } 10001 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 10002 10003 SD_TRACE(SD_LOG_IO_PM, un, 10004 " changed uscsi_chain_type to %d\n", 10005 un->un_uscsi_chain_type); 10006 mutex_exit(SD_MUTEX(un)); 10007 mutex_enter(&un->un_pm_mutex); 10008 10009 if (un->un_pm_idle_timeid == NULL) { 10010 /* 300 ms. */ 10011 un->un_pm_idle_timeid = 10012 timeout(sd_pm_idletimeout_handler, un, 10013 (drv_usectohz((clock_t)300000))); 10014 /* 10015 * Include an extra call to busy which keeps the 10016 * device busy with-respect-to the PM layer 10017 * until the timer fires, at which time it'll 10018 * get the extra idle call. 10019 */ 10020 (void) pm_busy_component(SD_DEVINFO(un), 0); 10021 } 10022 } 10023 } 10024 un->un_pm_busy = FALSE; 10025 /* Next... */ 10026 cv_signal(&un->un_pm_busy_cv); 10027 10028 un->un_pm_count++; 10029 10030 SD_TRACE(SD_LOG_IO_PM, un, 10031 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 10032 10033 mutex_exit(&un->un_pm_mutex); 10034 10035 return (return_status); 10036 } 10037 10038 10039 /* 10040 * Function: sd_pm_exit 10041 * 10042 * Description: Called at the completion of a command to manage busy 10043 * status for the device. If the device becomes idle the 10044 * PM framework is notified. 10045 * 10046 * Context: Kernel thread context 10047 */ 10048 10049 static void 10050 sd_pm_exit(struct sd_lun *un) 10051 { 10052 ASSERT(!mutex_owned(SD_MUTEX(un))); 10053 ASSERT(!mutex_owned(&un->un_pm_mutex)); 10054 10055 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 10056 10057 /* 10058 * After attach the following flag is only read, so don't 10059 * take the penalty of acquiring a mutex for it. 10060 */ 10061 if (un->un_f_pm_is_enabled == TRUE) { 10062 10063 mutex_enter(&un->un_pm_mutex); 10064 un->un_pm_count--; 10065 10066 SD_TRACE(SD_LOG_IO_PM, un, 10067 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 10068 10069 ASSERT(un->un_pm_count >= 0); 10070 if (un->un_pm_count == 0) { 10071 mutex_exit(&un->un_pm_mutex); 10072 10073 SD_TRACE(SD_LOG_IO_PM, un, 10074 "sd_pm_exit: idle component\n"); 10075 10076 (void) pm_idle_component(SD_DEVINFO(un), 0); 10077 10078 } else { 10079 mutex_exit(&un->un_pm_mutex); 10080 } 10081 } 10082 10083 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 10084 } 10085 10086 10087 /* 10088 * Function: sdopen 10089 * 10090 * Description: Driver's open(9e) entry point function. 
10091 *
10092 * Arguments: dev_p - pointer to device number
10093 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
10094 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10095 * cred_p - user credential pointer
10096 *
10097 * Return Code: EINVAL
10098 * ENXIO
10099 * EIO
10100 * EROFS
10101 * EBUSY
10102 *
10103 * Context: Kernel thread context
10104 */
10105 /* ARGSUSED */
10106 static int
10107 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
10108 {
10109 struct sd_lun *un;
10110 int nodelay;
10111 int part;
10112 uint64_t partmask;
10113 int instance;
10114 dev_t dev;
10115 int rval = EIO;
10116 diskaddr_t nblks = 0;
10117 diskaddr_t label_cap;
10118
10119 /* Validate the open type */
10120 if (otyp >= OTYPCNT) {
10121 return (EINVAL);
10122 }
10123
10124 dev = *dev_p;
10125 instance = SDUNIT(dev);
10126 mutex_enter(&sd_detach_mutex);
10127
10128 /*
10129 * Fail the open if there is no softstate for the instance, or
10130 * if another thread somewhere is trying to detach the instance.
10131 */
10132 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
10133 (un->un_detach_count != 0)) {
10134 mutex_exit(&sd_detach_mutex);
10135 /*
10136 * The probe cache only needs to be cleared when open (9e) fails
10137 * with ENXIO (4238046).
10138 */
10139 /*
10140 * Unconditionally clearing the probe cache is ok with
10141 * separate sd/ssd binaries; on the x86 platform it can be an
10142 * issue with both parallel
10143 * and fibre in one binary.
10144 */
10145 sd_scsi_clear_probe_cache();
10146 return (ENXIO);
10147 }
10148
10149 /*
10150 * The un_layer_count is to prevent another thread in specfs from
10151 * trying to detach the instance, which can happen when we are
10152 * called from a higher-layer driver instead of thru specfs.
10153 * This will not be needed when DDI provides a layered driver
10154 * interface that allows specfs to know that an instance is in
10155 * use by a layered driver & should not be detached.
10156 *
10157 * Note: the semantics for layered driver opens are exactly one
10158 * close for every open.
10159 */
10160 if (otyp == OTYP_LYR) {
10161 un->un_layer_count++;
10162 }
10163
10164 /*
10165 * Keep a count of the current # of opens in progress. This is because
10166 * some layered drivers try to call us as a regular open. This can
10167 * cause problems that we cannot prevent; however, by keeping this
10168 * count we can at least keep our open and detach routines from racing
10169 * against each other under such conditions.
10170 */
10171 un->un_opens_in_progress++;
10172 mutex_exit(&sd_detach_mutex);
10173
10174 nodelay = (flag & (FNDELAY | FNONBLOCK));
10175 part = SDPART(dev);
10176 partmask = 1 << part;
10177
10178 /*
10179 * We use a semaphore here in order to serialize
10180 * open and close requests on the device.
10181 */
10182 sema_p(&un->un_semoclose);
10183
10184 mutex_enter(SD_MUTEX(un));
10185
10186 /*
10187 * All device accesses go thru sdstrategy() where we check
10188 * on suspend status but there could be a scsi_poll command,
10189 * which bypasses sdstrategy(), so we need to check pm
10190 * status.
10191 */
10192
10193 if (!nodelay) {
10194 while ((un->un_state == SD_STATE_SUSPENDED) ||
10195 (un->un_state == SD_STATE_PM_CHANGING)) {
10196 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10197 }
10198
10199 mutex_exit(SD_MUTEX(un));
10200 if (sd_pm_entry(un) != DDI_SUCCESS) {
10201 rval = EIO;
10202 SD_ERROR(SD_LOG_OPEN_CLOSE, un,
10203 "sdopen: sd_pm_entry failed\n");
10204 goto open_failed_with_pm;
10205 }
10206 mutex_enter(SD_MUTEX(un));
10207 }
10208
10209 /* check for previous exclusive open */
10210 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
10211 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10212 "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
10213 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
10214
10215 if (un->un_exclopen & (partmask)) {
10216 goto excl_open_fail;
10217 }
10218
10219 if (flag & FEXCL) {
10220 int i;
10221 if (un->un_ocmap.lyropen[part]) {
10222 goto excl_open_fail;
10223 }
10224 for (i = 0; i < (OTYPCNT - 1); i++) {
10225 if (un->un_ocmap.regopen[i] & (partmask)) {
10226 goto excl_open_fail;
10227 }
10228 }
10229 }
10230
10231 /*
10232 * Check the write permission if this is a removable media device,
10233 * NDELAY has not been set, and write permission is requested.
10234 *
10235 * Note: If NDELAY was set and this is write-protected media the WRITE
10236 * attempt will fail with EIO as part of the I/O processing. This is a
10237 * more permissive implementation that allows the open to succeed and
10238 * WRITE attempts to fail when appropriate.
10239 */
10240 if (un->un_f_chk_wp_open) {
10241 if ((flag & FWRITE) && (!nodelay)) {
10242 mutex_exit(SD_MUTEX(un));
10243 /*
10244 * For a writable DVD drive, defer the write-permission
10245 * check to sdstrategy and do not fail the open even if
10246 * FWRITE is set, since whether the device is writable
10247 * depends upon the media, and the media can change
10248 * after the call to open().
10249 */
10250 if (un->un_f_dvdram_writable_device == FALSE) {
10251 if (ISCD(un) || sr_check_wp(dev)) {
10252 rval = EROFS;
10253 mutex_enter(SD_MUTEX(un));
10254 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10255 "write to cd or write protected media\n");
10256 goto open_fail;
10257 }
10258 }
10259 mutex_enter(SD_MUTEX(un));
10260 }
10261 }
10262
10263 /*
10264 * If opening in NDELAY/NONBLOCK mode, just return.
10265 * Check if disk is ready and has a valid geometry later.
10266 */
10267 if (!nodelay) {
10268 sd_ssc_t *ssc;
10269
10270 mutex_exit(SD_MUTEX(un));
10271 ssc = sd_ssc_init(un);
10272 rval = sd_ready_and_valid(ssc, part);
10273 sd_ssc_fini(ssc);
10274 mutex_enter(SD_MUTEX(un));
10275 /*
10276 * Fail if device is not ready or if the number of disk
10277 * blocks is zero or negative for non-CD devices.
10278 */
10279
10280 nblks = 0;
10281
10282 if (rval == SD_READY_VALID && (!ISCD(un))) {
10283 /* if cmlb_partinfo fails, nblks remains 0 */
10284 mutex_exit(SD_MUTEX(un));
10285 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
10286 NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
10287 mutex_enter(SD_MUTEX(un));
10288 }
10289
10290 if ((rval != SD_READY_VALID) ||
10291 (!ISCD(un) && nblks <= 0)) {
10292 rval = un->un_f_has_removable_media ? ENXIO : EIO;
10293 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10294 "device not ready or invalid disk block value\n");
10295 goto open_fail;
10296 }
10297 #if defined(__i386) || defined(__amd64)
10298 } else {
10299 uchar_t *cp;
10300 /*
10301 * x86 requires special nodelay handling, so that p0 is
10302 * always defined and accessible.
10303 * Invalidate geometry only if device is not already open. 10304 */ 10305 cp = &un->un_ocmap.chkd[0]; 10306 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10307 if (*cp != (uchar_t)0) { 10308 break; 10309 } 10310 cp++; 10311 } 10312 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10313 mutex_exit(SD_MUTEX(un)); 10314 cmlb_invalidate(un->un_cmlbhandle, 10315 (void *)SD_PATH_DIRECT); 10316 mutex_enter(SD_MUTEX(un)); 10317 } 10318 10319 #endif 10320 } 10321 10322 if (otyp == OTYP_LYR) { 10323 un->un_ocmap.lyropen[part]++; 10324 } else { 10325 un->un_ocmap.regopen[otyp] |= partmask; 10326 } 10327 10328 /* Set up open and exclusive open flags */ 10329 if (flag & FEXCL) { 10330 un->un_exclopen |= (partmask); 10331 } 10332 10333 /* 10334 * If the lun is EFI labeled and lun capacity is greater than the 10335 * capacity contained in the label, log a sys-event to notify the 10336 * interested module. 10337 * To avoid an infinite loop of logging sys-event, we only log the 10338 * event when the lun is not opened in NDELAY mode. The event handler 10339 * should open the lun in NDELAY mode. 10340 */ 10341 if (!nodelay) { 10342 mutex_exit(SD_MUTEX(un)); 10343 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 10344 (void*)SD_PATH_DIRECT) == 0) { 10345 mutex_enter(SD_MUTEX(un)); 10346 if (un->un_f_blockcount_is_valid && 10347 un->un_blockcount > label_cap && 10348 un->un_f_expnevent == B_FALSE) { 10349 un->un_f_expnevent = B_TRUE; 10350 mutex_exit(SD_MUTEX(un)); 10351 sd_log_lun_expansion_event(un, 10352 (nodelay ? KM_NOSLEEP : KM_SLEEP)); 10353 mutex_enter(SD_MUTEX(un)); 10354 } 10355 } else { 10356 mutex_enter(SD_MUTEX(un)); 10357 } 10358 } 10359 10360 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10361 "open of part %d type %d\n", part, otyp); 10362 10363 mutex_exit(SD_MUTEX(un)); 10364 if (!nodelay) { 10365 sd_pm_exit(un); 10366 } 10367 10368 sema_v(&un->un_semoclose); 10369 10370 mutex_enter(&sd_detach_mutex); 10371 un->un_opens_in_progress--; 10372 mutex_exit(&sd_detach_mutex); 10373 10374 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 10375 return (DDI_SUCCESS); 10376 10377 excl_open_fail: 10378 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 10379 rval = EBUSY; 10380 10381 open_fail: 10382 mutex_exit(SD_MUTEX(un)); 10383 10384 /* 10385 * On a failed open we must exit the pm management. 10386 */ 10387 if (!nodelay) { 10388 sd_pm_exit(un); 10389 } 10390 open_failed_with_pm: 10391 sema_v(&un->un_semoclose); 10392 10393 mutex_enter(&sd_detach_mutex); 10394 un->un_opens_in_progress--; 10395 if (otyp == OTYP_LYR) { 10396 un->un_layer_count--; 10397 } 10398 mutex_exit(&sd_detach_mutex); 10399 10400 return (rval); 10401 } 10402 10403 10404 /* 10405 * Function: sdclose 10406 * 10407 * Description: Driver's close(9e) entry point function. 
10408 *
10409 * Arguments: dev - device number
10410 * flag - file status flag, informational only
10411 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10412 * cred_p - user credential pointer
10413 *
10414 * Return Code: ENXIO
10415 *
10416 * Context: Kernel thread context
10417 */
10418 /* ARGSUSED */
10419 static int
10420 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
10421 {
10422 struct sd_lun *un;
10423 uchar_t *cp;
10424 int part;
10425 int nodelay;
10426 int rval = 0;
10427
10428 /* Validate the open type */
10429 if (otyp >= OTYPCNT) {
10430 return (ENXIO);
10431 }
10432
10433 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10434 return (ENXIO);
10435 }
10436
10437 part = SDPART(dev);
10438 nodelay = flag & (FNDELAY | FNONBLOCK);
10439
10440 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10441 "sdclose: close of part %d type %d\n", part, otyp);
10442
10443 /*
10444 * We use a semaphore here in order to serialize
10445 * open and close requests on the device.
10446 */
10447 sema_p(&un->un_semoclose);
10448
10449 mutex_enter(SD_MUTEX(un));
10450
10451 /* Don't proceed if power is being changed. */
10452 while (un->un_state == SD_STATE_PM_CHANGING) {
10453 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10454 }
10455
10456 if (un->un_exclopen & (1 << part)) {
10457 un->un_exclopen &= ~(1 << part);
10458 }
10459
10460 /* Update the open partition map */
10461 if (otyp == OTYP_LYR) {
10462 un->un_ocmap.lyropen[part] -= 1;
10463 } else {
10464 un->un_ocmap.regopen[otyp] &= ~(1 << part);
10465 }
10466
10467 cp = &un->un_ocmap.chkd[0];
10468 while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10469 if (*cp != (uchar_t)0) {
10470 break;
10471 }
10472 cp++;
10473 }
10474
10475 if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10476 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
10477
10478 /*
10479 * We avoid persistence upon the last close, and set
10480 * the throttle back to the maximum.
10481 */
10482 un->un_throttle = un->un_saved_throttle;
10483
10484 if (un->un_state == SD_STATE_OFFLINE) {
10485 if (un->un_f_is_fibre == FALSE) {
10486 scsi_log(SD_DEVINFO(un), sd_label,
10487 CE_WARN, "offline\n");
10488 }
10489 mutex_exit(SD_MUTEX(un));
10490 cmlb_invalidate(un->un_cmlbhandle,
10491 (void *)SD_PATH_DIRECT);
10492 mutex_enter(SD_MUTEX(un));
10493
10494 } else {
10495 /*
10496 * Flush any outstanding writes in NVRAM cache.
10497 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
10498 * cmd; it may not work for non-Pluto devices.
10499 * SYNCHRONIZE CACHE is not required for removables,
10500 * except DVD-RAM drives.
10501 *
10502 * Also note: because SYNCHRONIZE CACHE is currently
10503 * the only command issued here that requires the
10504 * drive be powered up, only do the power up before
10505 * sending the Sync Cache command. If additional
10506 * commands are added which require a powered up
10507 * drive, the following sequence may have to change.
10508 *
10509 * And finally, note that parallel SCSI on SPARC
10510 * only issues a Sync Cache to DVD-RAM, a newly
10511 * supported device.
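 *
 * The invocation below passes a NULL dk_callback pointer,
 * which (by assumption here) makes the flush synchronous; an
 * asynchronous flush would instead supply a dk_callback with
 * a completion routine. Sketch of the call that follows:
 *
 *	rval = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);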
10512 */
10513 #if defined(__i386) || defined(__amd64)
10514 if ((un->un_f_sync_cache_supported &&
10515 un->un_f_sync_cache_required) ||
10516 un->un_f_dvdram_writable_device == TRUE) {
10517 #else
10518 if (un->un_f_dvdram_writable_device == TRUE) {
10519 #endif
10520 mutex_exit(SD_MUTEX(un));
10521 if (sd_pm_entry(un) == DDI_SUCCESS) {
10522 rval =
10523 sd_send_scsi_SYNCHRONIZE_CACHE(un,
10524 NULL);
10525 /* ignore error if not supported */
10526 if (rval == ENOTSUP) {
10527 rval = 0;
10528 } else if (rval != 0) {
10529 rval = EIO;
10530 }
10531 sd_pm_exit(un);
10532 } else {
10533 rval = EIO;
10534 }
10535 mutex_enter(SD_MUTEX(un));
10536 }
10537
10538 /*
10539 * For devices which support DOOR_LOCK, send an ALLOW
10540 * MEDIA REMOVAL command, but don't get upset if it
10541 * fails. We need to raise the power of the drive before
10542 * we can call sd_send_scsi_DOORLOCK().
10543 */
10544 if (un->un_f_doorlock_supported) {
10545 mutex_exit(SD_MUTEX(un));
10546 if (sd_pm_entry(un) == DDI_SUCCESS) {
10547 sd_ssc_t *ssc;
10548
10549 ssc = sd_ssc_init(un);
10550 rval = sd_send_scsi_DOORLOCK(ssc,
10551 SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
10552 if (rval != 0)
10553 sd_ssc_assessment(ssc,
10554 SD_FMT_IGNORE);
10555 sd_ssc_fini(ssc);
10556
10557 sd_pm_exit(un);
10558 if (ISCD(un) && (rval != 0) &&
10559 (nodelay != 0)) {
10560 rval = ENXIO;
10561 }
10562 } else {
10563 rval = EIO;
10564 }
10565 mutex_enter(SD_MUTEX(un));
10566 }
10567
10568 /*
10569 * If a device has removable media, invalidate all
10570 * parameters related to media, such as geometry,
10571 * blocksize, and blockcount.
10572 */
10573 if (un->un_f_has_removable_media) {
10574 sr_ejected(un);
10575 }
10576
10577 /*
10578 * Destroy the cache (if it exists) which was
10579 * allocated for the write maps, since this is
10580 * the last close for this media.
10581 */
10582 if (un->un_wm_cache) {
10583 /*
10584 * Check if there are pending commands;
10585 * if there are, give a warning and
10586 * do not destroy the cache.
10587 */
10588 if (un->un_ncmds_in_driver > 0) {
10589 scsi_log(SD_DEVINFO(un),
10590 sd_label, CE_WARN,
10591 "Unable to clean up memory "
10592 "because of pending I/O\n");
10593 } else {
10594 kmem_cache_destroy(
10595 un->un_wm_cache);
10596 un->un_wm_cache = NULL;
10597 }
10598 }
10599 }
10600 }
10601
10602 mutex_exit(SD_MUTEX(un));
10603 sema_v(&un->un_semoclose);
10604
10605 if (otyp == OTYP_LYR) {
10606 mutex_enter(&sd_detach_mutex);
10607 /*
10608 * The detach routine may run when the layer count
10609 * drops to zero.
10610 */
10611 un->un_layer_count--;
10612 mutex_exit(&sd_detach_mutex);
10613 }
10614
10615 return (rval);
10616 }
10617
10618
10619 /*
10620 * Function: sd_ready_and_valid
10621 *
10622 * Description: Test if device is ready and has a valid geometry.
10623 *
10624 * Arguments: ssc - sd_ssc_t will contain un
10625 * part - partition number to validate
10626 *
10627 * Return Code: SD_READY_VALID ready and valid label
10628 * SD_NOT_READY_VALID not ready, no label
10629 * SD_RESERVED_BY_OTHERS reservation conflict
10630 *
10631 * Context: Never called at interrupt context.
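 *
 * Typical usage (a sketch, as in sdopen() and sdread()):
 *
 *	ssc = sd_ssc_init(un);
 *	rval = sd_ready_and_valid(ssc, part);
 *	sd_ssc_fini(ssc);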
10632 */
10633
10634 static int
10635 sd_ready_and_valid(sd_ssc_t *ssc, int part)
10636 {
10637 struct sd_errstats *stp;
10638 uint64_t capacity;
10639 uint_t lbasize;
10640 int rval = SD_READY_VALID;
10641 char name_str[48];
10642 boolean_t is_valid;
10643 struct sd_lun *un;
10644 int status;
10645
10646 ASSERT(ssc != NULL);
10647 un = ssc->ssc_un;
10648 ASSERT(un != NULL);
10649 ASSERT(!mutex_owned(SD_MUTEX(un)));
10650
10651 mutex_enter(SD_MUTEX(un));
10652 /*
10653 * If a device has removable media, we must check if media is
10654 * ready when checking if this device is ready and valid.
10655 */
10656 if (un->un_f_has_removable_media) {
10657 mutex_exit(SD_MUTEX(un));
10658 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10659
10660 if (status != 0) {
10661 rval = SD_NOT_READY_VALID;
10662 mutex_enter(SD_MUTEX(un));
10663
10664 /* Ignore all failed status for removable media */
10665 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10666
10667 goto done;
10668 }
10669
10670 is_valid = SD_IS_VALID_LABEL(un);
10671 mutex_enter(SD_MUTEX(un));
10672 if (!is_valid ||
10673 (un->un_f_blockcount_is_valid == FALSE) ||
10674 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
10675
10676 /* capacity has to be read every open. */
10677 mutex_exit(SD_MUTEX(un));
10678 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
10679 &lbasize, SD_PATH_DIRECT);
10680
10681 if (status != 0) {
10682 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10683
10684 cmlb_invalidate(un->un_cmlbhandle,
10685 (void *)SD_PATH_DIRECT);
10686 mutex_enter(SD_MUTEX(un));
10687 rval = SD_NOT_READY_VALID;
10688
10689 goto done;
10690 } else {
10691 mutex_enter(SD_MUTEX(un));
10692 sd_update_block_info(un, lbasize, capacity);
10693 }
10694 }
10695
10696 /*
10697 * Check if the media in the device is writable or not.
10698 */
10699 if (!is_valid && ISCD(un)) {
10700 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
10701 }
10702
10703 } else {
10704 /*
10705 * Do a test unit ready to clear any unit attention from non-cd
10706 * devices.
10707 */
10708 mutex_exit(SD_MUTEX(un));
10709
10710 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10711 if (status != 0) {
10712 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10713 }
10714
10715 mutex_enter(SD_MUTEX(un));
10716 }
10717
10718
10719 /*
10720 * If this is a non-512 block device, allocate space for
10721 * the wmap cache. This is being done here since every time
10722 * a media is changed this routine will be called and the
10723 * block size is a function of media rather than device.
10724 */
10725 if (((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR ||
10726 un->un_f_non_devbsize_supported) &&
10727 un->un_tgt_blocksize != DEV_BSIZE) ||
10728 un->un_f_enable_rmw) {
10729 if (!(un->un_wm_cache)) {
10730 (void) snprintf(name_str, sizeof (name_str),
10731 "%s%d_cache",
10732 ddi_driver_name(SD_DEVINFO(un)),
10733 ddi_get_instance(SD_DEVINFO(un)));
10734 un->un_wm_cache = kmem_cache_create(
10735 name_str, sizeof (struct sd_w_map),
10736 8, sd_wm_cache_constructor,
10737 sd_wm_cache_destructor, NULL,
10738 (void *)un, NULL, 0);
10739 if (!(un->un_wm_cache)) {
10740 rval = ENOMEM;
10741 goto done;
10742 }
10743 }
10744 }
10745
10746 if (un->un_state == SD_STATE_NORMAL) {
10747 /*
10748 * If the target is not yet ready here (defined by a TUR
10749 * failure), invalidate the geometry and print an 'offline'
10750 * message. This is a legacy message, as the state of the
10751 * target is not actually changed to SD_STATE_OFFLINE.
10752 * 10753 * If the TUR fails for EACCES (Reservation Conflict), 10754 * SD_RESERVED_BY_OTHERS will be returned to indicate 10755 * reservation conflict. If the TUR fails for other 10756 * reasons, SD_NOT_READY_VALID will be returned. 10757 */ 10758 int err; 10759 10760 mutex_exit(SD_MUTEX(un)); 10761 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10762 mutex_enter(SD_MUTEX(un)); 10763 10764 if (err != 0) { 10765 mutex_exit(SD_MUTEX(un)); 10766 cmlb_invalidate(un->un_cmlbhandle, 10767 (void *)SD_PATH_DIRECT); 10768 mutex_enter(SD_MUTEX(un)); 10769 if (err == EACCES) { 10770 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10771 "reservation conflict\n"); 10772 rval = SD_RESERVED_BY_OTHERS; 10773 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10774 } else { 10775 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10776 "drive offline\n"); 10777 rval = SD_NOT_READY_VALID; 10778 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 10779 } 10780 goto done; 10781 } 10782 } 10783 10784 if (un->un_f_format_in_progress == FALSE) { 10785 mutex_exit(SD_MUTEX(un)); 10786 10787 (void) cmlb_validate(un->un_cmlbhandle, 0, 10788 (void *)SD_PATH_DIRECT); 10789 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL, 10790 NULL, (void *) SD_PATH_DIRECT) != 0) { 10791 rval = SD_NOT_READY_VALID; 10792 mutex_enter(SD_MUTEX(un)); 10793 10794 goto done; 10795 } 10796 if (un->un_f_pkstats_enabled) { 10797 sd_set_pstats(un); 10798 SD_TRACE(SD_LOG_IO_PARTITION, un, 10799 "sd_ready_and_valid: un:0x%p pstats created and " 10800 "set\n", un); 10801 } 10802 mutex_enter(SD_MUTEX(un)); 10803 } 10804 10805 /* 10806 * If this device supports DOOR_LOCK command, try and send 10807 * this command to PREVENT MEDIA REMOVAL, but don't get upset 10808 * if it fails. For a CD, however, it is an error 10809 */ 10810 if (un->un_f_doorlock_supported) { 10811 mutex_exit(SD_MUTEX(un)); 10812 status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 10813 SD_PATH_DIRECT); 10814 10815 if ((status != 0) && ISCD(un)) { 10816 rval = SD_NOT_READY_VALID; 10817 mutex_enter(SD_MUTEX(un)); 10818 10819 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10820 10821 goto done; 10822 } else if (status != 0) 10823 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10824 mutex_enter(SD_MUTEX(un)); 10825 } 10826 10827 /* The state has changed, inform the media watch routines */ 10828 un->un_mediastate = DKIO_INSERTED; 10829 cv_broadcast(&un->un_state_cv); 10830 rval = SD_READY_VALID; 10831 10832 done: 10833 10834 /* 10835 * Initialize the capacity kstat value, if no media previously 10836 * (capacity kstat is 0) and a media has been inserted 10837 * (un_blockcount > 0). 10838 */ 10839 if (un->un_errstats != NULL) { 10840 stp = (struct sd_errstats *)un->un_errstats->ks_data; 10841 if ((stp->sd_capacity.value.ui64 == 0) && 10842 (un->un_f_blockcount_is_valid == TRUE)) { 10843 stp->sd_capacity.value.ui64 = 10844 (uint64_t)((uint64_t)un->un_blockcount * 10845 un->un_sys_blocksize); 10846 } 10847 } 10848 10849 mutex_exit(SD_MUTEX(un)); 10850 return (rval); 10851 } 10852 10853 10854 /* 10855 * Function: sdmin 10856 * 10857 * Description: Routine to limit the size of a data transfer. Used in 10858 * conjunction with physio(9F). 10859 * 10860 * Arguments: bp - pointer to the indicated buf(9S) struct. 10861 * 10862 * Context: Kernel thread context. 
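 *
 * Usage sketch (this is how the routine is handed to physio(9F);
 * the same call appears in sdread() below):
 *
 *	err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);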
10863 */
10864
10865 static void
10866 sdmin(struct buf *bp)
10867 {
10868 struct sd_lun *un;
10869 int instance;
10870
10871 instance = SDUNIT(bp->b_edev);
10872
10873 un = ddi_get_soft_state(sd_state, instance);
10874 ASSERT(un != NULL);
10875
10876 /*
10877 * We depend on buf breakup to restrict
10878 * IO size if it is enabled.
10879 */
10880 if (un->un_buf_breakup_supported) {
10881 return;
10882 }
10883
10884 if (bp->b_bcount > un->un_max_xfer_size) {
10885 bp->b_bcount = un->un_max_xfer_size;
10886 }
10887 }
10888
10889
10890 /*
10891 * Function: sdread
10892 *
10893 * Description: Driver's read(9e) entry point function.
10894 *
10895 * Arguments: dev - device number
10896 * uio - structure pointer describing where data is to be stored
10897 * in user's space
10898 * cred_p - user credential pointer
10899 *
10900 * Return Code: ENXIO
10901 * EIO
10902 * EINVAL
10903 * value returned by physio
10904 *
10905 * Context: Kernel thread context.
10906 */
10907 /* ARGSUSED */
10908 static int
10909 sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
10910 {
10911 struct sd_lun *un = NULL;
10912 int secmask;
10913 int err = 0;
10914 sd_ssc_t *ssc;
10915
10916 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10917 return (ENXIO);
10918 }
10919
10920 ASSERT(!mutex_owned(SD_MUTEX(un)));
10921
10922
10923 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10924 mutex_enter(SD_MUTEX(un));
10925 /*
10926 * Because the call to sd_ready_and_valid will issue I/O we
10927 * must wait here if either the device is suspended or
10928 * if its power level is changing.
10929 */
10930 while ((un->un_state == SD_STATE_SUSPENDED) ||
10931 (un->un_state == SD_STATE_PM_CHANGING)) {
10932 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10933 }
10934 un->un_ncmds_in_driver++;
10935 mutex_exit(SD_MUTEX(un));
10936
10937 /* Initialize sd_ssc_t for internal uscsi commands */
10938 ssc = sd_ssc_init(un);
10939 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10940 err = EIO;
10941 } else {
10942 err = 0;
10943 }
10944 sd_ssc_fini(ssc);
10945
10946 mutex_enter(SD_MUTEX(un));
10947 un->un_ncmds_in_driver--;
10948 ASSERT(un->un_ncmds_in_driver >= 0);
10949 mutex_exit(SD_MUTEX(un));
10950 if (err != 0)
10951 return (err);
10952 }
10953
10954 /*
10955 * Read requests are restricted to multiples of the system block size.
10956 */
10957 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
10958 !un->un_f_enable_rmw)
10959 secmask = un->un_tgt_blocksize - 1;
10960 else
10961 secmask = DEV_BSIZE - 1;
10962
10963 if (uio->uio_loffset & ((offset_t)(secmask))) {
10964 SD_ERROR(SD_LOG_READ_WRITE, un,
10965 "sdread: file offset not modulo %d\n",
10966 secmask + 1);
10967 err = EINVAL;
10968 } else if (uio->uio_iov->iov_len & (secmask)) {
10969 SD_ERROR(SD_LOG_READ_WRITE, un,
10970 "sdread: transfer length not modulo %d\n",
10971 secmask + 1);
10972 err = EINVAL;
10973 } else {
10974 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
10975 }
10976
10977 return (err);
10978 }
10979
10980
10981 /*
10982 * Function: sdwrite
10983 *
10984 * Description: Driver's write(9e) entry point function.
10985 *
10986 * Arguments: dev - device number
10987 * uio - structure pointer describing where data is stored in
10988 * user's space
10989 * cred_p - user credential pointer
10990 *
10991 * Return Code: ENXIO
10992 * EIO
10993 * EINVAL
10994 * value returned by physio
10995 *
10996 * Context: Kernel thread context.
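 *
 * Worked example of the secmask alignment check shared by the
 * read/write entry points (illustrative values): with
 * un_tgt_blocksize == 2048 and RMW disabled, secmask == 0x7FF, so a
 * request with uio_loffset == 512 fails the (uio_loffset & secmask)
 * test and returns EINVAL, while any offset that is a multiple of
 * 2048 passes.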
10997 */
10998 /* ARGSUSED */
10999 static int
11000 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
11001 {
11002 struct sd_lun *un = NULL;
11003 int secmask;
11004 int err = 0;
11005 sd_ssc_t *ssc;
11006
11007 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11008 return (ENXIO);
11009 }
11010
11011 ASSERT(!mutex_owned(SD_MUTEX(un)));
11012
11013 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11014 mutex_enter(SD_MUTEX(un));
11015 /*
11016 * Because the call to sd_ready_and_valid will issue I/O we
11017 * must wait here if either the device is suspended or
11018 * if its power level is changing.
11019 */
11020 while ((un->un_state == SD_STATE_SUSPENDED) ||
11021 (un->un_state == SD_STATE_PM_CHANGING)) {
11022 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11023 }
11024 un->un_ncmds_in_driver++;
11025 mutex_exit(SD_MUTEX(un));
11026
11027 /* Initialize sd_ssc_t for internal uscsi commands */
11028 ssc = sd_ssc_init(un);
11029 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11030 err = EIO;
11031 } else {
11032 err = 0;
11033 }
11034 sd_ssc_fini(ssc);
11035
11036 mutex_enter(SD_MUTEX(un));
11037 un->un_ncmds_in_driver--;
11038 ASSERT(un->un_ncmds_in_driver >= 0);
11039 mutex_exit(SD_MUTEX(un));
11040 if (err != 0)
11041 return (err);
11042 }
11043
11044 /*
11045 * Write requests are restricted to multiples of the system block size.
11046 */
11047 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11048 !un->un_f_enable_rmw)
11049 secmask = un->un_tgt_blocksize - 1;
11050 else
11051 secmask = DEV_BSIZE - 1;
11052
11053 if (uio->uio_loffset & ((offset_t)(secmask))) {
11054 SD_ERROR(SD_LOG_READ_WRITE, un,
11055 "sdwrite: file offset not modulo %d\n",
11056 secmask + 1);
11057 err = EINVAL;
11058 } else if (uio->uio_iov->iov_len & (secmask)) {
11059 SD_ERROR(SD_LOG_READ_WRITE, un,
11060 "sdwrite: transfer length not modulo %d\n",
11061 secmask + 1);
11062 err = EINVAL;
11063 } else {
11064 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio);
11065 }
11066
11067 return (err);
11068 }
11069
11070
11071 /*
11072 * Function: sdaread
11073 *
11074 * Description: Driver's aread(9e) entry point function.
11075 *
11076 * Arguments: dev - device number
11077 * aio - structure pointer describing where data is to be stored
11078 * cred_p - user credential pointer
11079 *
11080 * Return Code: ENXIO
11081 * EIO
11082 * EINVAL
11083 * value returned by aphysio
11084 *
11085 * Context: Kernel thread context.
11086 */
11087 /* ARGSUSED */
11088 static int
11089 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
11090 {
11091 struct sd_lun *un = NULL;
11092 struct uio *uio = aio->aio_uio;
11093 int secmask;
11094 int err = 0;
11095 sd_ssc_t *ssc;
11096
11097 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11098 return (ENXIO);
11099 }
11100
11101 ASSERT(!mutex_owned(SD_MUTEX(un)));
11102
11103 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11104 mutex_enter(SD_MUTEX(un));
11105 /*
11106 * Because the call to sd_ready_and_valid will issue I/O we
11107 * must wait here if either the device is suspended or
11108 * if its power level is changing.
11109 */
11110 while ((un->un_state == SD_STATE_SUSPENDED) ||
11111 (un->un_state == SD_STATE_PM_CHANGING)) {
11112 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11113 }
11114 un->un_ncmds_in_driver++;
11115 mutex_exit(SD_MUTEX(un));
11116
11117 /* Initialize sd_ssc_t for internal uscsi commands */
11118 ssc = sd_ssc_init(un);
11119 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11120 err = EIO;
11121 } else {
11122 err = 0;
11123 }
11124 sd_ssc_fini(ssc);
11125
11126 mutex_enter(SD_MUTEX(un));
11127 un->un_ncmds_in_driver--;
11128 ASSERT(un->un_ncmds_in_driver >= 0);
11129 mutex_exit(SD_MUTEX(un));
11130 if (err != 0)
11131 return (err);
11132 }
11133
11134 /*
11135 * Read requests are restricted to multiples of the system block size.
11136 */
11137 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11138 !un->un_f_enable_rmw)
11139 secmask = un->un_tgt_blocksize - 1;
11140 else
11141 secmask = DEV_BSIZE - 1;
11142
11143 if (uio->uio_loffset & ((offset_t)(secmask))) {
11144 SD_ERROR(SD_LOG_READ_WRITE, un,
11145 "sdaread: file offset not modulo %d\n",
11146 secmask + 1);
11147 err = EINVAL;
11148 } else if (uio->uio_iov->iov_len & (secmask)) {
11149 SD_ERROR(SD_LOG_READ_WRITE, un,
11150 "sdaread: transfer length not modulo %d\n",
11151 secmask + 1);
11152 err = EINVAL;
11153 } else {
11154 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio);
11155 }
11156
11157 return (err);
11158 }
11159
11160
11161 /*
11162 * Function: sdawrite
11163 *
11164 * Description: Driver's awrite(9e) entry point function.
11165 *
11166 * Arguments: dev - device number
11167 * aio - structure pointer describing where data is stored
11168 * cred_p - user credential pointer
11169 *
11170 * Return Code: ENXIO
11171 * EIO
11172 * EINVAL
11173 * value returned by aphysio
11174 *
11175 * Context: Kernel thread context.
11176 */
11177 /* ARGSUSED */
11178 static int
11179 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
11180 {
11181 struct sd_lun *un = NULL;
11182 struct uio *uio = aio->aio_uio;
11183 int secmask;
11184 int err = 0;
11185 sd_ssc_t *ssc;
11186
11187 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11188 return (ENXIO);
11189 }
11190
11191 ASSERT(!mutex_owned(SD_MUTEX(un)));
11192
11193 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11194 mutex_enter(SD_MUTEX(un));
11195 /*
11196 * Because the call to sd_ready_and_valid will issue I/O we
11197 * must wait here if either the device is suspended or
11198 * if its power level is changing.
11199 */
11200 while ((un->un_state == SD_STATE_SUSPENDED) ||
11201 (un->un_state == SD_STATE_PM_CHANGING)) {
11202 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11203 }
11204 un->un_ncmds_in_driver++;
11205 mutex_exit(SD_MUTEX(un));
11206
11207 /* Initialize sd_ssc_t for internal uscsi commands */
11208 ssc = sd_ssc_init(un);
11209 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11210 err = EIO;
11211 } else {
11212 err = 0;
11213 }
11214 sd_ssc_fini(ssc);
11215
11216 mutex_enter(SD_MUTEX(un));
11217 un->un_ncmds_in_driver--;
11218 ASSERT(un->un_ncmds_in_driver >= 0);
11219 mutex_exit(SD_MUTEX(un));
11220 if (err != 0)
11221 return (err);
11222 }
11223
11224 /*
11225 * Write requests are restricted to multiples of the system block size.
11226 */ 11227 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR && 11228 !un->un_f_enable_rmw) 11229 secmask = un->un_tgt_blocksize - 1; 11230 else 11231 secmask = DEV_BSIZE - 1; 11232 11233 if (uio->uio_loffset & ((offset_t)(secmask))) { 11234 SD_ERROR(SD_LOG_READ_WRITE, un, 11235 "sdawrite: file offset not modulo %d\n", 11236 secmask + 1); 11237 err = EINVAL; 11238 } else if (uio->uio_iov->iov_len & (secmask)) { 11239 SD_ERROR(SD_LOG_READ_WRITE, un, 11240 "sdawrite: transfer length not modulo %d\n", 11241 secmask + 1); 11242 err = EINVAL; 11243 } else { 11244 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 11245 } 11246 11247 return (err); 11248 } 11249 11250 11251 11252 11253 11254 /* 11255 * Driver IO processing follows the following sequence: 11256 * 11257 * sdioctl(9E) sdstrategy(9E) biodone(9F) 11258 * | | ^ 11259 * v v | 11260 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 11261 * | | | | 11262 * v | | | 11263 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 11264 * | | ^ ^ 11265 * v v | | 11266 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 11267 * | | | | 11268 * +---+ | +------------+ +-------+ 11269 * | | | | 11270 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11271 * | v | | 11272 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 11273 * | | ^ | 11274 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11275 * | v | | 11276 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 11277 * | | ^ | 11278 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11279 * | v | | 11280 * | sd_checksum_iostart() sd_checksum_iodone() | 11281 * | | ^ | 11282 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 11283 * | v | | 11284 * | sd_pm_iostart() sd_pm_iodone() | 11285 * | | ^ | 11286 * | | | | 11287 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 11288 * | ^ 11289 * v | 11290 * sd_core_iostart() | 11291 * | | 11292 * | +------>(*destroypkt)() 11293 * +-> sd_start_cmds() <-+ | | 11294 * | | | v 11295 * | | | scsi_destroy_pkt(9F) 11296 * | | | 11297 * +->(*initpkt)() +- sdintr() 11298 * | | | | 11299 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 11300 * | +-> scsi_setup_cdb(9F) | 11301 * | | 11302 * +--> scsi_transport(9F) | 11303 * | | 11304 * +----> SCSA ---->+ 11305 * 11306 * 11307 * This code is based upon the following presumptions: 11308 * 11309 * - iostart and iodone functions operate on buf(9S) structures. These 11310 * functions perform the necessary operations on the buf(9S) and pass 11311 * them along to the next function in the chain by using the macros 11312 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 11313 * (for iodone side functions). 11314 * 11315 * - The iostart side functions may sleep. The iodone side functions 11316 * are called under interrupt context and may NOT sleep. Therefore 11317 * iodone side functions also may not call iostart side functions. 11318 * (NOTE: iostart side functions should NOT sleep for memory, as 11319 * this could result in deadlock.) 11320 * 11321 * - An iostart side function may call its corresponding iodone side 11322 * function directly (if necessary). 11323 * 11324 * - In the event of an error, an iostart side function can return a buf(9S) 11325 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 11326 * b_error in the usual way of course). 11327 * 11328 * - The taskq mechanism may be used by the iodone side functions to dispatch 11329 * requests to the iostart side functions. 
The iostart side functions in
11330 * this case would be called under the context of a taskq thread, so it's
11331 * OK for them to block/sleep/spin in this case.
11332 *
11333 * - iostart side functions may allocate "shadow" buf(9S) structs and
11334 * pass them along to the next function in the chain. The corresponding
11335 * iodone side functions must coalesce the "shadow" bufs and return
11336 * the "original" buf to the next higher layer.
11337 *
11338 * - The b_private field of the buf(9S) struct holds a pointer to
11339 * an sd_xbuf struct, which contains information needed to
11340 * construct the scsi_pkt for the command.
11341 *
11342 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
11343 * layer must acquire & release the SD_MUTEX(un) as needed.
11344 */
11345
11346
11347 /*
11348 * Create taskq for all targets in the system. This is created at
11349 * _init(9E) and destroyed at _fini(9E).
11350 *
11351 * Note: here we set the minalloc to a reasonably high number to ensure that
11352 * we will have an adequate supply of task entries available at interrupt time.
11353 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
11354 * sd_taskq_create(). Since we do not want to sleep for allocations at
11355 * interrupt time, set maxalloc equal to minalloc. That way we will just fail
11356 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
11357 * requests at any one instant in time.
11358 */
11359 #define SD_TASKQ_NUMTHREADS 8
11360 #define SD_TASKQ_MINALLOC 256
11361 #define SD_TASKQ_MAXALLOC 256
11362
11363 static taskq_t *sd_tq = NULL;
11364 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))
11365
11366 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC;
11367 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
11368
11369 /*
11370 * The following task queue is being created for the write part of
11371 * read-modify-write of non-512 block size devices.
11372 * Limit the number of threads to 1 for now. This number has been chosen
11373 * considering the fact that it currently applies only to DVD-RAM drives/MO
11374 * drives, for which performance is not the main criterion at this stage.
11375 * Note: It needs to be explored whether we can use a single taskq in future
11376 */
11377 #define SD_WMR_TASKQ_NUMTHREADS 1
11378 static taskq_t *sd_wmr_tq = NULL;
11379 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))
11380
11381 /*
11382 * Function: sd_taskq_create
11383 *
11384 * Description: Create taskq thread(s) and preallocate task entries
11385 *
11386 * Return Code: None; the allocated taskq pointers are stored in the
11387 * sd_tq and sd_wmr_tq globals.
11388 *
11389 * Context: Can sleep. Requires blockable context.
11390 *
11391 * Notes: - The taskq() facility currently is NOT part of the DDI.
11392 * (definitely NOT recommended for 3rd-party drivers!) :-)
11393 * - taskq_create() will block for memory, also it will panic
11394 * if it cannot create the requested number of threads.
11395 * - Currently taskq_create() creates threads that cannot be
11396 * swapped.
11396 * - We use TASKQ_PREPOPULATE to ensure we have an adequate
11397 * supply of taskq entries at interrupt time (ie, so that we
11398 * do not have to sleep for memory)
11399 */
11400
11401 static void
11402 sd_taskq_create(void)
11403 {
11404 char taskq_name[TASKQ_NAMELEN];
11405
11406 ASSERT(sd_tq == NULL);
11407 ASSERT(sd_wmr_tq == NULL);
11408
11409 (void) snprintf(taskq_name, sizeof (taskq_name),
11410 "%s_drv_taskq", sd_label);
11411 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS,
11412 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11413 TASKQ_PREPOPULATE));
11414
11415 (void) snprintf(taskq_name, sizeof (taskq_name),
11416 "%s_rmw_taskq", sd_label);
11417 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS,
11418 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11419 TASKQ_PREPOPULATE));
11420 }
11421
11422
11423 /*
11424 * Function: sd_taskq_delete
11425 *
11426 * Description: Complementary cleanup routine for sd_taskq_create().
11427 *
11428 * Context: Kernel thread context.
11429 */
11430
11431 static void
11432 sd_taskq_delete(void)
11433 {
11434 ASSERT(sd_tq != NULL);
11435 ASSERT(sd_wmr_tq != NULL);
11436 taskq_destroy(sd_tq);
11437 taskq_destroy(sd_wmr_tq);
11438 sd_tq = NULL;
11439 sd_wmr_tq = NULL;
11440 }
11441
11442
11443 /*
11444 * Function: sdstrategy
11445 *
11446 * Description: Driver's strategy (9E) entry point function.
11447 *
11448 * Arguments: bp - pointer to buf(9S)
11449 *
11450 * Return Code: Always returns zero
11451 *
11452 * Context: Kernel thread context.
11453 */
11454
11455 static int
11456 sdstrategy(struct buf *bp)
11457 {
11458 struct sd_lun *un;
11459
11460 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
11461 if (un == NULL) {
11462 bioerror(bp, EIO);
11463 bp->b_resid = bp->b_bcount;
11464 biodone(bp);
11465 return (0);
11466 }
11467
11468 /* As was done in the past, fail new cmds if the state is dumping. */
11469 if (un->un_state == SD_STATE_DUMPING) {
11470 bioerror(bp, ENXIO);
11471 bp->b_resid = bp->b_bcount;
11472 biodone(bp);
11473 return (0);
11474 }
11475
11476 ASSERT(!mutex_owned(SD_MUTEX(un)));
11477
11478 /*
11479 * Commands may sneak in while we released the mutex in
11480 * DDI_SUSPEND, so we should block new commands. However, old
11481 * commands that are still in the driver at this point should
11482 * still be allowed to drain.
11483 */
11484 mutex_enter(SD_MUTEX(un));
11485 /*
11486 * Must wait here if either the device is suspended or
11487 * if its power level is changing.
11488 */
11489 while ((un->un_state == SD_STATE_SUSPENDED) ||
11490 (un->un_state == SD_STATE_PM_CHANGING)) {
11491 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11492 }
11493
11494 un->un_ncmds_in_driver++;
11495
11496 /*
11497 * atapi: Since we are running the CD for now in PIO mode we need to
11498 * call bp_mapin here to avoid bp_mapin being called in interrupt
11499 * context under the HBA's init_pkt routine.
11500 */
11501 if (un->un_f_cfg_is_atapi == TRUE) {
11502 mutex_exit(SD_MUTEX(un));
11503 bp_mapin(bp);
11504 mutex_enter(SD_MUTEX(un));
11505 }
11506 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
11507 un->un_ncmds_in_driver);
11508
11509 if (bp->b_flags & B_WRITE)
11510 un->un_f_sync_cache_required = TRUE;
11511
11512 mutex_exit(SD_MUTEX(un));
11513
11514 /*
11515 * This will (eventually) allocate the sd_xbuf area and
11516 * call sd_xbuf_strategy().
We just want to return the
11517 * result of ddi_xbuf_qstrategy so that we have an opt-
11518 * imized tail call which saves us a stack frame.
11519 */
11520 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
11521 }
11522
11523
11524 /*
11525 * Function: sd_xbuf_strategy
11526 *
11527 * Description: Function for initiating IO operations via the
11528 * ddi_xbuf_qstrategy() mechanism.
11529 *
11530 * Context: Kernel thread context.
11531 */
11532
11533 static void
11534 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg)
11535 {
11536 struct sd_lun *un = arg;
11537
11538 ASSERT(bp != NULL);
11539 ASSERT(xp != NULL);
11540 ASSERT(un != NULL);
11541 ASSERT(!mutex_owned(SD_MUTEX(un)));
11542
11543 /*
11544 * Initialize the fields in the xbuf and save a pointer to the
11545 * xbuf in bp->b_private.
11546 */
11547 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL);
11548
11549 /* Send the buf down the iostart chain */
11550 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp);
11551 }
11552
11553
11554 /*
11555 * Function: sd_xbuf_init
11556 *
11557 * Description: Prepare the given sd_xbuf struct for use.
11558 *
11559 * Arguments: un - ptr to softstate
11560 * bp - ptr to associated buf(9S)
11561 * xp - ptr to associated sd_xbuf
11562 * chain_type - IO chain type to use:
11563 * SD_CHAIN_NULL
11564 * SD_CHAIN_BUFIO
11565 * SD_CHAIN_USCSI
11566 * SD_CHAIN_DIRECT
11567 * SD_CHAIN_DIRECT_PRIORITY
11568 * pktinfop - ptr to private data struct for scsi_pkt(9S)
11569 * initialization; may be NULL if none.
11570 *
11571 * Context: Kernel thread context
11572 */
11573
11574 static void
11575 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
11576 uchar_t chain_type, void *pktinfop)
11577 {
11578 int index;
11579
11580 ASSERT(un != NULL);
11581 ASSERT(bp != NULL);
11582 ASSERT(xp != NULL);
11583
11584 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
11585 bp, chain_type);
11586
11587 xp->xb_un = un;
11588 xp->xb_pktp = NULL;
11589 xp->xb_pktinfo = pktinfop;
11590 xp->xb_private = bp->b_private;
11591 xp->xb_blkno = (daddr_t)bp->b_blkno;
11592
11593 /*
11594 * Set up the iostart and iodone chain indexes in the xbuf, based
11595 * upon the specified chain type to use.
11596 */
11597 switch (chain_type) {
11598 case SD_CHAIN_NULL:
11599 /*
11600 * Fall thru to just use the values for the buf type, even
11601 * though for the NULL chain these values will never be used.
11602 */
11603 /* FALLTHRU */
11604 case SD_CHAIN_BUFIO:
11605 index = un->un_buf_chain_type;
11606 if ((!un->un_f_has_removable_media) &&
11607 (un->un_tgt_blocksize != 0) &&
11608 (un->un_tgt_blocksize != DEV_BSIZE ||
11609 un->un_f_enable_rmw)) {
11610 int secmask = 0, blknomask = 0;
11611 if (un->un_f_enable_rmw) {
11612 blknomask =
11613 (un->un_phy_blocksize / DEV_BSIZE) - 1;
11614 secmask = un->un_phy_blocksize - 1;
11615 } else {
11616 blknomask =
11617 (un->un_tgt_blocksize / DEV_BSIZE) - 1;
11618 secmask = un->un_tgt_blocksize - 1;
11619 }
11620
11621 if ((bp->b_lblkno & (blknomask)) ||
11622 (bp->b_bcount & (secmask))) {
11623 if ((un->un_f_rmw_type !=
11624 SD_RMW_TYPE_RETURN_ERROR) ||
11625 un->un_f_enable_rmw) {
11626 if (un->un_f_pm_is_enabled == FALSE)
11627 index =
11628 SD_CHAIN_INFO_MSS_DSK_NO_PM;
11629 else
11630 index =
11631 SD_CHAIN_INFO_MSS_DISK;
11632 }
11633 }
11634 }
11635 break;
11636 case SD_CHAIN_USCSI:
11637 index = un->un_uscsi_chain_type;
11638 break;
11639 case SD_CHAIN_DIRECT:
11640 index = un->un_direct_chain_type;
11641 break;
11642 case SD_CHAIN_DIRECT_PRIORITY:
11643 index = un->un_priority_chain_type;
11644 break;
11645 default:
11646 /* We're really broken if we ever get here... */
11647 panic("sd_xbuf_init: illegal chain type!");
11648 /*NOTREACHED*/
11649 }
11650
11651 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
11652 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;
11653
11654 /*
11655 * It might be a bit easier to simply bzero the entire xbuf above,
11656 * but it turns out that since we init a fair number of members anyway,
11657 * we save a fair number of cycles by doing explicit assignment of zero.
11658 */
11659 xp->xb_pkt_flags = 0;
11660 xp->xb_dma_resid = 0;
11661 xp->xb_retry_count = 0;
11662 xp->xb_victim_retry_count = 0;
11663 xp->xb_ua_retry_count = 0;
11664 xp->xb_nr_retry_count = 0;
11665 xp->xb_sense_bp = NULL;
11666 xp->xb_sense_status = 0;
11667 xp->xb_sense_state = 0;
11668 xp->xb_sense_resid = 0;
11669 xp->xb_ena = 0;
11670
11671 bp->b_private = xp;
11672 bp->b_flags &= ~(B_DONE | B_ERROR);
11673 bp->b_resid = 0;
11674 bp->av_forw = NULL;
11675 bp->av_back = NULL;
11676 bioerror(bp, 0);
11677
11678 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n");
11679 }
11680
11681
11682 /*
11683 * Function: sd_uscsi_strategy
11684 *
11685 * Description: Wrapper for calling into the USCSI chain via physio(9F)
11686 *
11687 * Arguments: bp - buf struct ptr
11688 *
11689 * Return Code: Always returns 0
11690 *
11691 * Context: Kernel thread context
11692 */
11693
11694 static int
11695 sd_uscsi_strategy(struct buf *bp)
11696 {
11697 struct sd_lun *un;
11698 struct sd_uscsi_info *uip;
11699 struct sd_xbuf *xp;
11700 uchar_t chain_type;
11701 uchar_t cmd;
11702
11703 ASSERT(bp != NULL);
11704
11705 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
11706 if (un == NULL) {
11707 bioerror(bp, EIO);
11708 bp->b_resid = bp->b_bcount;
11709 biodone(bp);
11710 return (0);
11711 }
11712
11713 ASSERT(!mutex_owned(SD_MUTEX(un)));
11714
11715 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp);
11716
11717 /*
11718 * A pointer to a struct sd_uscsi_info is expected in bp->b_private
11719 */
11720 ASSERT(bp->b_private != NULL);
11721 uip = (struct sd_uscsi_info *)bp->b_private;
11722 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0];
11723
11724 mutex_enter(SD_MUTEX(un));
11725 /*
11726 * atapi: Since we are running the CD for now in PIO mode we need to
11727 * call bp_mapin here to
avoid bp_mapin being called in interrupt context under
11728 * the HBA's init_pkt routine.
11729 */
11730 if (un->un_f_cfg_is_atapi == TRUE) {
11731 mutex_exit(SD_MUTEX(un));
11732 bp_mapin(bp);
11733 mutex_enter(SD_MUTEX(un));
11734 }
11735 un->un_ncmds_in_driver++;
11736 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
11737 un->un_ncmds_in_driver);
11738
11739 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) &&
11740 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1))
11741 un->un_f_sync_cache_required = TRUE;
11742
11743 mutex_exit(SD_MUTEX(un));
11744
11745 switch (uip->ui_flags) {
11746 case SD_PATH_DIRECT:
11747 chain_type = SD_CHAIN_DIRECT;
11748 break;
11749 case SD_PATH_DIRECT_PRIORITY:
11750 chain_type = SD_CHAIN_DIRECT_PRIORITY;
11751 break;
11752 default:
11753 chain_type = SD_CHAIN_USCSI;
11754 break;
11755 }
11756
11757 /*
11758 * We may allocate an extra buf for external USCSI commands. If the
11759 * application asks for bigger than 20-byte sense data via USCSI,
11760 * the SCSA layer will allocate a 252-byte sense buf for that command.
11761 */
11762 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen >
11763 SENSE_LENGTH) {
11764 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH +
11765 MAX_SENSE_LENGTH, KM_SLEEP);
11766 } else {
11767 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP);
11768 }
11769
11770 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);
11771
11772 /* Use the index obtained within xbuf_init */
11773 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);
11774
11775 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);
11776
11777 return (0);
11778 }
11779
11780 /*
11781 * Function: sd_send_scsi_cmd
11782 *
11783 * Description: Runs a USCSI command for user (when called through sdioctl),
11784 * or for the driver
11785 *
11786 * Arguments: dev - the dev_t for the device
11787 * incmd - ptr to a valid uscsi_cmd struct
11788 * flag - bit flag, indicating open settings, 32/64 bit type
11789 * dataspace - UIO_USERSPACE or UIO_SYSSPACE
11790 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11791 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11792 * to use the USCSI "direct" chain and bypass the normal
11793 * command waitq.
11794 *
11795 * Return Code: 0 - successful completion of the given command
11796 * EIO - scsi_uscsi_handle_command() failed
11797 * ENXIO - soft state not found for specified dev
11798 * EINVAL
11799 * EFAULT - copyin/copyout error
11800 * return code of scsi_uscsi_handle_command():
11801 * EIO
11802 * ENXIO
11803 * EACCES
11804 *
11805 * Context: Waits for command to complete. Can sleep.
11806 */
11807
11808 static int
11809 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
11810 enum uio_seg dataspace, int path_flag)
11811 {
11812 struct sd_lun *un;
11813 sd_ssc_t *ssc;
11814 int rval;
11815
11816 un = ddi_get_soft_state(sd_state, SDUNIT(dev));
11817 if (un == NULL) {
11818 return (ENXIO);
11819 }
11820
11821 /*
11822 * Using sd_ssc_send to handle uscsi cmd
11823 */
11824 ssc = sd_ssc_init(un);
11825 rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag);
11826 sd_ssc_fini(ssc);
11827
11828 return (rval);
11829 }
11830
11831 /*
11832 * Function: sd_ssc_init
11833 *
11834 * Description: USCSI end users call this function to initialize the necessary
11835 * fields, such as the uscsi_cmd and sd_uscsi_info structs.
11836 *
11837 * The return value of sd_send_scsi_cmd will be treated as a
11838 * fault in various conditions. Even if it is not zero, some
11839 * callers may ignore the return value. That is to say, we
11840 * cannot make an accurate assessment in sdintr, since if a
11841 * command fails in sdintr it does not mean the caller of
11842 * sd_send_scsi_cmd will treat it as a real failure.
11843 *
11844 * To avoid printing too many error logs for a failed uscsi
11845 * packet that the caller may not treat as a failure,
11846 * sd keeps silent when handling all uscsi commands.
11847 *
11848 * During detach->attach and attach-open, for some types of
11849 * problems, the driver should be providing information about
11850 * the problem encountered. Devices use USCSI_SILENT, which
11851 * suppresses all driver information. The result is that no
11852 * information about the problem is available. Being
11853 * completely silent during this time is inappropriate. The
11854 * driver needs a more selective filter than USCSI_SILENT, so
11855 * that information related to faults is provided.
11856 *
11857 * To make an accurate assessment, the caller of
11858 * sd_send_scsi_USCSI_CMD should take the ownership and
11859 * get the necessary information to print error messages.
11860 *
11861 * If we want to print the necessary info for a uscsi command,
11862 * we need to keep the uscsi_cmd and sd_uscsi_info till we can
11863 * make the assessment. We use sd_ssc_init to alloc the necessary
11864 * structs for sending a uscsi command and we are also
11865 * responsible for freeing the memory by calling
11866 * sd_ssc_fini.
11867 *
11868 * The calling sequence will look like:
11869 * sd_ssc_init->
11870 *
11871 * ...
11872 *
11873 * sd_send_scsi_USCSI_CMD->
11874 * sd_ssc_send-> - - - sdintr
11875 * ...
11876 *
11877 * if we think the return value should be treated as a
11878 * failure, we make the assessment here and print out the
11879 * necessary information by retrieving the uscsi_cmd and
11880 * sd_uscsi_info
11881 *
11882 * ...
11883 *
11884 * sd_ssc_fini
11885 *
11886 *
11887 * Arguments: un - pointer to driver soft state (unit) structure for this
11888 * target.
11889 *
11890 * Return code: sd_ssc_t - pointer to allocated sd_ssc_t struct, it contains
11891 * uscsi_cmd and sd_uscsi_info.
11892 * NULL - if memory cannot be allocated for the sd_ssc_t struct
11893 *
11894 * Context: Kernel Thread.
11895 */
11896 static sd_ssc_t *
11897 sd_ssc_init(struct sd_lun *un)
11898 {
11899 sd_ssc_t *ssc;
11900 struct uscsi_cmd *ucmdp;
11901 struct sd_uscsi_info *uip;
11902
11903 ASSERT(un != NULL);
11904 ASSERT(!mutex_owned(SD_MUTEX(un)));
11905
11906 /*
11907 * Allocate sd_ssc_t structure
11908 */
11909 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP);
11910
11911 /*
11912 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine
11913 */
11914 ucmdp = scsi_uscsi_alloc();
11915
11916 /*
11917 * Allocate sd_uscsi_info structure
11918 */
11919 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
11920
11921 ssc->ssc_uscsi_cmd = ucmdp;
11922 ssc->ssc_uscsi_info = uip;
11923 ssc->ssc_un = un;
11924
11925 return (ssc);
11926 }
11927
11928 /*
11929 * Function: sd_ssc_fini
11930 *
11931 * Description: To free an sd_ssc_t struct and the structures hanging off it.
11932 *
11933 * Arguments: ssc - struct pointer of sd_ssc_t.
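 *
 * Paired-use sketch (assumed caller code; it mirrors the
 * sd_send_scsi_DOORLOCK() call pattern earlier in this section):
 *
 *	sd_ssc_t *ssc = sd_ssc_init(un);
 *	rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag);
 *	if (rval != 0)
 *		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
 *	sd_ssc_fini(ssc);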
11933 */
11934 static void
11935 sd_ssc_fini(sd_ssc_t *ssc)
11936 {
11937 scsi_uscsi_free(ssc->ssc_uscsi_cmd);
11938
11939 if (ssc->ssc_uscsi_info != NULL) {
11940 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info));
11941 ssc->ssc_uscsi_info = NULL;
11942 }
11943
11944 kmem_free(ssc, sizeof (sd_ssc_t));
11945 ssc = NULL;
11946 }
11947
11948 /*
11949 * Function: sd_ssc_send
11950 *
11951 * Description: Runs a USCSI command for user when called through sdioctl,
11952 * or for the driver.
11953 *
11954 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and
11955 * sd_uscsi_info in.
11956 * incmd - ptr to a valid uscsi_cmd struct
11957 * flag - bit flag, indicating open settings, 32/64 bit type
11958 * dataspace - UIO_USERSPACE or UIO_SYSSPACE
11959 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11960 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11961 * to use the USCSI "direct" chain and bypass the normal
11962 * command waitq.
11963 *
11964 * Return Code: 0 - successful completion of the given command
11965 * EIO - scsi_uscsi_handle_command() failed
11966 * ENXIO - soft state not found for specified dev
11967 * ECANCELED - command cancelled due to low power
11968 * EINVAL
11969 * EFAULT - copyin/copyout error
11970 * return code of scsi_uscsi_handle_command():
11971 * EIO
11972 * ENXIO
11973 * EACCES
11974 *
11975 * Context: Kernel Thread;
11976 * Waits for command to complete. Can sleep.
11977 */
11978 static int
11979 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag,
11980 enum uio_seg dataspace, int path_flag)
11981 {
11982 struct sd_uscsi_info *uip;
11983 struct uscsi_cmd *uscmd;
11984 struct sd_lun *un;
11985 dev_t dev;
11986
11987 int format = 0;
11988 int rval;
11989
11990 ASSERT(ssc != NULL);
11991 un = ssc->ssc_un;
11992 ASSERT(un != NULL);
11993 uscmd = ssc->ssc_uscsi_cmd;
11994 ASSERT(uscmd != NULL);
11995 ASSERT(!mutex_owned(SD_MUTEX(un)));
11996 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
11997 /*
11998 * If we enter here, it indicates that the previous uscsi
11999 * command has not been processed by sd_ssc_assessment.
12000 * This violates our rules of FMA telemetry processing.
12001 * We should print out this message and the last undisposed
12002 * uscsi command.
12003 */
12004 if (uscmd->uscsi_cdb != NULL) {
12005 SD_INFO(SD_LOG_SDTEST, un,
12006 "sd_ssc_send is missing the alternative "
12007 "sd_ssc_assessment when running command 0x%x.\n",
12008 uscmd->uscsi_cdb[0]);
12009 }
12010 /*
12011 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be
12012 * the initial status.
12013 */
12014 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12015 }
12016
12017 /*
12018 * We need to make sure sd_ssc_send is always followed by
12019 * sd_ssc_assessment to avoid missing FMA telemetries.
12020 */
12021 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT;
12022
12023 /*
12024 * If USCSI_PMFAILFAST is set and un is in low power, fail the
12025 * command immediately.
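 *
 * Illustrative opt-in (hypothetical caller): the flag is set on the
 * uscsi command before the send, e.g.
 *
 *	ucmdp->uscsi_flags |= USCSI_PMFAILFAST;
 *	rval = sd_ssc_send(ssc, ucmdp, flag, UIO_SYSSPACE,
 *	    SD_PATH_DIRECT);
 *
 * A return of ECANCELED then means the device was in low power and
 * the command was never issued.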
12026 */ 12027 mutex_enter(SD_MUTEX(un)); 12028 mutex_enter(&un->un_pm_mutex); 12029 if ((uscmd->uscsi_flags & USCSI_PMFAILFAST) && 12030 SD_DEVICE_IS_IN_LOW_POWER(un)) { 12031 SD_TRACE(SD_LOG_IO, un, "sd_ssc_send:" 12032 "un:0x%p is in low power\n", un); 12033 mutex_exit(&un->un_pm_mutex); 12034 mutex_exit(SD_MUTEX(un)); 12035 return (ECANCELED); 12036 } 12037 mutex_exit(&un->un_pm_mutex); 12038 mutex_exit(SD_MUTEX(un)); 12039 12040 #ifdef SDDEBUG 12041 switch (dataspace) { 12042 case UIO_USERSPACE: 12043 SD_TRACE(SD_LOG_IO, un, 12044 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un); 12045 break; 12046 case UIO_SYSSPACE: 12047 SD_TRACE(SD_LOG_IO, un, 12048 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un); 12049 break; 12050 default: 12051 SD_TRACE(SD_LOG_IO, un, 12052 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un); 12053 break; 12054 } 12055 #endif 12056 12057 rval = scsi_uscsi_copyin((intptr_t)incmd, flag, 12058 SD_ADDRESS(un), &uscmd); 12059 if (rval != 0) { 12060 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: " 12061 "scsi_uscsi_alloc_and_copyin failed\n", un); 12062 return (rval); 12063 } 12064 12065 if ((uscmd->uscsi_cdb != NULL) && 12066 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 12067 mutex_enter(SD_MUTEX(un)); 12068 un->un_f_format_in_progress = TRUE; 12069 mutex_exit(SD_MUTEX(un)); 12070 format = 1; 12071 } 12072 12073 /* 12074 * Allocate an sd_uscsi_info struct and fill it with the info 12075 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 12076 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 12077 * since we allocate the buf here in this function, we do not 12078 * need to preserve the prior contents of b_private. 12079 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 12080 */ 12081 uip = ssc->ssc_uscsi_info; 12082 uip->ui_flags = path_flag; 12083 uip->ui_cmdp = uscmd; 12084 12085 /* 12086 * Commands sent with priority are intended for error recovery 12087 * situations, and do not have retries performed. 12088 */ 12089 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 12090 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 12091 } 12092 uscmd->uscsi_flags &= ~USCSI_NOINTR; 12093 12094 dev = SD_GET_DEV(un); 12095 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 12096 sd_uscsi_strategy, NULL, uip); 12097 12098 /* 12099 * mark ssc_flags right after handle_cmd to make sure 12100 * the uscsi has been sent 12101 */ 12102 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED; 12103 12104 #ifdef SDDEBUG 12105 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 12106 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 12107 uscmd->uscsi_status, uscmd->uscsi_resid); 12108 if (uscmd->uscsi_bufaddr != NULL) { 12109 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 12110 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 12111 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 12112 if (dataspace == UIO_SYSSPACE) { 12113 SD_DUMP_MEMORY(un, SD_LOG_IO, 12114 "data", (uchar_t *)uscmd->uscsi_bufaddr, 12115 uscmd->uscsi_buflen, SD_LOG_HEX); 12116 } 12117 } 12118 #endif 12119 12120 if (format == 1) { 12121 mutex_enter(SD_MUTEX(un)); 12122 un->un_f_format_in_progress = FALSE; 12123 mutex_exit(SD_MUTEX(un)); 12124 } 12125 12126 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd); 12127 12128 return (rval); 12129 } 12130 12131 /* 12132 * Function: sd_ssc_print 12133 * 12134 * Description: Print information available to the console. 12135 * 12136 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 12137 * sd_uscsi_info in. 12138 * sd_severity - log level. 12139 * Context: Kernel thread or interrupt context. 
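 *
 * Typical invocation (sketch; the severity values are the
 * SCSI_ERR_* levels chosen by sd_ssc_post() below):
 *
 *	sd_ssc_print(ssc, SCSI_ERR_INFO);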
12140 */
12141 static void
12142 sd_ssc_print(sd_ssc_t *ssc, int sd_severity)
12143 {
12144 struct uscsi_cmd *ucmdp;
12145 struct scsi_device *devp;
12146 dev_info_t *devinfo;
12147 uchar_t *sensep;
12148 int senlen;
12149 union scsi_cdb *cdbp;
12150 uchar_t com;
12151 extern struct scsi_key_strings scsi_cmds[];
12152
12153 ASSERT(ssc != NULL);
12154 ASSERT(ssc->ssc_un != NULL);
12155
12156 if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT)
12157 return;
12158 ucmdp = ssc->ssc_uscsi_cmd;
12159 devp = SD_SCSI_DEVP(ssc->ssc_un);
12160 devinfo = SD_DEVINFO(ssc->ssc_un);
12161 ASSERT(ucmdp != NULL);
12162 ASSERT(devp != NULL);
12163 ASSERT(devinfo != NULL);
12164 sensep = (uint8_t *)ucmdp->uscsi_rqbuf;
12165 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid;
12166 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb;
12167
12168 /* In certain cases (like DOORLOCK), the cdb could be NULL. */
12169 if (cdbp == NULL)
12170 return;
12171 /* We don't print a log if no sense data is available. */
12172 if (senlen == 0)
12173 sensep = NULL;
12174 com = cdbp->scc_cmd;
12175 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com,
12176 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL);
12177 }
12178
12179 /*
12180 * Function: sd_ssc_assessment
12181 *
12182 * Description: We use this function to make an assessment at the point
12183 * where the SD driver may encounter a potential error.
12184 *
12185 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and
12186 * sd_uscsi_info in.
12187 * tp_assess - a hint of strategy for ereport posting.
12188 * Possible values of tp_assess include:
12189 * SD_FMT_IGNORE - we don't post any ereport because we're
12190 * sure that it is ok to ignore the underlying problems.
12191 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now
12192 * but it might not be correct to ignore the underlying hardware
12193 * error.
12194 * SD_FMT_STATUS_CHECK - we will post an ereport with the
12195 * payload driver-assessment of value "fail" or
12196 * "fatal" (depending on what information we have here). This
12197 * assessment value is usually set when the SD driver thinks
12198 * a potential error has occurred (typically, when the return
12199 * value of the SCSI command is EIO).
12200 * SD_FMT_STANDARD - we will post an ereport with the payload
12201 * driver-assessment of value "info". This assessment value is
12202 * set when the SCSI command returned successfully and with
12203 * sense data sent back.
12204 *
12205 * Context: Kernel thread.
12206 */
12207 static void
12208 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess)
12209 {
12210 int senlen = 0;
12211 struct uscsi_cmd *ucmdp = NULL;
12212 struct sd_lun *un;
12213
12214 ASSERT(ssc != NULL);
12215 un = ssc->ssc_un;
12216 ASSERT(un != NULL);
12217 ucmdp = ssc->ssc_uscsi_cmd;
12218 ASSERT(ucmdp != NULL);
12219
12220 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
12221 ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT;
12222 } else {
12223 /*
12224 * If we enter here, it indicates that we have a wrong
12225 * calling sequence of sd_ssc_send and sd_ssc_assessment,
12226 * both of which should be called in a pair in case of
12227 * loss of FMA telemetries.
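 *
 * Correct pairing, for illustration:
 *
 *	rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag);
 *	sd_ssc_assessment(ssc, tp_assess);	(exactly once per send)
 *
 * An assessment without a preceding send, or a second assessment
 * for the same send, lands in this branch.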
12228 */
12229 if (ucmdp->uscsi_cdb != NULL) {
12230 SD_INFO(SD_LOG_SDTEST, un,
12231 "sd_ssc_assessment is missing the "
12232 "alternative sd_ssc_send when running 0x%x, "
12233 "or there are superfluous sd_ssc_assessment for "
12234 "the same sd_ssc_send.\n",
12235 ucmdp->uscsi_cdb[0]);
12236 }
12237 /*
12238 * Set the ssc_flags to the initial value to avoid passing
12239 * down dirty flags to the following sd_ssc_send function.
12240 */
12241 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12242 return;
12243 }
12244
12245 /*
12246 * Only handle an issued command which is waiting for assessment.
12247 * A command which is not issued will not have
12248 * SSC_FLAGS_INVALID_DATA set, so it's OK to just return here.
12249 */
12250 if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) {
12251 sd_ssc_print(ssc, SCSI_ERR_INFO);
12252 return;
12253 } else {
12254 /*
12255 * For an issued command, we should clear this flag in
12256 * order to make the sd_ssc_t structure reusable for
12257 * multiple uscsi commands.
12258 */
12259 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED;
12260 }
12261
12262 /*
12263 * We will not deal with non-retryable (flag USCSI_DIAGNOSE set)
12264 * commands here. And we should clear the ssc_flags before return.
12265 */
12266 if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) {
12267 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12268 return;
12269 }
12270
12271 switch (tp_assess) {
12272 case SD_FMT_IGNORE:
12273 case SD_FMT_IGNORE_COMPROMISE:
12274 break;
12275 case SD_FMT_STATUS_CHECK:
12276 /*
12277 * For a failed command (including a succeeded command
12278 * with invalid data sent back).
12279 */
12280 sd_ssc_post(ssc, SD_FM_DRV_FATAL);
12281 break;
12282 case SD_FMT_STANDARD:
12283 /*
12284 * Always for the succeeded commands, probably with sense
12285 * data sent back.
12286 * Limitation:
12287 * We can only handle a succeeded command with sense
12288 * data sent back when auto-request-sense is enabled.
12289 */
12290 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen -
12291 ssc->ssc_uscsi_cmd->uscsi_rqresid;
12292 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) &&
12293 (un->un_f_arq_enabled == TRUE) &&
12294 senlen > 0 &&
12295 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) {
12296 sd_ssc_post(ssc, SD_FM_DRV_NOTICE);
12297 }
12298 break;
12299 default:
12300 /*
12301 * Should not have any other type of assessment.
12302 */
12303 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
12304 "sd_ssc_assessment got wrong "
12305 "sd_type_assessment %d.\n", tp_assess);
12306 break;
12307 }
12308 /*
12309 * Clear up the ssc_flags before return.
12310 */
12311 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12312 }
12313
12314 /*
12315 * Function: sd_ssc_post
12316 *
12317 * Description: 1. read the driver property to get fm-scsi-log flag.
12318 * 2. print log if fm_log_capable is non-zero.
12319 * 3. call sd_ssc_ereport_post to post ereport if possible.
12320 *
12321 * Context: May be called from kernel thread or interrupt context.
12322 */
12323 static void
12324 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess)
12325 {
12326 struct sd_lun *un;
12327 int sd_severity;
12328
12329 ASSERT(ssc != NULL);
12330 un = ssc->ssc_un;
12331 ASSERT(un != NULL);
12332
12333 /*
12334 * We may enter here from sd_ssc_assessment (for a USCSI command)
12335 * or directly from sdintr context.
12336 * We don't handle a non-disk drive (CD-ROM, removable media).
12337 * Clear the ssc_flags before return in case we've set
12338 * SSC_FLAGS_INVALID_XXX which should be skipped for a non-disk
12339 * drive.
12340 */
12341 if (ISCD(un) || un->un_f_has_removable_media) {
12342 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12343 return;
12344 }
12345
12346 switch (sd_assess) {
12347 case SD_FM_DRV_FATAL:
12348 sd_severity = SCSI_ERR_FATAL;
12349 break;
12350 case SD_FM_DRV_RECOVERY:
12351 sd_severity = SCSI_ERR_RECOVERED;
12352 break;
12353 case SD_FM_DRV_RETRY:
12354 sd_severity = SCSI_ERR_RETRYABLE;
12355 break;
12356 case SD_FM_DRV_NOTICE:
12357 sd_severity = SCSI_ERR_INFO;
12358 break;
12359 default:
12360 sd_severity = SCSI_ERR_UNKNOWN;
12361 }
12362 /* print log */
12363 sd_ssc_print(ssc, sd_severity);
12364
12365 /* always post ereport */
12366 sd_ssc_ereport_post(ssc, sd_assess);
12367 }
12368
12369 /*
12370 * Function: sd_ssc_set_info
12371 *
12372 * Description: Mark ssc_flags and set ssc_info which would be the
12373 * payload of the uderr ereport. This function will cause
12374 * sd_ssc_ereport_post to post the uderr ereport only.
12375 * Besides, when ssc_flags == SSC_FLAGS_INVALID_DATA (USCSI),
12376 * the function will also call SD_ERROR or scsi_log for a
12377 * CDROM/removable-media/DDI_FM_NOT_CAPABLE device.
12378 *
12379 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and
12380 * sd_uscsi_info in.
12381 * ssc_flags - indicate the sub-category of a uderr.
12382 * comp - this argument is meaningful only when
12383 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible
12384 * values include:
12385 * > 0, SD_ERROR is used with comp as the driver logging
12386 * component;
12387 * = 0, scsi-log is used to log error telemetries;
12388 * < 0, no log available for this telemetry.
12389 *
12390 * Context: Kernel thread or interrupt context
12391 */
12392 static void
12393 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...)
12394 {
12395 va_list ap;
12396
12397 ASSERT(ssc != NULL);
12398 ASSERT(ssc->ssc_un != NULL);
12399
12400 ssc->ssc_flags |= ssc_flags;
12401 va_start(ap, fmt);
12402 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap);
12403 va_end(ap);
12404
12405 /*
12406 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command
12407 * with invalid data sent back. For a non-uscsi command, the
12408 * following code will be bypassed.
12409 */
12410 if (ssc_flags & SSC_FLAGS_INVALID_DATA) {
12411 if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) {
12412 /*
12413 * If the error belongs to a certain component and we
12414 * do not want it to show up on the console, we
12415 * will use SD_ERROR; otherwise scsi_log is
12416 * preferred.
12417 */
12418 if (comp > 0) {
12419 SD_ERROR(comp, ssc->ssc_un, ssc->ssc_info);
12420 } else if (comp == 0) {
12421 scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label,
12422 CE_WARN, ssc->ssc_info);
12423 }
12424 }
12425 }
12426 }
12427
12428 /*
12429 * Function: sd_buf_iodone
12430 *
12431 * Description: Frees the sd_xbuf & returns the buf to its originator.
12432 *
12433 * Context: May be called from interrupt context.
12434 */
12435 /* ARGSUSED */
12436 static void
12437 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp)
12438 {
12439 struct sd_xbuf *xp;
12440
12441 ASSERT(un != NULL);
12442 ASSERT(bp != NULL);
12443 ASSERT(!mutex_owned(SD_MUTEX(un)));
12444
12445 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n");
12446
12447 xp = SD_GET_XBUF(bp);
12448 ASSERT(xp != NULL);
12449
12450 /* xbuf is gone after this */
12451 if (ddi_xbuf_done(bp, un->un_xbuf_attr)) {
12452 mutex_enter(SD_MUTEX(un));
12453
12454 /*
12455 * Grab time when the cmd completed.
12456 * This is used to determine if the system has been
12457 * idle long enough to be reported as idle to the PM framework.
12458 * This lowers the overhead, and therefore improves
12459 * performance per I/O operation.
12460 */
12461 un->un_pm_idle_time = gethrtime();
12462
12463 un->un_ncmds_in_driver--;
12464 ASSERT(un->un_ncmds_in_driver >= 0);
12465 SD_INFO(SD_LOG_IO, un,
12466 "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
12467 un->un_ncmds_in_driver);
12468
12469 mutex_exit(SD_MUTEX(un));
12470 }
12471
12472 biodone(bp); /* bp is gone after this */
12473
12474 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n");
12475 }
12476
12477
12478 /*
12479 * Function: sd_uscsi_iodone
12480 *
12481 * Description: Frees the sd_xbuf & returns the buf to its originator.
12482 *
12483 * Context: May be called from interrupt context.
12484 */
12485 /* ARGSUSED */
12486 static void
12487 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
12488 {
12489 struct sd_xbuf *xp;
12490
12491 ASSERT(un != NULL);
12492 ASSERT(bp != NULL);
12493
12494 xp = SD_GET_XBUF(bp);
12495 ASSERT(xp != NULL);
12496 ASSERT(!mutex_owned(SD_MUTEX(un)));
12497
12498 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n");
12499
12500 bp->b_private = xp->xb_private;
12501
12502 mutex_enter(SD_MUTEX(un));
12503
12504 /*
12505 * Grab time when the cmd completed.
12506 * This is used to determine if the system has been
12507 * idle long enough to be reported as idle to the PM framework.
12508 * This lowers the overhead, and therefore improves
12509 * performance per I/O operation.
12510 */
12511 un->un_pm_idle_time = gethrtime();
12512
12513 un->un_ncmds_in_driver--;
12514 ASSERT(un->un_ncmds_in_driver >= 0);
12515 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
12516 un->un_ncmds_in_driver);
12517
12518 mutex_exit(SD_MUTEX(un));
12519
12520 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen >
12521 SENSE_LENGTH) {
12522 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH +
12523 MAX_SENSE_LENGTH);
12524 } else {
12525 kmem_free(xp, sizeof (struct sd_xbuf));
12526 }
12527
12528 biodone(bp);
12529
12530 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
12531 }
12532
12533
12534 /*
12535 * Function: sd_mapblockaddr_iostart
12536 *
12537 * Description: Verify request lies within the partition limits for
12538 * the indicated minor device. Issue "overrun" buf if
12539 * request would exceed partition range. Converts
12540 * partition-relative block address to absolute.
12541 *
12542 * Upon exit of this function:
12543 * 1. I/O is aligned
12544 * xp->xb_blkno represents the absolute sector address
12545 * 2. I/O is misaligned
12546 * xp->xb_blkno represents the absolute logical block address
12547 * based on DEV_BSIZE. The logical block address will be
12548 * converted to physical sector address in sd_mapblocksize_\
12549 * iostart.
12550 * 3. I/O is misaligned but is aligned in "overrun" buf
12551 * xp->xb_blkno represents the absolute logical block address
12552 * based on DEV_BSIZE. The logical block address will be
12553 * converted to physical sector address in sd_mapblocksize_\
12554 * iostart. But no RMW will be issued in this case.
12555 *
12556 * Context: Can sleep
12557 *
12558 * Issues: This follows what the old code did, in terms of accessing
12559 * some of the partition info in the unit struct without holding
12560 * the mutex. This is a general issue, if the partition info
12561 * can be altered while IO is in progress... as soon as we send
12562 * a buf, its partitioning can be invalid before it gets to the
12563 * device. Probably the right fix is to move partitioning out
12564 * of the driver entirely.
12565 */
12566
12567 static void
12568 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
12569 {
12570 diskaddr_t nblocks; /* #blocks in the given partition */
12571 daddr_t blocknum; /* Block number specified by the buf */
12572 size_t requested_nblocks;
12573 size_t available_nblocks;
12574 int partition;
12575 diskaddr_t partition_offset;
12576 struct sd_xbuf *xp;
12577 int secmask = 0, blknomask = 0;
12578 ushort_t is_aligned = TRUE;
12579
12580 ASSERT(un != NULL);
12581 ASSERT(bp != NULL);
12582 ASSERT(!mutex_owned(SD_MUTEX(un)));
12583
12584 SD_TRACE(SD_LOG_IO_PARTITION, un,
12585 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);
12586
12587 xp = SD_GET_XBUF(bp);
12588 ASSERT(xp != NULL);
12589
12590 /*
12591 * If the geometry is not indicated as valid, attempt to access
12592 * the unit & verify the geometry/label. This can be the case for
12593 * removable-media devices, or if the device was opened in
12594 * NDELAY/NONBLOCK mode.
12595 */
12596 partition = SDPART(bp->b_edev);
12597
12598 if (!SD_IS_VALID_LABEL(un)) {
12599 sd_ssc_t *ssc;
12600 /*
12601 * Initialize sd_ssc_t for internal uscsi commands.
12602 * To avoid a potential performance issue, we alloc
12603 * memory only if there is an invalid label.
12604 */
12605 ssc = sd_ssc_init(un);
12606
12607 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) {
12608 /*
12609 * For removable devices it is possible to start an
12610 * I/O without a media by opening the device in nodelay
12611 * mode. Also for writable CDs there can be many
12612 * scenarios where there is no geometry yet but volume
12613 * manager is trying to issue a read() just because
12614 * it can see TOC on the CD. So do not print a message
12615 * for removables.
12616 */
12617 if (!un->un_f_has_removable_media) {
12618 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
12619 "i/o to invalid geometry\n");
12620 }
12621 bioerror(bp, EIO);
12622 bp->b_resid = bp->b_bcount;
12623 SD_BEGIN_IODONE(index, un, bp);
12624
12625 sd_ssc_fini(ssc);
12626 return;
12627 }
12628 sd_ssc_fini(ssc);
12629 }
12630
12631 nblocks = 0;
12632 (void) cmlb_partinfo(un->un_cmlbhandle, partition,
12633 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT);
12634
12635 if (un->un_f_enable_rmw) {
12636 blknomask = (un->un_phy_blocksize / DEV_BSIZE) - 1;
12637 secmask = un->un_phy_blocksize - 1;
12638 } else {
12639 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
12640 secmask = un->un_tgt_blocksize - 1;
12641 }
12642
12643 if ((bp->b_lblkno & (blknomask)) || (bp->b_bcount & (secmask))) {
12644 is_aligned = FALSE;
12645 }
12646
12647 if (!(NOT_DEVBSIZE(un)) || un->un_f_enable_rmw) {
12648 /*
12649 * If I/O is aligned, no need to involve RMW (Read Modify Write).
12650 * Convert the logical block number to target's physical sector
12651 * number.
12652 */
12653 if (is_aligned) {
12654 xp->xb_blkno = SD_SYS2TGTBLOCK(un, xp->xb_blkno);
12655 } else {
12656 /*
12657 * There is no RMW if we're just reading, so don't
12658 * warn or error out because of it.
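 *
 * Worked example of the masks computed above (illustrative
 * values): with un_phy_blocksize == 4096 and RMW enabled,
 * blknomask == 7 and secmask == 0xFFF; a buf with b_lblkno == 3 or
 * b_bcount == 512 is misaligned and takes the RMW path, while
 * b_lblkno == 8 with b_bcount == 8192 is aligned.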
12660 if (bp->b_flags & B_READ) {
12661 /*EMPTY*/
12662 } else if (!un->un_f_enable_rmw &&
12663 un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) {
12664 bp->b_flags |= B_ERROR;
12665 goto error_exit;
12666 } else if (un->un_f_rmw_type == SD_RMW_TYPE_DEFAULT) {
12667 mutex_enter(SD_MUTEX(un));
12668 if (!un->un_f_enable_rmw &&
12669 un->un_rmw_msg_timeid == NULL) {
12670 scsi_log(SD_DEVINFO(un), sd_label,
12671 CE_WARN, "I/O request is not "
12672 "aligned with the %d-byte disk "
12673 "sector size. It is handled through "
12674 "Read-Modify-Write, but the "
12675 "performance is very low.\n",
12676 un->un_tgt_blocksize);
12677 un->un_rmw_msg_timeid =
12678 timeout(sd_rmw_msg_print_handler,
12679 un, SD_RMW_MSG_PRINT_TIMEOUT);
12680 } else {
12681 un->un_rmw_incre_count++;
12682 }
12683 mutex_exit(SD_MUTEX(un));
12684 }
12685
12686 nblocks = SD_TGT2SYSBLOCK(un, nblocks);
12687 partition_offset = SD_TGT2SYSBLOCK(un,
12688 partition_offset);
12689 }
12690 }
12691
12692 /*
12693 * blocknum is the starting block number of the request. At this
12694 * point it is still relative to the start of the minor device.
12695 */
12696 blocknum = xp->xb_blkno;
12697
12698 /*
12699 * Legacy: If the starting block number is one past the last block
12700 * in the partition, do not set B_ERROR in the buf.
12701 */
12702 if (blocknum == nblocks) {
12703 goto error_exit;
12704 }
12705
12706 /*
12707 * Confirm that the first block of the request lies within the
12708 * partition limits. Also the requested number of bytes must be
12709 * a multiple of the system block size.
12710 */
12711 if ((blocknum < 0) || (blocknum >= nblocks) ||
12712 ((bp->b_bcount & (DEV_BSIZE - 1)) != 0)) {
12713 bp->b_flags |= B_ERROR;
12714 goto error_exit;
12715 }
12716
12717 /*
12718 * If the requested # blocks exceeds the available # blocks, that
12719 * is an overrun of the partition.
12720 */
12721 if ((!NOT_DEVBSIZE(un)) && is_aligned) {
12722 requested_nblocks = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
12723 } else {
12724 requested_nblocks = SD_BYTES2SYSBLOCKS(bp->b_bcount);
12725 }
12726
12727 available_nblocks = (size_t)(nblocks - blocknum);
12728 ASSERT(nblocks >= blocknum);
12729
12730 if (requested_nblocks > available_nblocks) {
12731 size_t resid;
12732
12733 /*
12734 * Allocate an "overrun" buf to allow the request to proceed
12735 * for the amount of space available in the partition. The
12736 * amount not transferred will be added into the b_resid
12737 * when the operation is complete. The overrun buf
12738 * replaces the original buf here, and the original buf
12739 * is saved inside the overrun buf, for later use.
12740 */
12741 if ((!NOT_DEVBSIZE(un)) && is_aligned) {
12742 resid = SD_TGTBLOCKS2BYTES(un,
12743 (offset_t)(requested_nblocks - available_nblocks));
12744 } else {
12745 resid = SD_SYSBLOCKS2BYTES(
12746 (offset_t)(requested_nblocks - available_nblocks));
12747 }
12748
12749 size_t count = bp->b_bcount - resid;
12750 /*
12751 * Note: count is an unsigned entity thus it'll NEVER
12752 * be less than 0 so ASSERT the original values are
12753 * correct.
12754 */
12755 ASSERT(bp->b_bcount >= resid);
12756
12757 bp = sd_bioclone_alloc(bp, count, blocknum,
12758 (int (*)(struct buf *)) sd_mapblockaddr_iodone);
12759 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */
12760 ASSERT(xp != NULL);
12761 }
12762
12763 /* At this point there should be no residual for this buf. */
12764 ASSERT(bp->b_resid == 0);
12765
12766 /* Convert the block number to an absolute address. */
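/*
 * For illustration (hypothetical values): a request for block 100
 * of a partition whose cmlb partition_offset is 16065 goes to the
 * device as absolute block 100 + 16065 = 16165.
 */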
12767 xp->xb_blkno += partition_offset;
12768
12769 SD_NEXT_IOSTART(index, un, bp);
12770
12771 SD_TRACE(SD_LOG_IO_PARTITION, un,
12772 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp);
12773
12774 return;
12775
12776 error_exit:
12777 bp->b_resid = bp->b_bcount;
12778 SD_BEGIN_IODONE(index, un, bp);
12779 SD_TRACE(SD_LOG_IO_PARTITION, un,
12780 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp);
12781 }
12782
12783
12784 /*
12785 * Function: sd_mapblockaddr_iodone
12786 *
12787 * Description: Completion-side processing for partition management.
12788 *
12789 * Context: May be called under interrupt context
12790 */
12791
12792 static void
12793 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp)
12794 {
12795 /* int partition; */ /* Not used, see below. */
12796 ASSERT(un != NULL);
12797 ASSERT(bp != NULL);
12798 ASSERT(!mutex_owned(SD_MUTEX(un)));
12799
12800 SD_TRACE(SD_LOG_IO_PARTITION, un,
12801 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp);
12802
12803 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) {
12804 /*
12805 * We have an "overrun" buf to deal with...
12806 */
12807 struct sd_xbuf *xp;
12808 struct buf *obp; /* ptr to the original buf */
12809
12810 xp = SD_GET_XBUF(bp);
12811 ASSERT(xp != NULL);
12812
12813 /* Retrieve the pointer to the original buf */
12814 obp = (struct buf *)xp->xb_private;
12815 ASSERT(obp != NULL);
12816
12817 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid);
12818 bioerror(obp, bp->b_error);
12819
12820 sd_bioclone_free(bp);
12821
12822 /*
12823 * Get back the original buf.
12824 * Note that since the restoration of xb_blkno below
12825 * was removed, the sd_xbuf is not needed.
12826 */
12827 bp = obp;
12828 /*
12829 * xp = SD_GET_XBUF(bp);
12830 * ASSERT(xp != NULL);
12831 */
12832 }
12833
12834 /*
12835 * Convert sd->xb_blkno back to a minor-device relative value.
12836 * Note: this has been commented out, as it is not needed in the
12837 * current implementation of the driver (ie, this function is at
12838 * the top of the layering chain, so the info would be discarded)
12839 * and it is in the "hot" IO path.
12840 *
12841 * partition = getminor(bp->b_edev) & SDPART_MASK;
12842 * xp->xb_blkno -= un->un_offset[partition];
12843 */
12844
12845 SD_NEXT_IODONE(index, un, bp);
12846
12847 SD_TRACE(SD_LOG_IO_PARTITION, un,
12848 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp);
12849 }
12850
12851
12852 /*
12853 * Function: sd_mapblocksize_iostart
12854 *
12855 * Description: Convert between system block size (un->un_sys_blocksize)
12856 * and target block size (un->un_tgt_blocksize).
12857 *
12858 * Context: Can sleep to allocate resources.
12859 *
12860 * Assumptions: A higher layer has already performed any partition validation,
12861 * and converted the xp->xb_blkno to an absolute value relative
12862 * to the start of the device.
12863 *
12864 * It is also assumed that the higher layer has implemented
12865 * an "overrun" mechanism for the case where the request would
12866 * read/write beyond the end of a partition. In this case we
12867 * assume (and ASSERT) that bp->b_resid == 0.
12868 *
12869 * Note: The implementation for this routine assumes the target
12870 * block size remains constant between allocation and transport.
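 *
 * For illustration (hypothetical values): reading 512 bytes at
 * system block 7 of a device with 2048-byte target blocks means
 * reading target block 1 (bytes 2048-4095 of the device) and
 * copying back to the caller only the 512 bytes that start at
 * offset 1536 within that block.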
12871 */
12872
12873 static void
12874 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp)
12875 {
12876 struct sd_mapblocksize_info *bsp;
12877 struct sd_xbuf *xp;
12878 offset_t first_byte;
12879 daddr_t start_block, end_block;
12880 daddr_t request_bytes;
12881 ushort_t is_aligned = FALSE;
12882
12883 ASSERT(un != NULL);
12884 ASSERT(bp != NULL);
12885 ASSERT(!mutex_owned(SD_MUTEX(un)));
12886 ASSERT(bp->b_resid == 0);
12887
12888 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
12889 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp);
12890
12891 /*
12892 * For a non-writable CD, a write request is an error
12893 */
12894 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) &&
12895 (un->un_f_mmc_writable_media == FALSE)) {
12896 bioerror(bp, EIO);
12897 bp->b_resid = bp->b_bcount;
12898 SD_BEGIN_IODONE(index, un, bp);
12899 return;
12900 }
12901
12902 /*
12903 * We do not need a shadow buf if the device is using
12904 * un->un_sys_blocksize as its block size or if bcount == 0.
12905 * In this case there is no layer-private data block allocated.
12906 */
12907 if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
12908 (bp->b_bcount == 0)) {
12909 goto done;
12910 }
12911
12912 #if defined(__i386) || defined(__amd64)
12913 /* We do not support non-block-aligned transfers for ROD devices */
12914 ASSERT(!ISROD(un));
12915 #endif
12916
12917 xp = SD_GET_XBUF(bp);
12918 ASSERT(xp != NULL);
12919
12920 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12921 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n",
12922 un->un_tgt_blocksize, DEV_BSIZE);
12923 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12924 "request start block:0x%x\n", xp->xb_blkno);
12925 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12926 "request len:0x%x\n", bp->b_bcount);
12927
12928 /*
12929 * Allocate the layer-private data area for the mapblocksize layer.
12930 * Layers are allowed to use the xb_private member of the sd_xbuf
12931 * struct to store the pointer to their layer-private data block, but
12932 * each layer also has the responsibility of restoring the prior
12933 * contents of xb_private before returning the buf/xbuf to the
12934 * higher layer that sent it.
12935 *
12936 * Here we save the prior contents of xp->xb_private into the
12937 * bsp->mbs_oprivate field of our layer-private data area. This value
12938 * is restored by sd_mapblocksize_iodone() just prior to freeing up
12939 * the layer-private area and returning the buf/xbuf to the layer
12940 * that sent it.
12941 *
12942 * Note that here we use kmem_zalloc for the allocation as there are
12943 * parts of the mapblocksize code that expect certain fields to be
12944 * zero unless explicitly set to a required value.
12945 */
12946 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
12947 bsp->mbs_oprivate = xp->xb_private;
12948 xp->xb_private = bsp;
12949
12950 /*
12951 * This treats the data on the disk (target) as an array of bytes.
12952 * first_byte is the byte offset, from the beginning of the device,
12953 * to the location of the request. This is converted from a
12954 * un->un_sys_blocksize block address to a byte offset, and then back
12955 * to a block address based upon a un->un_tgt_blocksize block size.
12956 *
12957 * xp->xb_blkno should be absolute upon entry into this function,
12958 * but it is based upon partitions that use the "system"
12959 * block size. It must be adjusted to reflect the block size of
12960 * the target.
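 *
 * For illustration (hypothetical values): with DEV_BSIZE = 512
 * and un_tgt_blocksize = 2048, a 1024-byte request at
 * xb_blkno = 7 yields first_byte = 7 * 512 = 3584, hence
 * start_block = 3584 / 2048 = 1 and
 * end_block = (3584 + 1024 + 2047) / 2048 = 3; request_bytes
 * below then becomes (3 - 1) * 2048 = 4096.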
12961 *
12962 * Note that end_block is actually the block that follows the last
12963 * block of the request, but that's what is needed for the computation.
12964 */
12965 first_byte = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
12966 if (un->un_f_enable_rmw) {
12967 start_block = xp->xb_blkno =
12968 (first_byte / un->un_phy_blocksize) *
12969 (un->un_phy_blocksize / DEV_BSIZE);
12970 end_block = ((first_byte + bp->b_bcount +
12971 un->un_phy_blocksize - 1) / un->un_phy_blocksize) *
12972 (un->un_phy_blocksize / DEV_BSIZE);
12973 } else {
12974 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
12975 end_block = (first_byte + bp->b_bcount +
12976 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;
12977 }
12978
12979 /* request_bytes is rounded up to a multiple of the target block size */
12980 request_bytes = (end_block - start_block) * un->un_tgt_blocksize;
12981
12982 /*
12983 * See if the starting address of the request and the request
12984 * length are aligned on a un->un_tgt_blocksize boundary. If aligned
12985 * then we do not need to allocate a shadow buf to handle the request.
12986 */
12987 if (un->un_f_enable_rmw) {
12988 if (((first_byte % un->un_phy_blocksize) == 0) &&
12989 ((bp->b_bcount % un->un_phy_blocksize) == 0)) {
12990 is_aligned = TRUE;
12991 }
12992 } else {
12993 if (((first_byte % un->un_tgt_blocksize) == 0) &&
12994 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
12995 is_aligned = TRUE;
12996 }
12997 }
12998
12999 if ((bp->b_flags & B_READ) == 0) {
13000 /*
13001 * Lock the range for a write operation. An aligned request is
13002 * considered a simple write; otherwise the request must be a
13003 * read-modify-write.
13004 */
13005 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
13006 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
13007 }
13008
13009 /*
13010 * Alloc a shadow buf if the request is not aligned. Also, this is
13011 * where the READ command is generated for a read-modify-write. (The
13012 * write phase is deferred until after the read completes.)
13013 */
13014 if (is_aligned == FALSE) {
13015
13016 struct sd_mapblocksize_info *shadow_bsp;
13017 struct sd_xbuf *shadow_xp;
13018 struct buf *shadow_bp;
13019
13020 /*
13021 * Allocate the shadow buf and its associated xbuf. Note that
13022 * after this call the xb_blkno value in both the original
13023 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
13024 * same: absolute (relative to the start of the device), and
13025 * adjusted for the target block size. The b_blkno in the
13026 * shadow buf will also be set to this value. We should never
13027 * change b_blkno in the original bp however.
13028 *
13029 * Note also that the shadow buf will always need to be a
13030 * READ command, regardless of whether the incoming command
13031 * is a READ or a WRITE.
13032 */
13033 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
13034 xp->xb_blkno,
13035 (int (*)(struct buf *)) sd_mapblocksize_iodone);
13036
13037 shadow_xp = SD_GET_XBUF(shadow_bp);
13038
13039 /*
13040 * Allocate the layer-private data for the shadow buf.
13041 * (No need to preserve xb_private in the shadow xbuf.)
13042 */
13043 shadow_xp->xb_private = shadow_bsp =
13044 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
13045
13046 /*
13047 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone
13048 * to figure out where the start of the user data is (based upon
13049 * the system block size) in the data returned by the READ
13050 * command (which will be based upon the target blocksize). Note
13051 * that this is only really used if the request is unaligned.
13052 */
13053 if (un->un_f_enable_rmw) {
13054 bsp->mbs_copy_offset = (ssize_t)(first_byte -
13055 ((offset_t)xp->xb_blkno * un->un_sys_blocksize));
13056 ASSERT((bsp->mbs_copy_offset >= 0) &&
13057 (bsp->mbs_copy_offset < un->un_phy_blocksize));
13058 } else {
13059 bsp->mbs_copy_offset = (ssize_t)(first_byte -
13060 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize));
13061 ASSERT((bsp->mbs_copy_offset >= 0) &&
13062 (bsp->mbs_copy_offset < un->un_tgt_blocksize));
13063 }
13064
13065 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset;
13066
13067 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index;
13068
13069 /* Transfer the wmap (if any) to the shadow buf */
13070 shadow_bsp->mbs_wmp = bsp->mbs_wmp;
13071 bsp->mbs_wmp = NULL;
13072
13073 /*
13074 * The shadow buf goes on from here in place of the
13075 * original buf.
13076 */
13077 shadow_bsp->mbs_orig_bp = bp;
13078 bp = shadow_bp;
13079 }
13080
13081 SD_INFO(SD_LOG_IO_RMMEDIA, un,
13082 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno);
13083 SD_INFO(SD_LOG_IO_RMMEDIA, un,
13084 "sd_mapblocksize_iostart: tgt request len:0x%x\n",
13085 request_bytes);
13086 SD_INFO(SD_LOG_IO_RMMEDIA, un,
13087 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp);
13088
13089 done:
13090 SD_NEXT_IOSTART(index, un, bp);
13091
13092 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
13093 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp);
13094 }
13095
13096
13097 /*
13098 * Function: sd_mapblocksize_iodone
13099 *
13100 * Description: Completion side processing for block-size mapping.
13101 *
13102 * Context: May be called under interrupt context
13103 */
13104
13105 static void
13106 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp)
13107 {
13108 struct sd_mapblocksize_info *bsp;
13109 struct sd_xbuf *xp;
13110 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */
13111 struct buf *orig_bp; /* ptr to the original buf */
13112 offset_t shadow_end;
13113 offset_t request_end;
13114 offset_t shadow_start;
13115 ssize_t copy_offset;
13116 size_t copy_length;
13117 size_t shortfall;
13118 uint_t is_write; /* TRUE if this bp is a WRITE */
13119 uint_t has_wmap; /* TRUE if this bp has a wmap */
13120
13121 ASSERT(un != NULL);
13122 ASSERT(bp != NULL);
13123
13124 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
13125 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp);
13126
13127 /*
13128 * There is no shadow buf or layer-private data if the target is
13129 * using un->un_sys_blocksize as its block size or if bcount == 0.
13130 */
13131 if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
13132 (bp->b_bcount == 0)) {
13133 goto exit;
13134 }
13135
13136 xp = SD_GET_XBUF(bp);
13137 ASSERT(xp != NULL);
13138
13139 /* Retrieve the pointer to the layer-private data area from the xbuf. */
13140 bsp = xp->xb_private;
13141
13142 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE;
13143 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE;
13144
13145 if (is_write) {
13146 /*
13147 * For a WRITE request we must free up the block range that
13148 * we have locked up. This holds regardless of whether this is
13149 * an aligned write request or a read-modify-write request.
13150 */
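/*
 * Note that this range lock, taken in sd_mapblocksize_iostart(),
 * is what keeps two concurrent writes whose target-block ranges
 * overlap from interleaving their read and write phases, so it
 * must not be dropped before the command has fully completed.
 */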
13151 sd_range_unlock(un, bsp->mbs_wmp);
13152 bsp->mbs_wmp = NULL;
13153 }
13154
13155 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) {
13156 /*
13157 * An aligned read or write command will have no shadow buf;
13158 * there is not much else to do with it.
13159 */
13160 goto done;
13161 }
13162
13163 orig_bp = bsp->mbs_orig_bp;
13164 ASSERT(orig_bp != NULL);
13165 orig_xp = SD_GET_XBUF(orig_bp);
13166 ASSERT(orig_xp != NULL);
13167 ASSERT(!mutex_owned(SD_MUTEX(un)));
13168
13169 if (!is_write && has_wmap) {
13170 /*
13171 * A READ with a wmap means this is the READ phase of a
13172 * read-modify-write. If an error occurred on the READ then
13173 * we do not proceed with the WRITE phase or copy any data.
13174 * Just release the write maps and return with an error.
13175 */
13176 if ((bp->b_resid != 0) || (bp->b_error != 0)) {
13177 orig_bp->b_resid = orig_bp->b_bcount;
13178 bioerror(orig_bp, bp->b_error);
13179 sd_range_unlock(un, bsp->mbs_wmp);
13180 goto freebuf_done;
13181 }
13182 }
13183
13184 /*
13185 * Here is where we set up to copy the data from the shadow buf
13186 * into the space associated with the original buf.
13187 *
13188 * To deal with the conversion between block sizes, these
13189 * computations treat the data as an array of bytes, with the
13190 * first byte (byte 0) corresponding to the first byte in the
13191 * first block on the disk.
13192 */
13193
13194 /*
13195 * shadow_start and shadow_len indicate the location and size of
13196 * the data returned with the shadow IO request.
13197 */
13198 if (un->un_f_enable_rmw) {
13199 shadow_start = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
13200 } else {
13201 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
13202 }
13203 shadow_end = shadow_start + bp->b_bcount - bp->b_resid;
13204
13205 /*
13206 * copy_offset gives the offset (in bytes) from the start of the first
13207 * block of the READ request to the beginning of the data. We retrieve
13208 * this value from the layer-private area of the shadow xbuf, where it
13209 * was saved by sd_mapblocksize_iostart(). copy_length gives the amount
13210 * of data to be copied (in bytes).
13211 */
13212 copy_offset = bsp->mbs_copy_offset;
13213 if (un->un_f_enable_rmw) {
13214 ASSERT((copy_offset >= 0) &&
13215 (copy_offset < un->un_phy_blocksize));
13216 } else {
13217 ASSERT((copy_offset >= 0) &&
13218 (copy_offset < un->un_tgt_blocksize));
13219 }
13220
13221 copy_length = orig_bp->b_bcount;
13222 request_end = shadow_start + copy_offset + orig_bp->b_bcount;
13223
13224 /*
13225 * Set up the resid and error fields of orig_bp as appropriate.
13226 */
13227 if (shadow_end >= request_end) {
13228 /* We got all the requested data; set resid to zero */
13229 orig_bp->b_resid = 0;
13230 } else {
13231 /*
13232 * We failed to get enough data to fully satisfy the original
13233 * request. Just copy back whatever data we got and set
13234 * up the residual and error code as required.
13235 *
13236 * 'shortfall' is the amount by which the data received with the
13237 * shadow buf has "fallen short" of the requested amount.
13238 */
13239 shortfall = (size_t)(request_end - shadow_end);
13240
13241 if (shortfall > orig_bp->b_bcount) {
13242 /*
13243 * We did not get enough data to even partially
13244 * fulfill the original request. The residual is
13245 * equal to the amount requested.
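 * (This is the case where shadow_end < shadow_start +
 * copy_offset, ie, the shadow READ stopped before even reaching
 * the first byte the caller asked for.)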
13246 */
13247 orig_bp->b_resid = orig_bp->b_bcount;
13248 } else {
13249 /*
13250 * We did not get all the data that we requested
13251 * from the device, but we will try to return what
13252 * portion we did get.
13253 */
13254 orig_bp->b_resid = shortfall;
13255 }
13256 ASSERT(copy_length >= orig_bp->b_resid);
13257 copy_length -= orig_bp->b_resid;
13258 }
13259
13260 /* Propagate the error code from the shadow buf to the original buf */
13261 bioerror(orig_bp, bp->b_error);
13262
13263 if (is_write) {
13264 goto freebuf_done; /* No data copying for a WRITE */
13265 }
13266
13267 if (has_wmap) {
13268 /*
13269 * This is a READ command from the READ phase of a
13270 * read-modify-write request. We have to copy the data given
13271 * by the user OVER the data returned by the READ command,
13272 * then convert the command from a READ to a WRITE and send
13273 * it back to the target.
13274 */
13275 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset,
13276 copy_length);
13277
13278 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */
13279
13280 /*
13281 * Dispatch the WRITE command to the taskq thread, which
13282 * will in turn send the command to the target. When the
13283 * WRITE command completes, we (sd_mapblocksize_iodone())
13284 * will get called again as part of the iodone chain
13285 * processing for it. Note that we will still be dealing
13286 * with the shadow buf at that point.
13287 */
13288 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp,
13289 KM_NOSLEEP) != 0) {
13290 /*
13291 * Dispatch was successful so we are done. Return
13292 * without going any higher up the iodone chain. Do
13293 * not free up any layer-private data until after the
13294 * WRITE completes.
13295 */
13296 return;
13297 }
13298
13299 /*
13300 * Dispatch of the WRITE command failed; set up the error
13301 * condition and send this IO back up the iodone chain.
13302 */
13303 bioerror(orig_bp, EIO);
13304 orig_bp->b_resid = orig_bp->b_bcount;
13305
13306 } else {
13307 /*
13308 * This is a regular READ request (ie, not a RMW). Copy the
13309 * data from the shadow buf into the original buf. The
13310 * copy_offset compensates for any "misalignment" between the
13311 * shadow buf (with its un->un_tgt_blocksize blocks) and the
13312 * original buf (with its un->un_sys_blocksize blocks).
13313 */
13314 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr,
13315 copy_length);
13316 }
13317
13318 freebuf_done:
13319
13320 /*
13321 * At this point we still have both the shadow buf AND the original
13322 * buf to deal with, as well as the layer-private data area in each.
13323 * Local variables are as follows:
13324 *
13325 * bp -- points to shadow buf
13326 * xp -- points to xbuf of shadow buf
13327 * bsp -- points to layer-private data area of shadow buf
13328 * orig_bp -- points to original buf
13329 *
13330 * First free the shadow buf and its associated xbuf, then free the
13331 * layer-private data area from the shadow buf. There is no need to
13332 * restore xb_private in the shadow xbuf.
13333 */
13334 sd_shadow_buf_free(bp);
13335 kmem_free(bsp, sizeof (struct sd_mapblocksize_info));
13336
13337 /*
13338 * Now update the local variables to point to the original buf, xbuf,
13339 * and layer-private area.
13340 */
13341 bp = orig_bp;
13342 xp = SD_GET_XBUF(bp);
13343 ASSERT(xp != NULL);
13344 ASSERT(xp == orig_xp);
13345 bsp = xp->xb_private;
13346 ASSERT(bsp != NULL);
13347
13348 done:
13349 /*
13350 * Restore xb_private to whatever it was set to by the next higher
13351 * layer in the chain, then free the layer-private data area.
13352 */
13353 xp->xb_private = bsp->mbs_oprivate;
13354 kmem_free(bsp, sizeof (struct sd_mapblocksize_info));
13355
13356 exit:
13357 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp),
13358 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp);
13359
13360 SD_NEXT_IODONE(index, un, bp);
13361 }
13362
13363
13364 /*
13365 * Function: sd_checksum_iostart
13366 *
13367 * Description: A stub function for a layer that's currently not used.
13368 * For now just a placeholder.
13369 *
13370 * Context: Kernel thread context
13371 */
13372
13373 static void
13374 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp)
13375 {
13376 ASSERT(un != NULL);
13377 ASSERT(bp != NULL);
13378 ASSERT(!mutex_owned(SD_MUTEX(un)));
13379 SD_NEXT_IOSTART(index, un, bp);
13380 }
13381
13382
13383 /*
13384 * Function: sd_checksum_iodone
13385 *
13386 * Description: A stub function for a layer that's currently not used.
13387 * For now just a placeholder.
13388 *
13389 * Context: May be called under interrupt context
13390 */
13391
13392 static void
13393 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp)
13394 {
13395 ASSERT(un != NULL);
13396 ASSERT(bp != NULL);
13397 ASSERT(!mutex_owned(SD_MUTEX(un)));
13398 SD_NEXT_IODONE(index, un, bp);
13399 }
13400
13401
13402 /*
13403 * Function: sd_checksum_uscsi_iostart
13404 *
13405 * Description: A stub function for a layer that's currently not used.
13406 * For now just a placeholder.
13407 *
13408 * Context: Kernel thread context
13409 */
13410
13411 static void
13412 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp)
13413 {
13414 ASSERT(un != NULL);
13415 ASSERT(bp != NULL);
13416 ASSERT(!mutex_owned(SD_MUTEX(un)));
13417 SD_NEXT_IOSTART(index, un, bp);
13418 }
13419
13420
13421 /*
13422 * Function: sd_checksum_uscsi_iodone
13423 *
13424 * Description: A stub function for a layer that's currently not used.
13425 * For now just a placeholder.
13426 *
13427 * Context: May be called under interrupt context
13428 */
13429
13430 static void
13431 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
13432 {
13433 ASSERT(un != NULL);
13434 ASSERT(bp != NULL);
13435 ASSERT(!mutex_owned(SD_MUTEX(un)));
13436 SD_NEXT_IODONE(index, un, bp);
13437 }
13438
13439
13440 /*
13441 * Function: sd_pm_iostart
13442 *
13443 * Description: iostart-side routine for power management.
13444 *
13445 * Context: Kernel thread context
13446 */
13447
13448 static void
13449 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
13450 {
13451 ASSERT(un != NULL);
13452 ASSERT(bp != NULL);
13453 ASSERT(!mutex_owned(SD_MUTEX(un)));
13454 ASSERT(!mutex_owned(&un->un_pm_mutex));
13455
13456 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");
13457
13458 if (sd_pm_entry(un) != DDI_SUCCESS) {
13459 /*
13460 * Set up to return the failed buf back up the 'iodone'
13461 * side of the calling chain.
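 * (sd_pm_entry() failing typically means the device could not
 * be brought back to full power, so there is no point in
 * transporting this command.)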
13462 */
13463 bioerror(bp, EIO);
13464 bp->b_resid = bp->b_bcount;
13465
13466 SD_BEGIN_IODONE(index, un, bp);
13467
13468 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13469 return;
13470 }
13471
13472 SD_NEXT_IOSTART(index, un, bp);
13473
13474 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13475 }
13476
13477
13478 /*
13479 * Function: sd_pm_iodone
13480 *
13481 * Description: iodone-side routine for power management.
13482 *
13483 * Context: may be called from interrupt context
13484 */
13485
13486 static void
13487 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
13488 {
13489 ASSERT(un != NULL);
13490 ASSERT(bp != NULL);
13491 ASSERT(!mutex_owned(&un->un_pm_mutex));
13492
13493 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");
13494
13495 /*
13496 * After attach the following flag is only read, so don't
13497 * take the penalty of acquiring a mutex for it.
13498 */
13499 if (un->un_f_pm_is_enabled == TRUE) {
13500 sd_pm_exit(un);
13501 }
13502
13503 SD_NEXT_IODONE(index, un, bp);
13504
13505 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n");
13506 }
13507
13508
13509 /*
13510 * Function: sd_core_iostart
13511 *
13512 * Description: Primary driver function for enqueuing buf(9S) structs from
13513 * the system and initiating IO to the target device
13514 *
13515 * Context: Kernel thread context. Can sleep.
13516 *
13517 * Assumptions: - The given xp->xb_blkno is absolute
13518 * (ie, relative to the start of the device).
13519 * - The IO is to be done using the native blocksize of
13520 * the device, as specified in un->un_tgt_blocksize.
13521 */
13522 /* ARGSUSED */
13523 static void
13524 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp)
13525 {
13526 struct sd_xbuf *xp;
13527
13528 ASSERT(un != NULL);
13529 ASSERT(bp != NULL);
13530 ASSERT(!mutex_owned(SD_MUTEX(un)));
13531 ASSERT(bp->b_resid == 0);
13532
13533 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp);
13534
13535 xp = SD_GET_XBUF(bp);
13536 ASSERT(xp != NULL);
13537
13538 mutex_enter(SD_MUTEX(un));
13539
13540 /*
13541 * If we are currently in the failfast state, fail any new IO
13542 * that has B_FAILFAST set, then return.
13543 */
13544 if ((bp->b_flags & B_FAILFAST) &&
13545 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) {
13546 mutex_exit(SD_MUTEX(un));
13547 bioerror(bp, EIO);
13548 bp->b_resid = bp->b_bcount;
13549 SD_BEGIN_IODONE(index, un, bp);
13550 return;
13551 }
13552
13553 if (SD_IS_DIRECT_PRIORITY(xp)) {
13554 /*
13555 * Priority command -- transport it immediately.
13556 *
13557 * Note: We may want to assert that USCSI_DIAGNOSE is set,
13558 * because all direct priority commands should be associated
13559 * with error recovery actions which we don't want to retry.
13560 */
13561 sd_start_cmds(un, bp);
13562 } else {
13563 /*
13564 * Normal command -- add it to the wait queue, then start
13565 * transporting commands from the wait queue.
13566 */
13567 sd_add_buf_to_waitq(un, bp);
13568 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
13569 sd_start_cmds(un, NULL);
13570 }
13571
13572 mutex_exit(SD_MUTEX(un));
13573
13574 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp);
13575 }
13576
13577
13578 /*
13579 * Function: sd_init_cdb_limits
13580 *
13581 * Description: This is to handle scsi_pkt initialization differences
13582 * between the driver platforms.
13583 *
13584 * Legacy behaviors:
13585 *
13586 * If the block number or the sector count exceeds the
13587 * capabilities of a Group 0 command, shift over to a
13588 * Group 1 command. We don't blindly use Group 1
13589 * commands because a) some drives (CDC Wren IVs) get a
13590 * bit confused, and b) there is probably a fair amount
13591 * of speed difference for a target to receive and decode
13592 * a 10 byte command instead of a 6 byte command.
13593 *
13594 * The xfer time difference of 6 vs 10 byte CDBs is
13595 * still significant so this code is still worthwhile.
13596 * 10 byte CDBs are very inefficient with the fas HBA driver
13597 * and older disks. Each CDB byte took 1 usec with some
13598 * popular disks.
13599 *
13600 * Context: Must be called at attach time
13601 */
13602
13603 static void
13604 sd_init_cdb_limits(struct sd_lun *un)
13605 {
13606 int hba_cdb_limit;
13607
13608 /*
13609 * Use CDB_GROUP1 commands for most devices except for
13610 * parallel SCSI fixed drives in which case we get better
13611 * performance using CDB_GROUP0 commands (where applicable).
13612 */
13613 un->un_mincdb = SD_CDB_GROUP1;
13614 #if !defined(__fibre)
13615 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) &&
13616 !un->un_f_has_removable_media) {
13617 un->un_mincdb = SD_CDB_GROUP0;
13618 }
13619 #endif
13620
13621 /*
13622 * Try to read the max-cdb-length supported by the HBA.
13623 */
13624 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1);
13625 if (0 >= un->un_max_hba_cdb) {
13626 un->un_max_hba_cdb = CDB_GROUP4;
13627 hba_cdb_limit = SD_CDB_GROUP4;
13628 } else if (0 < un->un_max_hba_cdb &&
13629 un->un_max_hba_cdb < CDB_GROUP1) {
13630 hba_cdb_limit = SD_CDB_GROUP0;
13631 } else if (CDB_GROUP1 <= un->un_max_hba_cdb &&
13632 un->un_max_hba_cdb < CDB_GROUP5) {
13633 hba_cdb_limit = SD_CDB_GROUP1;
13634 } else if (CDB_GROUP5 <= un->un_max_hba_cdb &&
13635 un->un_max_hba_cdb < CDB_GROUP4) {
13636 hba_cdb_limit = SD_CDB_GROUP5;
13637 } else {
13638 hba_cdb_limit = SD_CDB_GROUP4;
13639 }
13640
13641 /*
13642 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4
13643 * commands for fixed disks unless we are building for a 32 bit
13644 * kernel.
13645 */
13646 #ifdef _LP64
13647 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
13648 min(hba_cdb_limit, SD_CDB_GROUP4);
13649 #else
13650 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
13651 min(hba_cdb_limit, SD_CDB_GROUP1);
13652 #endif
13653
13654 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE)
13655 ? sizeof (struct scsi_arq_status) : 1);
13656 if (!ISCD(un))
13657 un->un_cmd_timeout = (ushort_t)sd_io_time;
13658 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout;
13659 }
13660
13661
13662 /*
13663 * Function: sd_initpkt_for_buf
13664 *
13665 * Description: Allocate and initialize for transport a scsi_pkt struct,
13666 * based upon the info specified in the given buf struct.
13667 *
13668 * Assumes the xb_blkno in the request is absolute (ie,
13669 * relative to the start of the device (NOT partition!)).
13670 * Also assumes that the request is using the native block
13671 * size of the device (as returned by the READ CAPACITY
13672 * command).
13673 *
13674 * Return Code: SD_PKT_ALLOC_SUCCESS
13675 * SD_PKT_ALLOC_FAILURE
13676 * SD_PKT_ALLOC_FAILURE_NO_DMA
13677 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13678 *
13679 * Context: Kernel thread and may be called from software interrupt context
13680 * as part of a sdrunout callback. This function may not block or
13681 * call routines that block.
13682 */
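/*
 * For reference, the standard SCSI READ/WRITE CDB groups that the
 * min/max CDB selection above and sd_setup_rw_pkt() below choose
 * between have (approximately) these addressing limits; the exact
 * values enforced by this driver live in sd_cdbtab:
 *
 *   Group 0 (6-byte CDB):  21-bit LBA, 8-bit count (0 means 256)
 *   Group 1 (10-byte CDB): 32-bit LBA, 16-bit count
 *   Group 5 (12-byte CDB): 32-bit LBA, 32-bit count
 *   Group 4 (16-byte CDB): 64-bit LBA, 32-bit count
 */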
13683
13684 static int
13685 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
13686 {
13687 struct sd_xbuf *xp;
13688 struct scsi_pkt *pktp = NULL;
13689 struct sd_lun *un;
13690 size_t blockcount;
13691 daddr_t startblock;
13692 int rval;
13693 int cmd_flags;
13694
13695 ASSERT(bp != NULL);
13696 ASSERT(pktpp != NULL);
13697 xp = SD_GET_XBUF(bp);
13698 ASSERT(xp != NULL);
13699 un = SD_GET_UN(bp);
13700 ASSERT(un != NULL);
13701 ASSERT(mutex_owned(SD_MUTEX(un)));
13702 ASSERT(bp->b_resid == 0);
13703
13704 SD_TRACE(SD_LOG_IO_CORE, un,
13705 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);
13706
13707 mutex_exit(SD_MUTEX(un));
13708
13709 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13710 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
13711 /*
13712 * Already have a scsi_pkt -- just need DMA resources.
13713 * We must recompute the CDB in case the mapping returns
13714 * a nonzero pkt_resid.
13715 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
13716 * that is being retried, the unmap/remap of the DMA resources
13717 * will result in the entire transfer starting over again
13718 * from the very first block.
13719 */
13720 ASSERT(xp->xb_pktp != NULL);
13721 pktp = xp->xb_pktp;
13722 } else {
13723 pktp = NULL;
13724 }
13725 #endif /* __i386 || __amd64 */
13726
13727 startblock = xp->xb_blkno; /* Absolute block num. */
13728 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
13729
13730 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);
13731
13732 /*
13733 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
13734 * call scsi_init_pkt, and build the CDB.
13735 */
13736 rval = sd_setup_rw_pkt(un, &pktp, bp,
13737 cmd_flags, sdrunout, (caddr_t)un,
13738 startblock, blockcount);
13739
13740 if (rval == 0) {
13741 /*
13742 * Success.
13743 *
13744 * If partial DMA is being used and required for this transfer,
13745 * set it up here.
13746 */
13747 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
13748 (pktp->pkt_resid != 0)) {
13749
13750 /*
13751 * Save the CDB length and pkt_resid for the
13752 * next xfer
13753 */
13754 xp->xb_dma_resid = pktp->pkt_resid;
13755
13756 /* rezero resid */
13757 pktp->pkt_resid = 0;
13758
13759 } else {
13760 xp->xb_dma_resid = 0;
13761 }
13762
13763 pktp->pkt_flags = un->un_tagflags;
13764 pktp->pkt_time = un->un_cmd_timeout;
13765 pktp->pkt_comp = sdintr;
13766
13767 pktp->pkt_private = bp;
13768 *pktpp = pktp;
13769
13770 SD_TRACE(SD_LOG_IO_CORE, un,
13771 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);
13772
13773 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13774 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
13775 #endif
13776
13777 mutex_enter(SD_MUTEX(un));
13778 return (SD_PKT_ALLOC_SUCCESS);
13779
13780 }
13781
13782 /*
13783 * SD_PKT_ALLOC_FAILURE is the only expected failure code
13784 * from sd_setup_rw_pkt.
13785 */
13786 ASSERT(rval == SD_PKT_ALLOC_FAILURE);
13787
13788 if (rval == SD_PKT_ALLOC_FAILURE) {
13789 *pktpp = NULL;
13790 /*
13791 * Set the driver state to RWAIT to indicate the driver
13792 * is waiting on resource allocations. The driver will not
13793 * suspend, pm_suspend, or detach while the state is RWAIT.
13794 */
13795 mutex_enter(SD_MUTEX(un));
13796 New_state(un, SD_STATE_RWAIT);
13797
13798 SD_ERROR(SD_LOG_IO_CORE, un,
13799 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);
13800
13801 if ((bp->b_flags & B_ERROR) != 0) {
13802 return (SD_PKT_ALLOC_FAILURE_NO_DMA);
13803 }
13804 return (SD_PKT_ALLOC_FAILURE);
13805 } else {
13806 /*
13807 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13808 *
13809 * This should never happen. Maybe someone messed with the
13810 * kernel's minphys?
13811 */
13812 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13813 "Request rejected: too large for CDB: "
13814 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount);
13815 SD_ERROR(SD_LOG_IO_CORE, un,
13816 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
13817 mutex_enter(SD_MUTEX(un));
13818 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13819
13820 }
13821 }
13822
13823
13824 /*
13825 * Function: sd_destroypkt_for_buf
13826 *
13827 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
13828 *
13829 * Context: Kernel thread or interrupt context
13830 */
13831
13832 static void
13833 sd_destroypkt_for_buf(struct buf *bp)
13834 {
13835 ASSERT(bp != NULL);
13836 ASSERT(SD_GET_UN(bp) != NULL);
13837
13838 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13839 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);
13840
13841 ASSERT(SD_GET_PKTP(bp) != NULL);
13842 scsi_destroy_pkt(SD_GET_PKTP(bp));
13843
13844 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13845 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
13846 }
13847
13848 /*
13849 * Function: sd_setup_rw_pkt
13850 *
13851 * Description: Determines appropriate CDB group for the requested LBA
13852 * and transfer length, calls scsi_init_pkt, and builds
13853 * the CDB. Do not use for partial DMA transfers except
13854 * for the initial transfer since the CDB size must
13855 * remain constant.
13856 *
13857 * Context: Kernel thread and may be called from software interrupt
13858 * context as part of a sdrunout callback. This function may not
13859 * block or call routines that block.
13860 */
13861
13862
13863 int
13864 sd_setup_rw_pkt(struct sd_lun *un,
13865 struct scsi_pkt **pktpp, struct buf *bp, int flags,
13866 int (*callback)(caddr_t), caddr_t callback_arg,
13867 diskaddr_t lba, uint32_t blockcount)
13868 {
13869 struct scsi_pkt *return_pktp;
13870 union scsi_cdb *cdbp;
13871 struct sd_cdbinfo *cp = NULL;
13872 int i;
13873
13874 /*
13875 * See which size CDB to use, based upon the request.
13876 */
13877 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {
13878
13879 /*
13880 * Check lba and block count against sd_cdbtab limits.
13881 * In the partial DMA case, we have to use the same size
13882 * CDB for all the transfers. Check lba + blockcount
13883 * against the max LBA so we know that segment of the
13884 * transfer can use the CDB we select.
13885 */
13886 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
13887 (blockcount <= sd_cdbtab[i].sc_maxlen)) {
13888
13889 /*
13890 * The command will fit into the CDB type
13891 * specified by sd_cdbtab[i].
13892 */
13893 cp = sd_cdbtab + i;
13894
13895 /*
13896 * Call scsi_init_pkt so we can fill in the
13897 * CDB.
13898 */
13899 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
13900 bp, cp->sc_grpcode, un->un_status_len, 0,
13901 flags, callback, callback_arg);
13902
13903 if (return_pktp != NULL) {
13904
13905 /*
13906 * Return new value of pkt
13907 */
13908 *pktpp = return_pktp;
13909
13910 /*
13911 * To be safe, zero the CDB ensuring there is
13912 * no leftover data from a previous command.
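 * (For illustration: for a Group 1 READ(10)/WRITE(10), the
 * FORMG1* macros below place the LBA in bytes 2-5 of the CDB,
 * big-endian, and the block count in bytes 7-8; zeroing first
 * guarantees that the remaining control/reserved bytes are
 * clean.)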
13913 */
13914 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);
13915
13916 /*
13917 * Handle partial DMA mapping
13918 */
13919 if (return_pktp->pkt_resid != 0) {
13920
13921 /*
13922 * Not going to xfer as many blocks as
13923 * originally expected
13924 */
13925 blockcount -=
13926 SD_BYTES2TGTBLOCKS(un,
13927 return_pktp->pkt_resid);
13928 }
13929
13930 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;
13931
13932 /*
13933 * Set command byte based on the CDB
13934 * type we matched.
13935 */
13936 cdbp->scc_cmd = cp->sc_grpmask |
13937 ((bp->b_flags & B_READ) ?
13938 SCMD_READ : SCMD_WRITE);
13939
13940 SD_FILL_SCSI1_LUN(un, return_pktp);
13941
13942 /*
13943 * Fill in LBA and length
13944 */
13945 ASSERT((cp->sc_grpcode == CDB_GROUP1) ||
13946 (cp->sc_grpcode == CDB_GROUP4) ||
13947 (cp->sc_grpcode == CDB_GROUP0) ||
13948 (cp->sc_grpcode == CDB_GROUP5));
13949
13950 if (cp->sc_grpcode == CDB_GROUP1) {
13951 FORMG1ADDR(cdbp, lba);
13952 FORMG1COUNT(cdbp, blockcount);
13953 return (0);
13954 } else if (cp->sc_grpcode == CDB_GROUP4) {
13955 FORMG4LONGADDR(cdbp, lba);
13956 FORMG4COUNT(cdbp, blockcount);
13957 return (0);
13958 } else if (cp->sc_grpcode == CDB_GROUP0) {
13959 FORMG0ADDR(cdbp, lba);
13960 FORMG0COUNT(cdbp, blockcount);
13961 return (0);
13962 } else if (cp->sc_grpcode == CDB_GROUP5) {
13963 FORMG5ADDR(cdbp, lba);
13964 FORMG5COUNT(cdbp, blockcount);
13965 return (0);
13966 }
13967
13968 /*
13969 * It should be impossible to not match one
13970 * of the CDB types above, so we should never
13971 * reach this point. Set the CDB command byte
13972 * to test-unit-ready to avoid writing
13973 * to somewhere we don't intend.
13974 */
13975 cdbp->scc_cmd = SCMD_TEST_UNIT_READY;
13976 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13977 } else {
13978 /*
13979 * Couldn't get scsi_pkt
13980 */
13981 return (SD_PKT_ALLOC_FAILURE);
13982 }
13983 }
13984 }
13985
13986 /*
13987 * None of the available CDB types were suitable. This really
13988 * should never happen: on a 64 bit system we support
13989 * READ16/WRITE16 which will hold an entire 64 bit disk address
13990 * and on a 32 bit system we will refuse to bind to a device
13991 * larger than 2TB so addresses will never be larger than 32 bits.
13992 */
13993 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13994 }
13995
13996 /*
13997 * Function: sd_setup_next_rw_pkt
13998 *
13999 * Description: Setup packet for partial DMA transfers, except for the
14000 * initial transfer. sd_setup_rw_pkt should be used for
14001 * the initial transfer.
14002 *
14003 * Context: Kernel thread and may be called from interrupt context.
14004 */
14005
14006 int
14007 sd_setup_next_rw_pkt(struct sd_lun *un,
14008 struct scsi_pkt *pktp, struct buf *bp,
14009 diskaddr_t lba, uint32_t blockcount)
14010 {
14011 uchar_t com;
14012 union scsi_cdb *cdbp;
14013 uchar_t cdb_group_id;
14014
14015 ASSERT(pktp != NULL);
14016 ASSERT(pktp->pkt_cdbp != NULL);
14017
14018 cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
14019 com = cdbp->scc_cmd;
14020 cdb_group_id = CDB_GROUPID(com);
14021
14022 ASSERT((cdb_group_id == CDB_GROUPID_0) ||
14023 (cdb_group_id == CDB_GROUPID_1) ||
14024 (cdb_group_id == CDB_GROUPID_4) ||
14025 (cdb_group_id == CDB_GROUPID_5));
14026
14027 /*
14028 * Move pkt to the next portion of the xfer.
14029 * func is NULL_FUNC so we do not have to release
14030 * the disk mutex here.
14031 */
14032 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0,
14033 NULL_FUNC, NULL) == pktp) {
14034 /* Success. Handle partial DMA */
14035 if (pktp->pkt_resid != 0) {
14036 blockcount -=
14037 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
14038 }
14039
14040 cdbp->scc_cmd = com;
14041 SD_FILL_SCSI1_LUN(un, pktp);
14042 if (cdb_group_id == CDB_GROUPID_1) {
14043 FORMG1ADDR(cdbp, lba);
14044 FORMG1COUNT(cdbp, blockcount);
14045 return (0);
14046 } else if (cdb_group_id == CDB_GROUPID_4) {
14047 FORMG4LONGADDR(cdbp, lba);
14048 FORMG4COUNT(cdbp, blockcount);
14049 return (0);
14050 } else if (cdb_group_id == CDB_GROUPID_0) {
14051 FORMG0ADDR(cdbp, lba);
14052 FORMG0COUNT(cdbp, blockcount);
14053 return (0);
14054 } else if (cdb_group_id == CDB_GROUPID_5) {
14055 FORMG5ADDR(cdbp, lba);
14056 FORMG5COUNT(cdbp, blockcount);
14057 return (0);
14058 }
14059
14060 /* Unreachable */
14061 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
14062 }
14063
14064 /*
14065 * Error setting up next portion of cmd transfer.
14066 * Something is definitely very wrong and this
14067 * should not happen.
14068 */
14069 return (SD_PKT_ALLOC_FAILURE);
14070 }
14071
14072 /*
14073 * Function: sd_initpkt_for_uscsi
14074 *
14075 * Description: Allocate and initialize for transport a scsi_pkt struct,
14076 * based upon the info specified in the given uscsi_cmd struct.
14077 *
14078 * Return Code: SD_PKT_ALLOC_SUCCESS
14079 * SD_PKT_ALLOC_FAILURE
14080 * SD_PKT_ALLOC_FAILURE_NO_DMA
14081 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
14082 *
14083 * Context: Kernel thread and may be called from software interrupt context
14084 * as part of a sdrunout callback. This function may not block or
14085 * call routines that block.
14086 */
14087
14088 static int
14089 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
14090 {
14091 struct uscsi_cmd *uscmd;
14092 struct sd_xbuf *xp;
14093 struct scsi_pkt *pktp;
14094 struct sd_lun *un;
14095 uint32_t flags = 0;
14096
14097 ASSERT(bp != NULL);
14098 ASSERT(pktpp != NULL);
14099 xp = SD_GET_XBUF(bp);
14100 ASSERT(xp != NULL);
14101 un = SD_GET_UN(bp);
14102 ASSERT(un != NULL);
14103 ASSERT(mutex_owned(SD_MUTEX(un)));
14104
14105 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
14106 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
14107 ASSERT(uscmd != NULL);
14108
14109 SD_TRACE(SD_LOG_IO_CORE, un,
14110 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);
14111
14112 /*
14113 * Allocate the scsi_pkt for the command.
14114 *
14115 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
14116 * during scsi_init_pkt time and will continue to use the
14117 * same path as long as the same scsi_pkt is used without
14118 * intervening scsi_dmafree(). Since a uscsi command does
14119 * not call scsi_dmafree() before retrying a failed command,
14120 * it is necessary to make sure PKT_DMA_PARTIAL flag is NOT
14121 * set such that scsi_vhci can use another available path for
14122 * retry. Besides, uscsi commands do not allow DMA breakup,
14123 * so there is no need to set PKT_DMA_PARTIAL flag.
14124 *
14125 * More fundamentally, we can't support breaking up this DMA into
14126 * multiple windows on x86. There is, in general, no guarantee
14127 * that arbitrary SCSI commands are idempotent, which is required
14128 * if we want to use multiple windows for a given command.
14129 */
14130 if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
14131 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
14132 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
14133 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
14134 - sizeof (struct scsi_extended_sense)), 0,
14135 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
14136 sdrunout, (caddr_t)un);
14137 } else {
14138 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
14139 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
14140 sizeof (struct scsi_arq_status), 0,
14141 (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
14142 sdrunout, (caddr_t)un);
14143 }
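/*
 * A note on the status length arithmetic above: a scsi_arq_status
 * embeds a default struct scsi_extended_sense for auto request
 * sense, so when the caller wants more sense data than SENSE_LENGTH,
 * the embedded size is subtracted back out and replaced with
 * uscsi_rqlen, and PKT_XARQ asks the HBA to use the larger sense
 * area.
 */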
14144
14145 if (pktp == NULL) {
14146 *pktpp = NULL;
14147 /*
14148 * Set the driver state to RWAIT to indicate the driver
14149 * is waiting on resource allocations. The driver will not
14150 * suspend, pm_suspend, or detach while the state is RWAIT.
14151 */
14152 New_state(un, SD_STATE_RWAIT);
14153
14154 SD_ERROR(SD_LOG_IO_CORE, un,
14155 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);
14156
14157 if ((bp->b_flags & B_ERROR) != 0) {
14158 return (SD_PKT_ALLOC_FAILURE_NO_DMA);
14159 }
14160 return (SD_PKT_ALLOC_FAILURE);
14161 }
14162
14163 /*
14164 * We do not do DMA breakup for USCSI commands, so return failure
14165 * here if all the needed DMA resources were not allocated.
14166 */
14167 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
14168 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
14169 scsi_destroy_pkt(pktp);
14170 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
14171 "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
14172 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
14173 }
14174
14175 /* Init the cdb from the given uscsi struct */
14176 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
14177 uscmd->uscsi_cdb[0], 0, 0, 0);
14178
14179 SD_FILL_SCSI1_LUN(un, pktp);
14180
14181 /*
14182 * Set up the optional USCSI flags. See the uscsi(7I) man page
14183 * for a listing of the supported flags.
14184 */
14185
14186 if (uscmd->uscsi_flags & USCSI_SILENT) {
14187 flags |= FLAG_SILENT;
14188 }
14189
14190 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
14191 flags |= FLAG_DIAGNOSE;
14192 }
14193
14194 if (uscmd->uscsi_flags & USCSI_ISOLATE) {
14195 flags |= FLAG_ISOLATE;
14196 }
14197
14198 if (un->un_f_is_fibre == FALSE) {
14199 if (uscmd->uscsi_flags & USCSI_RENEGOT) {
14200 flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
14201 }
14202 }
14203
14204 /*
14205 * Set the pkt flags here so we save time later.
14206 * Note: These flags are NOT in the uscsi man page!!!
14207 */
14208 if (uscmd->uscsi_flags & USCSI_HEAD) {
14209 flags |= FLAG_HEAD;
14210 }
14211
14212 if (uscmd->uscsi_flags & USCSI_NOINTR) {
14213 flags |= FLAG_NOINTR;
14214 }
14215
14216 /*
14217 * For tagged queueing, things get a bit complicated.
14218 * Check first for head of queue and last for ordered queue.
14219 * If neither head nor order, use the default driver tag flags.
14220 */
14221 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
14222 if (uscmd->uscsi_flags & USCSI_HTAG) {
14223 flags |= FLAG_HTAG;
14224 } else if (uscmd->uscsi_flags & USCSI_OTAG) {
14225 flags |= FLAG_OTAG;
14226 } else {
14227 flags |= un->un_tagflags & FLAG_TAGMASK;
14228 }
14229 }
14230
14231 if (uscmd->uscsi_flags & USCSI_NODISCON) {
14232 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
14233 }
14234
14235 pktp->pkt_flags = flags;
14236
14237 /* Transfer uscsi information to scsi_pkt */
14238 (void) scsi_uscsi_pktinit(uscmd, pktp);
14239
14240 /* Copy the caller's CDB into the pkt... */
14241 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);
14242
14243 if (uscmd->uscsi_timeout == 0) {
14244 pktp->pkt_time = un->un_uscsi_timeout;
14245 } else {
14246 pktp->pkt_time = uscmd->uscsi_timeout;
14247 }
14248
14249 /* need it later to identify USCSI request in sdintr */
14250 xp->xb_pkt_flags |= SD_XB_USCSICMD;
14251
14252 xp->xb_sense_resid = uscmd->uscsi_rqresid;
14253
14254 pktp->pkt_private = bp;
14255 pktp->pkt_comp = sdintr;
14256 *pktpp = pktp;
14257
14258 SD_TRACE(SD_LOG_IO_CORE, un,
14259 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);
14260
14261 return (SD_PKT_ALLOC_SUCCESS);
14262 }
14263
14264
14265 /*
14266 * Function: sd_destroypkt_for_uscsi
14267 *
14268 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
14269 * IOs. Also saves relevant info into the associated uscsi_cmd
14270 * struct.
14271 *
14272 * Context: May be called under interrupt context
14273 */
14274
14275 static void
14276 sd_destroypkt_for_uscsi(struct buf *bp)
14277 {
14278 struct uscsi_cmd *uscmd;
14279 struct sd_xbuf *xp;
14280 struct scsi_pkt *pktp;
14281 struct sd_lun *un;
14282 struct sd_uscsi_info *suip;
14283
14284 ASSERT(bp != NULL);
14285 xp = SD_GET_XBUF(bp);
14286 ASSERT(xp != NULL);
14287 un = SD_GET_UN(bp);
14288 ASSERT(un != NULL);
14289 ASSERT(!mutex_owned(SD_MUTEX(un)));
14290 pktp = SD_GET_PKTP(bp);
14291 ASSERT(pktp != NULL);
14292
14293 SD_TRACE(SD_LOG_IO_CORE, un,
14294 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);
14295
14296 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
14297 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
14298 ASSERT(uscmd != NULL);
14299
14300 /* Save the status and the residual into the uscsi_cmd struct */
14301 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
14302 uscmd->uscsi_resid = bp->b_resid;
14303
14304 /* Transfer scsi_pkt information to uscsi */
14305 (void) scsi_uscsi_pktfini(pktp, uscmd);
14306
14307 /*
14308 * If enabled, copy any saved sense data into the area specified
14309 * by the uscsi command.
14310 */
14311 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
14312 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
14313 /*
14314 * Note: uscmd->uscsi_rqbuf should always point to a buffer
14315 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
14316 */
14317 uscmd->uscsi_rqstatus = xp->xb_sense_status;
14318 uscmd->uscsi_rqresid = xp->xb_sense_resid;
14319 if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
14320 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14321 MAX_SENSE_LENGTH);
14322 } else {
14323 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14324 SENSE_LENGTH);
14325 }
14326 }
14327 /*
14328 * The following assignments are for SCSI FMA.
14329 */
14330 ASSERT(xp->xb_private != NULL);
14331 suip = (struct sd_uscsi_info *)xp->xb_private;
14332 suip->ui_pkt_reason = pktp->pkt_reason;
14333 suip->ui_pkt_state = pktp->pkt_state;
14334 suip->ui_pkt_statistics = pktp->pkt_statistics;
14335 suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp);
14336
14337 /* We are done with the scsi_pkt; free it now */
14338 ASSERT(SD_GET_PKTP(bp) != NULL);
14339 scsi_destroy_pkt(SD_GET_PKTP(bp));
14340
14341 SD_TRACE(SD_LOG_IO_CORE, un,
14342 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
14343 }
14344
14345
14346 /*
14347 * Function: sd_bioclone_alloc
14348 *
14349 * Description: Allocate a buf(9S) and init it as per the given buf
14350 * and the various arguments. The associated sd_xbuf
14351 * struct is (nearly) duplicated. The struct buf *bp
14352 * argument is saved in new_xp->xb_private.
14353 *
14354 * Arguments: bp - ptr to the buf(9S) to be "shadowed"
14355 * datalen - size of data area for the shadow bp
14356 * blkno - starting LBA
14357 * func - function pointer for b_iodone in the shadow buf. (May
14358 * be NULL if none.)
14359 *
14360 * Return Code: Pointer to the allocated buf(9S) struct
14361 *
14362 * Context: Can sleep.
14363 */
14364
14365 static struct buf *
14366 sd_bioclone_alloc(struct buf *bp, size_t datalen, daddr_t blkno,
14367 int (*func)(struct buf *))
14368 {
14369 struct sd_lun *un;
14370 struct sd_xbuf *xp;
14371 struct sd_xbuf *new_xp;
14372 struct buf *new_bp;
14373
14374 ASSERT(bp != NULL);
14375 xp = SD_GET_XBUF(bp);
14376 ASSERT(xp != NULL);
14377 un = SD_GET_UN(bp);
14378 ASSERT(un != NULL);
14379 ASSERT(!mutex_owned(SD_MUTEX(un)));
14380
14381 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
14382 NULL, KM_SLEEP);
14383
14384 new_bp->b_lblkno = blkno;
14385
14386 /*
14387 * Allocate an xbuf for the shadow bp and copy the contents of the
14388 * original xbuf into it.
14389 */
14390 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
14391 bcopy(xp, new_xp, sizeof (struct sd_xbuf));
14392
14393 /*
14394 * The given bp is automatically saved in the xb_private member
14395 * of the new xbuf. Callers are allowed to depend on this.
14396 */
14397 new_xp->xb_private = bp;
14398
14399 new_bp->b_private = new_xp;
14400
14401 return (new_bp);
14402 }
14403
14404 /*
14405 * Function: sd_shadow_buf_alloc
14406 *
14407 * Description: Allocate a buf(9S) and init it as per the given buf
14408 * and the various arguments. The associated sd_xbuf
14409 * struct is (nearly) duplicated. The struct buf *bp
14410 * argument is saved in new_xp->xb_private.
14411 *
14412 * Arguments: bp - ptr to the buf(9S) to be "shadowed"
14413 * datalen - size of data area for the shadow bp
14414 * bflags - B_READ or B_WRITE (pseudo flag)
14415 * blkno - starting LBA
14416 * func - function pointer for b_iodone in the shadow buf. (May
14417 * be NULL if none.)
14418 *
14419 * Return Code: Pointer to the allocated buf(9S) struct
14420 *
14421 * Context: Can sleep.
14422 */
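/*
 * Unlike sd_bioclone_alloc() above, which uses bioclone(9F) and
 * therefore shares the original buf's data pages (it only trims
 * the transfer length for the partition-overrun case), this
 * routine allocates a separate data area: block-size conversion
 * must transfer whole target blocks, which can be larger than
 * the memory the caller's buf describes.
 */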
14423
14424 static struct buf *
14425 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
14426 daddr_t blkno, int (*func)(struct buf *))
14427 {
14428 struct sd_lun *un;
14429 struct sd_xbuf *xp;
14430 struct sd_xbuf *new_xp;
14431 struct buf *new_bp;
14432
14433 ASSERT(bp != NULL);
14434 xp = SD_GET_XBUF(bp);
14435 ASSERT(xp != NULL);
14436 un = SD_GET_UN(bp);
14437 ASSERT(un != NULL);
14438 ASSERT(!mutex_owned(SD_MUTEX(un)));
14439
14440 if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
14441 bp_mapin(bp);
14442 }
14443
14444 bflags &= (B_READ | B_WRITE);
14445 #if defined(__i386) || defined(__amd64)
14446 new_bp = getrbuf(KM_SLEEP);
14447 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
14448 new_bp->b_bcount = datalen;
14449 new_bp->b_flags = bflags |
14450 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW));
14451 #else
14452 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
14453 datalen, bflags, SLEEP_FUNC, NULL);
14454 #endif
14455 new_bp->av_forw = NULL;
14456 new_bp->av_back = NULL;
14457 new_bp->b_dev = bp->b_dev;
14458 new_bp->b_blkno = blkno;
14459 new_bp->b_iodone = func;
14460 new_bp->b_edev = bp->b_edev;
14461 new_bp->b_resid = 0;
14462
14463 /* We need to preserve the B_FAILFAST flag */
14464 if (bp->b_flags & B_FAILFAST) {
14465 new_bp->b_flags |= B_FAILFAST;
14466 }
14467
14468 /*
14469 * Allocate an xbuf for the shadow bp and copy the contents of the
14470 * original xbuf into it.
14471 */
14472 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
14473 bcopy(xp, new_xp, sizeof (struct sd_xbuf));
14474
14475 /* Need later to copy data between the shadow buf & original buf! */
14476 new_xp->xb_pkt_flags |= PKT_CONSISTENT;
14477
14478 /*
14479 * The given bp is automatically saved in the xb_private member
14480 * of the new xbuf. Callers are allowed to depend on this.
14481 */
14482 new_xp->xb_private = bp;
14483
14484 new_bp->b_private = new_xp;
14485
14486 return (new_bp);
14487 }
14488
14489 /*
14490 * Function: sd_bioclone_free
14491 *
14492 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
14493 * in the larger-than-partition (overrun) case.
14494 *
14495 * Context: May be called under interrupt context
14496 */
14497
14498 static void
14499 sd_bioclone_free(struct buf *bp)
14500 {
14501 struct sd_xbuf *xp;
14502
14503 ASSERT(bp != NULL);
14504 xp = SD_GET_XBUF(bp);
14505 ASSERT(xp != NULL);
14506
14507 /*
14508 * Call bp_mapout() before freeing the buf, in case a lower
14509 * layer or HBA had done a bp_mapin(). We must do this here
14510 * as we are the "originator" of the shadow buf.
14511 */
14512 bp_mapout(bp);
14513
14514 /*
14515 * Null out b_iodone before freeing the bp, to ensure that the driver
14516 * never gets confused by a stale value in this field. (Just a little
14517 * extra defensiveness here.)
14518 */
14519 bp->b_iodone = NULL;
14520
14521 freerbuf(bp);
14522
14523 kmem_free(xp, sizeof (struct sd_xbuf));
14524 }
14525
14526 /*
14527 * Function: sd_shadow_buf_free
14528 *
14529 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations.
14530 *
14531 * Context: May be called under interrupt context
14532 */
14533
14534 static void
14535 sd_shadow_buf_free(struct buf *bp)
14536 {
14537 struct sd_xbuf *xp;
14538
14539 ASSERT(bp != NULL);
14540 xp = SD_GET_XBUF(bp);
14541 ASSERT(xp != NULL);
14542
14543 #if defined(__sparc)
14544 /*
14545 * Call bp_mapout() before freeing the buf, in case a lower
14546 * layer or HBA had done a bp_mapin(). We must do this here
14547      * as we are the "originator" of the shadow buf.
14548      */
14549     bp_mapout(bp);
14550 #endif
14551 
14552     /*
14553      * Null out b_iodone before freeing the bp, to ensure that the driver
14554      * never gets confused by a stale value in this field. (Just a little
14555      * extra defensiveness here.)
14556      */
14557     bp->b_iodone = NULL;
14558 
14559 #if defined(__i386) || defined(__amd64)
14560     kmem_free(bp->b_un.b_addr, bp->b_bcount);
14561     freerbuf(bp);
14562 #else
14563     scsi_free_consistent_buf(bp);
14564 #endif
14565 
14566     kmem_free(xp, sizeof (struct sd_xbuf));
14567 }
14568 
14569 
14570 /*
14571  * Function: sd_print_transport_rejected_message
14572  *
14573  * Description: This implements the ludicrously complex rules for printing
14574  *		a "transport rejected" message. This is to address the
14575  *		specific problem of having a flood of this error message
14576  *		produced when a failover occurs.
14577  *
14578  *     Context: Any.
14579  */
14580 
14581 static void
14582 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
14583     int code)
14584 {
14585     ASSERT(un != NULL);
14586     ASSERT(mutex_owned(SD_MUTEX(un)));
14587     ASSERT(xp != NULL);
14588 
14589     /*
14590      * Print the "transport rejected" message under the following
14591      * conditions:
14592      *
14593      * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
14594      * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
14595      * - If the error code IS a TRAN_FATAL_ERROR, then the message is
14596      *   printed the FIRST time a TRAN_FATAL_ERROR is returned from
14597      *   scsi_transport(9F) (which indicates that the target might have
14598      *   gone off-line). This uses the un->un_tran_fatal_count
14599      *   count, which is incremented whenever a TRAN_FATAL_ERROR is
14600      *   received, and reset to zero whenever a TRAN_ACCEPT is returned
14601      *   from scsi_transport().
14602      *
14603      * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
14604      * the preceding cases in order for the message to be printed.
14605      */
14606     if (((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) &&
14607         (SD_FM_LOG(un) == SD_FM_LOG_NSUP)) {
14608         if ((sd_level_mask & SD_LOGMASK_DIAG) ||
14609             (code != TRAN_FATAL_ERROR) ||
14610             (un->un_tran_fatal_count == 1)) {
14611             switch (code) {
14612             case TRAN_BADPKT:
14613                 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14614                     "transport rejected bad packet\n");
14615                 break;
14616             case TRAN_FATAL_ERROR:
14617                 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14618                     "transport rejected fatal error\n");
14619                 break;
14620             default:
14621                 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14622                     "transport rejected (%d)\n", code);
14623                 break;
14624             }
14625         }
14626     }
14627 }
14628 
14629 
14630 /*
14631  * Function: sd_add_buf_to_waitq
14632  *
14633  * Description: Add the given buf(9S) struct to the wait queue for the
14634  *		instance. If sorting is enabled, then the buf is added
14635  *		to the queue via an elevator sort algorithm (a la
14636  *		disksort(9F)). SD_GET_BLKNO(bp) is used as the sort key.
14637  *		If sorting is not enabled, then the buf is just added
14638  *		to the end of the wait queue.
14639  *
14640  * Return Code: void
14641  *
14642  *     Context: Does not sleep/block, therefore technically can be called
14643  *		from any context. However, if sorting is enabled then the
14644  *		execution time is indeterminate, and may become long if
14645  *		the wait queue grows large.
14646  */
14647 
14648 static void
14649 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
14650 {
14651     struct buf *ap;
14652 
14653     ASSERT(bp != NULL);
14654     ASSERT(un != NULL);
14655     ASSERT(mutex_owned(SD_MUTEX(un)));
14656 
14657     /* If the queue is empty, add the buf as the only entry & return. */
14658     if (un->un_waitq_headp == NULL) {
14659         ASSERT(un->un_waitq_tailp == NULL);
14660         un->un_waitq_headp = un->un_waitq_tailp = bp;
14661         bp->av_forw = NULL;
14662         return;
14663     }
14664 
14665     ASSERT(un->un_waitq_tailp != NULL);
14666 
14667     /*
14668      * If sorting is disabled, just add the buf to the tail end of
14669      * the wait queue and return.
14670      */
14671     if (un->un_f_disksort_disabled || un->un_f_enable_rmw) {
14672         un->un_waitq_tailp->av_forw = bp;
14673         un->un_waitq_tailp = bp;
14674         bp->av_forw = NULL;
14675         return;
14676     }
14677 
14678     /*
14679      * Sort through the list of requests currently on the wait queue
14680      * and add the new buf request at the appropriate position.
14681      *
14682      * The un->un_waitq_headp is an activity chain pointer on which
14683      * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The
14684      * first queue holds those requests which are positioned after
14685      * the current SD_GET_BLKNO() (in the first request); the second holds
14686      * requests which came in after their SD_GET_BLKNO() number was passed.
14687      * Thus we implement a one-way scan, retracting after reaching
14688      * the end of the drive to the first request on the second
14689      * queue, at which time it becomes the first queue.
14690      * A one-way scan is natural because of the way UNIX read-ahead
14691      * blocks are allocated.
14692      *
14693      * If we lie after the first request, then we must locate the
14694      * second request list and add ourselves to it.
14695      */
14696     ap = un->un_waitq_headp;
14697     if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) {
14698         while (ap->av_forw != NULL) {
14699             /*
14700              * Look for an "inversion" in the (normally
14701              * ascending) block numbers. This indicates
14702              * the start of the second request list.
14703              */
14704             if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) {
14705                 /*
14706                  * Search the second request list for the
14707                  * first request at a larger block number.
14708                  * We go before that; however if there is
14709                  * no such request, we go at the end.
14710                  */
14711                 do {
14712                     if (SD_GET_BLKNO(bp) <
14713                         SD_GET_BLKNO(ap->av_forw)) {
14714                         goto insert;
14715                     }
14716                     ap = ap->av_forw;
14717                 } while (ap->av_forw != NULL);
14718                 goto insert;    /* after last */
14719             }
14720             ap = ap->av_forw;
14721         }
14722 
14723         /*
14724          * No inversions... we will go after the last, and
14725          * be the first request in the second request list.
14726          */
14727         goto insert;
14728     }
14729 
14730     /*
14731      * Request is at/after the current request...
14732      * sort in the first request list.
14733      */
14734     while (ap->av_forw != NULL) {
14735         /*
14736          * We want to go after the current request (1) if
14737          * there is an inversion after it (i.e. it is the end
14738          * of the first request list), or (2) if the next
14739          * request is a larger block no. than our request.
14740          */
14741         if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
14742             (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
14743             goto insert;
14744         }
14745         ap = ap->av_forw;
14746     }
14747 
14748     /*
14749      * Neither a second list nor a larger request, therefore
14750      * we go at the end of the first list (which is the same
14751      * as the end of the whole shebang).
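     *
     * Worked example (illustrative, not from the original source):
     * with queued SD_GET_BLKNO() values 40 -> 60 -> 10 -> 20 (the
     * inversion 60 -> 10 marks the start of the second list), a new
     * request at blkno 50 is inserted between 40 and 60, while a new
     * request at blkno 15 lies below the head (40) and so goes onto
     * the second list, between 10 and 20.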
14752 */ 14753 insert: 14754 bp->av_forw = ap->av_forw; 14755 ap->av_forw = bp; 14756 14757 /* 14758 * If we inserted onto the tail end of the waitq, make sure the 14759 * tail pointer is updated. 14760 */ 14761 if (ap == un->un_waitq_tailp) { 14762 un->un_waitq_tailp = bp; 14763 } 14764 } 14765 14766 14767 /* 14768 * Function: sd_start_cmds 14769 * 14770 * Description: Remove and transport cmds from the driver queues. 14771 * 14772 * Arguments: un - pointer to the unit (soft state) struct for the target. 14773 * 14774 * immed_bp - ptr to a buf to be transported immediately. Only 14775 * the immed_bp is transported; bufs on the waitq are not 14776 * processed and the un_retry_bp is not checked. If immed_bp is 14777 * NULL, then normal queue processing is performed. 14778 * 14779 * Context: May be called from kernel thread context, interrupt context, 14780 * or runout callback context. This function may not block or 14781 * call routines that block. 14782 */ 14783 14784 static void 14785 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 14786 { 14787 struct sd_xbuf *xp; 14788 struct buf *bp; 14789 void (*statp)(kstat_io_t *); 14790 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14791 void (*saved_statp)(kstat_io_t *); 14792 #endif 14793 int rval; 14794 struct sd_fm_internal *sfip = NULL; 14795 14796 ASSERT(un != NULL); 14797 ASSERT(mutex_owned(SD_MUTEX(un))); 14798 ASSERT(un->un_ncmds_in_transport >= 0); 14799 ASSERT(un->un_throttle >= 0); 14800 14801 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 14802 14803 do { 14804 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14805 saved_statp = NULL; 14806 #endif 14807 14808 /* 14809 * If we are syncing or dumping, fail the command to 14810 * avoid recursively calling back into scsi_transport(). 14811 * The dump I/O itself uses a separate code path so this 14812 * only prevents non-dump I/O from being sent while dumping. 14813 * File system sync takes place before dumping begins. 14814 * During panic, filesystem I/O is allowed provided 14815 * un_in_callback is <= 1. This is to prevent recursion 14816 * such as sd_start_cmds -> scsi_transport -> sdintr -> 14817 * sd_start_cmds and so on. See panic.c for more information 14818 * about the states the system can be in during panic. 14819 */ 14820 if ((un->un_state == SD_STATE_DUMPING) || 14821 (ddi_in_panic() && (un->un_in_callback > 1))) { 14822 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14823 "sd_start_cmds: panicking\n"); 14824 goto exit; 14825 } 14826 14827 if ((bp = immed_bp) != NULL) { 14828 /* 14829 * We have a bp that must be transported immediately. 14830 * It's OK to transport the immed_bp here without doing 14831 * the throttle limit check because the immed_bp is 14832 * always used in a retry/recovery case. This means 14833 * that we know we are not at the throttle limit by 14834 * virtue of the fact that to get here we must have 14835 * already gotten a command back via sdintr(). This also 14836 * relies on (1) the command on un_retry_bp preventing 14837 * further commands from the waitq from being issued; 14838 * and (2) the code in sd_retry_command checking the 14839 * throttle limit before issuing a delayed or immediate 14840 * retry. This holds even if the throttle limit is 14841 * currently ratcheted down from its maximum value. 
14842              */
14843             statp = kstat_runq_enter;
14844             if (bp == un->un_retry_bp) {
14845                 ASSERT((un->un_retry_statp == NULL) ||
14846                     (un->un_retry_statp == kstat_waitq_enter) ||
14847                     (un->un_retry_statp ==
14848                     kstat_runq_back_to_waitq));
14849                 /*
14850                  * If the waitq kstat was incremented when
14851                  * sd_set_retry_bp() queued this bp for a retry,
14852                  * then we must set up statp so that the waitq
14853                  * count will get decremented correctly below.
14854                  * Also we must clear un->un_retry_statp to
14855                  * ensure that we do not act on a stale value
14856                  * in this field.
14857                  */
14858                 if ((un->un_retry_statp == kstat_waitq_enter) ||
14859                     (un->un_retry_statp ==
14860                     kstat_runq_back_to_waitq)) {
14861                     statp = kstat_waitq_to_runq;
14862                 }
14863 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14864                 saved_statp = un->un_retry_statp;
14865 #endif
14866                 un->un_retry_statp = NULL;
14867 
14868                 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14869                     "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
14870                     "un_throttle:%d un_ncmds_in_transport:%d\n",
14871                     un, un->un_retry_bp, un->un_throttle,
14872                     un->un_ncmds_in_transport);
14873             } else {
14874                 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: "
14875                     "processing priority bp:0x%p\n", bp);
14876             }
14877 
14878         } else if ((bp = un->un_waitq_headp) != NULL) {
14879             /*
14880              * A command on the waitq is ready to go, but do not
14881              * send it if:
14882              *
14883              * (1) the throttle limit has been reached, or
14884              * (2) a retry is pending, or
14885              * (3) a START_STOP_UNIT callback is pending, or
14886              * (4) a callback for a SD_PATH_DIRECT_PRIORITY
14887              *     command is pending.
14888              *
14889              * For all of these conditions, IO processing will
14890              * restart after the condition is cleared.
14891              */
14892             if (un->un_ncmds_in_transport >= un->un_throttle) {
14893                 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14894                     "sd_start_cmds: exiting, "
14895                     "throttle limit reached!\n");
14896                 goto exit;
14897             }
14898             if (un->un_retry_bp != NULL) {
14899                 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14900                     "sd_start_cmds: exiting, retry pending!\n");
14901                 goto exit;
14902             }
14903             if (un->un_startstop_timeid != NULL) {
14904                 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14905                     "sd_start_cmds: exiting, "
14906                     "START_STOP pending!\n");
14907                 goto exit;
14908             }
14909             if (un->un_direct_priority_timeid != NULL) {
14910                 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14911                     "sd_start_cmds: exiting, "
14912                     "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
14913                 goto exit;
14914             }
14915 
14916             /* Dequeue the command */
14917             un->un_waitq_headp = bp->av_forw;
14918             if (un->un_waitq_headp == NULL) {
14919                 un->un_waitq_tailp = NULL;
14920             }
14921             bp->av_forw = NULL;
14922             statp = kstat_waitq_to_runq;
14923             SD_TRACE(SD_LOG_IO_CORE, un,
14924                 "sd_start_cmds: processing waitq bp:0x%p\n", bp);
14925 
14926         } else {
14927             /* No work to do so bail out now */
14928             SD_TRACE(SD_LOG_IO_CORE, un,
14929                 "sd_start_cmds: no more work, exiting!\n");
14930             goto exit;
14931         }
14932 
14933         /*
14934          * Reset the state to normal. This is the mechanism by which
14935          * the state transitions from either SD_STATE_RWAIT or
14936          * SD_STATE_OFFLINE to SD_STATE_NORMAL.
14937          * If state is SD_STATE_PM_CHANGING then this command is
14938          * part of the device power control and the state must
14939          * not be put back to normal. Doing so would
14940          * allow new commands to proceed when they shouldn't,
14941          * as the device may be going off.
14942          */
14943         if ((un->un_state != SD_STATE_SUSPENDED) &&
14944             (un->un_state != SD_STATE_PM_CHANGING)) {
14945             New_state(un, SD_STATE_NORMAL);
14946         }
14947 
14948         xp = SD_GET_XBUF(bp);
14949         ASSERT(xp != NULL);
14950 
14951 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14952         /*
14953          * Allocate the scsi_pkt if we need one, or attach DMA
14954          * resources if we have a scsi_pkt that needs them. The
14955          * latter should only occur for commands that are being
14956          * retried.
14957          */
14958         if ((xp->xb_pktp == NULL) ||
14959             ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) {
14960 #else
14961         if (xp->xb_pktp == NULL) {
14962 #endif
14963             /*
14964              * There is no scsi_pkt allocated for this buf. Call
14965              * the initpkt function to allocate & init one.
14966              *
14967              * The scsi_init_pkt runout callback functionality is
14968              * implemented as follows:
14969              *
14970              * 1) The initpkt function always calls
14971              *    scsi_init_pkt(9F) with sdrunout specified as the
14972              *    callback routine.
14973              * 2) A successful packet allocation is initialized and
14974              *    the I/O is transported.
14975              * 3) The I/O associated with an allocation resource
14976              *    failure is left on its queue to be retried via
14977              *    runout or the next I/O.
14978              * 4) The I/O associated with a DMA error is removed
14979              *    from the queue and failed with EIO. Processing of
14980              *    the transport queues is also halted to be
14981              *    restarted via runout or the next I/O.
14982              * 5) The I/O associated with a CDB size or packet
14983              *    size error is removed from the queue and failed
14984              *    with EIO. Processing of the transport queues is
14985              *    continued.
14986              *
14987              * Note: there is no interface for canceling a runout
14988              * callback. To prevent the driver from detaching or
14989              * suspending while a runout is pending, the driver
14990              * state is set to SD_STATE_RWAIT.
14991              *
14992              * Note: using the scsi_init_pkt callback facility can
14993              * result in an I/O request persisting at the head of
14994              * the list which cannot be satisfied even after
14995              * multiple retries. In the future the driver may
14996              * implement some kind of maximum runout count before
14997              * failing an I/O.
14998              *
14999              * Note: the use of funcp below may seem superfluous,
15000              * but it helps warlock figure out the correct
15001              * initpkt function calls (see [s]sd.wlcmd).
15002              */
15003             struct scsi_pkt *pktp;
15004             int (*funcp)(struct buf *bp, struct scsi_pkt **pktp);
15005 
15006             ASSERT(bp != un->un_rqs_bp);
15007 
15008             funcp = sd_initpkt_map[xp->xb_chain_iostart];
15009             switch ((*funcp)(bp, &pktp)) {
15010             case SD_PKT_ALLOC_SUCCESS:
15011                 xp->xb_pktp = pktp;
15012                 SD_TRACE(SD_LOG_IO_CORE, un,
15013                     "sd_start_cmds: SD_PKT_ALLOC_SUCCESS 0x%p\n",
15014                     pktp);
15015                 goto got_pkt;
15016 
15017             case SD_PKT_ALLOC_FAILURE:
15018                 /*
15019                  * Temporary (hopefully) resource depletion.
15020                  * Since retries and RQS commands always have a
15021                  * scsi_pkt allocated, these cases should never
15022                  * get here. So the only cases this needs to
15023                  * handle are a bp from the waitq (which we put
15024                  * back onto the waitq for sdrunout), or a bp
15025                  * sent as an immed_bp (which we just fail).
15026                  */
15027                 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15028                     "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n");
15029 
15030 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
15031 
15032                 if (bp == immed_bp) {
15033                     /*
15034                      * If SD_XB_DMA_FREED is clear, then
15035                      * this is a failure to allocate a
15036                      * scsi_pkt, and we must fail the
15037                      * command.
15038 */ 15039 if ((xp->xb_pkt_flags & 15040 SD_XB_DMA_FREED) == 0) { 15041 break; 15042 } 15043 15044 /* 15045 * If this immediate command is NOT our 15046 * un_retry_bp, then we must fail it. 15047 */ 15048 if (bp != un->un_retry_bp) { 15049 break; 15050 } 15051 15052 /* 15053 * We get here if this cmd is our 15054 * un_retry_bp that was DMAFREED, but 15055 * scsi_init_pkt() failed to reallocate 15056 * DMA resources when we attempted to 15057 * retry it. This can happen when an 15058 * mpxio failover is in progress, but 15059 * we don't want to just fail the 15060 * command in this case. 15061 * 15062 * Use timeout(9F) to restart it after 15063 * a 100ms delay. We don't want to 15064 * let sdrunout() restart it, because 15065 * sdrunout() is just supposed to start 15066 * commands that are sitting on the 15067 * wait queue. The un_retry_bp stays 15068 * set until the command completes, but 15069 * sdrunout can be called many times 15070 * before that happens. Since sdrunout 15071 * cannot tell if the un_retry_bp is 15072 * already in the transport, it could 15073 * end up calling scsi_transport() for 15074 * the un_retry_bp multiple times. 15075 * 15076 * Also: don't schedule the callback 15077 * if some other callback is already 15078 * pending. 15079 */ 15080 if (un->un_retry_statp == NULL) { 15081 /* 15082 * restore the kstat pointer to 15083 * keep kstat counts coherent 15084 * when we do retry the command. 15085 */ 15086 un->un_retry_statp = 15087 saved_statp; 15088 } 15089 15090 if ((un->un_startstop_timeid == NULL) && 15091 (un->un_retry_timeid == NULL) && 15092 (un->un_direct_priority_timeid == 15093 NULL)) { 15094 15095 un->un_retry_timeid = 15096 timeout( 15097 sd_start_retry_command, 15098 un, SD_RESTART_TIMEOUT); 15099 } 15100 goto exit; 15101 } 15102 15103 #else 15104 if (bp == immed_bp) { 15105 break; /* Just fail the command */ 15106 } 15107 #endif 15108 15109 /* Add the buf back to the head of the waitq */ 15110 bp->av_forw = un->un_waitq_headp; 15111 un->un_waitq_headp = bp; 15112 if (un->un_waitq_tailp == NULL) { 15113 un->un_waitq_tailp = bp; 15114 } 15115 goto exit; 15116 15117 case SD_PKT_ALLOC_FAILURE_NO_DMA: 15118 /* 15119 * HBA DMA resource failure. Fail the command 15120 * and continue processing of the queues. 15121 */ 15122 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15123 "sd_start_cmds: " 15124 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 15125 break; 15126 15127 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 15128 /* 15129 * Note:x86: Partial DMA mapping not supported 15130 * for USCSI commands, and all the needed DMA 15131 * resources were not allocated. 15132 */ 15133 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15134 "sd_start_cmds: " 15135 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 15136 break; 15137 15138 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 15139 /* 15140 * Note:x86: Request cannot fit into CDB based 15141 * on lba and len. 15142 */ 15143 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15144 "sd_start_cmds: " 15145 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 15146 break; 15147 15148 default: 15149 /* Should NEVER get here! */ 15150 panic("scsi_initpkt error"); 15151 /*NOTREACHED*/ 15152 } 15153 15154 /* 15155 * Fatal error in allocating a scsi_pkt for this buf. 15156 * Update kstats & return the buf with an error code. 15157 * We must use sd_return_failed_command_no_restart() to 15158 * avoid a recursive call back into sd_start_cmds(). 15159 * However this also means that we must keep processing 15160 * the waitq here in order to avoid stalling. 
15161 */ 15162 if (statp == kstat_waitq_to_runq) { 15163 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 15164 } 15165 sd_return_failed_command_no_restart(un, bp, EIO); 15166 if (bp == immed_bp) { 15167 /* immed_bp is gone by now, so clear this */ 15168 immed_bp = NULL; 15169 } 15170 continue; 15171 } 15172 got_pkt: 15173 if (bp == immed_bp) { 15174 /* goto the head of the class.... */ 15175 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15176 } 15177 15178 un->un_ncmds_in_transport++; 15179 SD_UPDATE_KSTATS(un, statp, bp); 15180 15181 /* 15182 * Call scsi_transport() to send the command to the target. 15183 * According to SCSA architecture, we must drop the mutex here 15184 * before calling scsi_transport() in order to avoid deadlock. 15185 * Note that the scsi_pkt's completion routine can be executed 15186 * (from interrupt context) even before the call to 15187 * scsi_transport() returns. 15188 */ 15189 SD_TRACE(SD_LOG_IO_CORE, un, 15190 "sd_start_cmds: calling scsi_transport()\n"); 15191 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 15192 15193 mutex_exit(SD_MUTEX(un)); 15194 rval = scsi_transport(xp->xb_pktp); 15195 mutex_enter(SD_MUTEX(un)); 15196 15197 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15198 "sd_start_cmds: scsi_transport() returned %d\n", rval); 15199 15200 switch (rval) { 15201 case TRAN_ACCEPT: 15202 /* Clear this with every pkt accepted by the HBA */ 15203 un->un_tran_fatal_count = 0; 15204 break; /* Success; try the next cmd (if any) */ 15205 15206 case TRAN_BUSY: 15207 un->un_ncmds_in_transport--; 15208 ASSERT(un->un_ncmds_in_transport >= 0); 15209 15210 /* 15211 * Don't retry request sense, the sense data 15212 * is lost when another request is sent. 15213 * Free up the rqs buf and retry 15214 * the original failed cmd. Update kstat. 15215 */ 15216 if (bp == un->un_rqs_bp) { 15217 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15218 bp = sd_mark_rqs_idle(un, xp); 15219 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15220 NULL, NULL, EIO, un->un_busy_timeout / 500, 15221 kstat_waitq_enter); 15222 goto exit; 15223 } 15224 15225 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 15226 /* 15227 * Free the DMA resources for the scsi_pkt. This will 15228 * allow mpxio to select another path the next time 15229 * we call scsi_transport() with this scsi_pkt. 15230 * See sdintr() for the rationalization behind this. 15231 */ 15232 if ((un->un_f_is_fibre == TRUE) && 15233 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15234 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 15235 scsi_dmafree(xp->xb_pktp); 15236 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15237 } 15238 #endif 15239 15240 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 15241 /* 15242 * Commands that are SD_PATH_DIRECT_PRIORITY 15243 * are for error recovery situations. These do 15244 * not use the normal command waitq, so if they 15245 * get a TRAN_BUSY we cannot put them back onto 15246 * the waitq for later retry. One possible 15247 * problem is that there could already be some 15248 * other command on un_retry_bp that is waiting 15249 * for this one to complete, so we would be 15250 * deadlocked if we put this command back onto 15251 * the waitq for later retry (since un_retry_bp 15252 * must complete before the driver gets back to 15253 * commands on the waitq). 15254 * 15255 * To avoid deadlock we must schedule a callback 15256 * that will restart this command after a set 15257 * interval. 
This should keep retrying for as 15258 * long as the underlying transport keeps 15259 * returning TRAN_BUSY (just like for other 15260 * commands). Use the same timeout interval as 15261 * for the ordinary TRAN_BUSY retry. 15262 */ 15263 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15264 "sd_start_cmds: scsi_transport() returned " 15265 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 15266 15267 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15268 un->un_direct_priority_timeid = 15269 timeout(sd_start_direct_priority_command, 15270 bp, un->un_busy_timeout / 500); 15271 15272 goto exit; 15273 } 15274 15275 /* 15276 * For TRAN_BUSY, we want to reduce the throttle value, 15277 * unless we are retrying a command. 15278 */ 15279 if (bp != un->un_retry_bp) { 15280 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 15281 } 15282 15283 /* 15284 * Set up the bp to be tried again 10 ms later. 15285 * Note:x86: Is there a timeout value in the sd_lun 15286 * for this condition? 15287 */ 15288 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, 15289 kstat_runq_back_to_waitq); 15290 goto exit; 15291 15292 case TRAN_FATAL_ERROR: 15293 un->un_tran_fatal_count++; 15294 /* FALLTHRU */ 15295 15296 case TRAN_BADPKT: 15297 default: 15298 un->un_ncmds_in_transport--; 15299 ASSERT(un->un_ncmds_in_transport >= 0); 15300 15301 /* 15302 * If this is our REQUEST SENSE command with a 15303 * transport error, we must get back the pointers 15304 * to the original buf, and mark the REQUEST 15305 * SENSE command as "available". 15306 */ 15307 if (bp == un->un_rqs_bp) { 15308 bp = sd_mark_rqs_idle(un, xp); 15309 xp = SD_GET_XBUF(bp); 15310 } else { 15311 /* 15312 * Legacy behavior: do not update transport 15313 * error count for request sense commands. 15314 */ 15315 SD_UPDATE_ERRSTATS(un, sd_transerrs); 15316 } 15317 15318 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15319 sd_print_transport_rejected_message(un, xp, rval); 15320 15321 /* 15322 * This command will be terminated by SD driver due 15323 * to a fatal transport error. We should post 15324 * ereport.io.scsi.cmd.disk.tran with driver-assessment 15325 * of "fail" for any command to indicate this 15326 * situation. 15327 */ 15328 if (xp->xb_ena > 0) { 15329 ASSERT(un->un_fm_private != NULL); 15330 sfip = un->un_fm_private; 15331 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT; 15332 sd_ssc_extract_info(&sfip->fm_ssc, un, 15333 xp->xb_pktp, bp, xp); 15334 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 15335 } 15336 15337 /* 15338 * We must use sd_return_failed_command_no_restart() to 15339 * avoid a recursive call back into sd_start_cmds(). 15340 * However this also means that we must keep processing 15341 * the waitq here in order to avoid stalling. 15342 */ 15343 sd_return_failed_command_no_restart(un, bp, EIO); 15344 15345 /* 15346 * Notify any threads waiting in sd_ddi_suspend() that 15347 * a command completion has occurred. 15348 */ 15349 if (un->un_state == SD_STATE_SUSPENDED) { 15350 cv_broadcast(&un->un_disk_busy_cv); 15351 } 15352 15353 if (bp == immed_bp) { 15354 /* immed_bp is gone by now, so clear this */ 15355 immed_bp = NULL; 15356 } 15357 break; 15358 } 15359 15360 } while (immed_bp == NULL); 15361 15362 exit: 15363 ASSERT(mutex_owned(SD_MUTEX(un))); 15364 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 15365 } 15366 15367 15368 /* 15369 * Function: sd_return_command 15370 * 15371 * Description: Returns a command to its originator (with or without an 15372 * error). Also starts commands waiting to be transported 15373 * to the target. 
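 *
 *		Usage sketch (illustrative only; the success/failure split
 *		shown is an assumption for this sketch, drawn from the
 *		helpers that follow):
 *
 *		    mutex_enter(SD_MUTEX(un));
 *		    if (bp->b_error == 0)
 *			    sd_return_command(un, bp);
 *		    else
 *			    sd_return_failed_command(un, bp, EIO);
 *		    mutex_exit(SD_MUTEX(un));
 *
 *		(sd_return_command must be called with SD_MUTEX held, per
 *		the ASSERT below.)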
15374  *
15375  *     Context: May be called from interrupt, kernel, or timeout context
15376  */
15377 
15378 static void
15379 sd_return_command(struct sd_lun *un, struct buf *bp)
15380 {
15381     struct sd_xbuf *xp;
15382     struct scsi_pkt *pktp;
15383     struct sd_fm_internal *sfip;
15384 
15385     ASSERT(bp != NULL);
15386     ASSERT(un != NULL);
15387     ASSERT(mutex_owned(SD_MUTEX(un)));
15388     ASSERT(bp != un->un_rqs_bp);
15389     xp = SD_GET_XBUF(bp);
15390     ASSERT(xp != NULL);
15391 
15392     pktp = SD_GET_PKTP(bp);
15393     sfip = (struct sd_fm_internal *)un->un_fm_private;
15394     ASSERT(sfip != NULL);
15395 
15396     SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n");
15397 
15398     /*
15399      * Note: check for the "sdrestart failed" case.
15400      */
15401     if ((un->un_partial_dma_supported == 1) &&
15402         ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) &&
15403         (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
15404         (xp->xb_pktp->pkt_resid == 0)) {
15405 
15406         if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) {
15407             /*
15408              * Successfully set up the next portion of the cmd
15409              * transfer; try sending it
15410              */
15411             sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
15412                 NULL, NULL, 0, (clock_t)0, NULL);
15413             sd_start_cmds(un, NULL);
15414             return; /* Note:x86: need a return here? */
15415         }
15416     }
15417 
15418     /*
15419      * If this is the failfast bp, clear it from un_failfast_bp. This
15420      * can happen if, upon being re-tried, the failfast bp either
15421      * succeeded or encountered another error (possibly even a different
15422      * error than the one that precipitated the failfast state, but in
15423      * that case it would have had to exhaust retries as well). Regardless,
15424      * this should never occur while the instance is in the active
15425      * failfast state.
15426      */
15427     if (bp == un->un_failfast_bp) {
15428         ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
15429         un->un_failfast_bp = NULL;
15430     }
15431 
15432     /*
15433      * Clear the failfast state upon successful completion of ANY cmd.
15434      */
15435     if (bp->b_error == 0) {
15436         un->un_failfast_state = SD_FAILFAST_INACTIVE;
15437         /*
15438          * If this is a successful command that was previously
15439          * retried, we will take it as a recovered command and post
15440          * an ereport with a driver-assessment of "recovered".
15441          */
15442         if (xp->xb_ena > 0) {
15443             sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
15444             sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY);
15445         }
15446     } else {
15447         /*
15448          * If this is a failed non-USCSI command, we will post an
15449          * ereport with the driver-assessment set accordingly ("fail"
15450          * or "fatal").
15451          */
15452         if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
15453             sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
15454             sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
15455         }
15456     }
15457 
15458     /*
15459      * This is used if the command was retried one or more times. Show that
15460      * we are done with it, and allow processing of the waitq to resume.
15461      */
15462     if (bp == un->un_retry_bp) {
15463         SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15464             "sd_return_command: un:0x%p: "
15465             "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
15466         un->un_retry_bp = NULL;
15467         un->un_retry_statp = NULL;
15468     }
15469 
15470     SD_UPDATE_RDWR_STATS(un, bp);
15471     SD_UPDATE_PARTITION_STATS(un, bp);
15472 
15473     switch (un->un_state) {
15474     case SD_STATE_SUSPENDED:
15475         /*
15476          * Notify any threads waiting in sd_ddi_suspend() that
15477          * a command completion has occurred.
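         *
         * (Sketch of the assumed waiter side in sd_ddi_suspend(),
         * for illustration only:
         *
         *	while (un->un_ncmds_in_transport != 0)
         *		cv_wait(&un->un_disk_busy_cv, SD_MUTEX(un));
         *
         * the broadcast below wakes such waiters so they can
         * re-check the condition under SD_MUTEX.)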
15478          */
15479         cv_broadcast(&un->un_disk_busy_cv);
15480         break;
15481     default:
15482         sd_start_cmds(un, NULL);
15483         break;
15484     }
15485 
15486     /* Return this command up the iodone chain to its originator. */
15487     mutex_exit(SD_MUTEX(un));
15488 
15489     (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
15490     xp->xb_pktp = NULL;
15491 
15492     SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
15493 
15494     ASSERT(!mutex_owned(SD_MUTEX(un)));
15495     mutex_enter(SD_MUTEX(un));
15496 
15497     SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
15498 }
15499 
15500 
15501 /*
15502  * Function: sd_return_failed_command
15503  *
15504  * Description: Command completion when an error occurred.
15505  *
15506  *     Context: May be called from interrupt context
15507  */
15508 
15509 static void
15510 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
15511 {
15512     ASSERT(bp != NULL);
15513     ASSERT(un != NULL);
15514     ASSERT(mutex_owned(SD_MUTEX(un)));
15515 
15516     SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15517         "sd_return_failed_command: entry\n");
15518 
15519     /*
15520      * b_resid could already be nonzero due to a partial data
15521      * transfer, so do not change it here.
15522      */
15523     SD_BIOERROR(bp, errcode);
15524 
15525     sd_return_command(un, bp);
15526     SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15527         "sd_return_failed_command: exit\n");
15528 }
15529 
15530 
15531 /*
15532  * Function: sd_return_failed_command_no_restart
15533  *
15534  * Description: Same as sd_return_failed_command, but ensures that no
15535  *		call back into sd_start_cmds will be issued.
15536  *
15537  *     Context: May be called from interrupt context
15538  */
15539 
15540 static void
15541 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
15542     int errcode)
15543 {
15544     struct sd_xbuf *xp;
15545 
15546     ASSERT(bp != NULL);
15547     ASSERT(un != NULL);
15548     ASSERT(mutex_owned(SD_MUTEX(un)));
15549     xp = SD_GET_XBUF(bp);
15550     ASSERT(xp != NULL);
15551     ASSERT(errcode != 0);
15552 
15553     SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15554         "sd_return_failed_command_no_restart: entry\n");
15555 
15556     /*
15557      * b_resid could already be nonzero due to a partial data
15558      * transfer, so do not change it here.
15559      */
15560     SD_BIOERROR(bp, errcode);
15561 
15562     /*
15563      * If this is the failfast bp, clear it. This can happen if the
15564      * failfast bp encountered a fatal error when we attempted to
15565      * re-try it (such as a scsi_transport(9F) failure). However,
15566      * we should NOT be in an active failfast state if the failfast
15567      * bp is not NULL.
15568      */
15569     if (bp == un->un_failfast_bp) {
15570         ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
15571         un->un_failfast_bp = NULL;
15572     }
15573 
15574     if (bp == un->un_retry_bp) {
15575         /*
15576          * This command was retried one or more times. Show that we are
15577          * done with it, and allow processing of the waitq to resume.
15578          */
15579         SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15580             "sd_return_failed_command_no_restart: "
15581             " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
15582         un->un_retry_bp = NULL;
15583         un->un_retry_statp = NULL;
15584     }
15585 
15586     SD_UPDATE_RDWR_STATS(un, bp);
15587     SD_UPDATE_PARTITION_STATS(un, bp);
15588 
15589     mutex_exit(SD_MUTEX(un));
15590 
15591     if (xp->xb_pktp != NULL) {
15592         (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
15593         xp->xb_pktp = NULL;
15594     }
15595 
15596     SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
15597 
15598     mutex_enter(SD_MUTEX(un));
15599 
15600     SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15601         "sd_return_failed_command_no_restart: exit\n");
15602 }
15603 
15604 
15605 /*
15606  * Function: sd_retry_command
15607  *
15608  * Description: Queue up a command for retry, or (optionally) fail it
15609  *		if retry counts are exhausted.
15610  *
15611  *   Arguments: un - Pointer to the sd_lun struct for the target.
15612  *
15613  *		bp - Pointer to the buf for the command to be retried.
15614  *
15615  *		retry_check_flag - Flag to see which (if any) of the retry
15616  *		counts should be decremented/checked. If the indicated
15617  *		retry count is exhausted, then the command will not be
15618  *		retried; it will be failed instead. This should use a
15619  *		value equal to one of the following:
15620  *
15621  *			SD_RETRIES_NOCHECK
15622  *			SD_RETRIES_STANDARD
15623  *			SD_RETRIES_VICTIM
15624  *
15625  *		Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
15626  *		if the check should be made to see if FLAG_ISOLATE is set
15627  *		in the pkt. If FLAG_ISOLATE is set, then the command is
15628  *		not retried, it is simply failed.
15629  *
15630  *		user_funcp - Ptr to function to call before dispatching the
15631  *		command. May be NULL if no action needs to be performed.
15632  *		(Primarily intended for printing messages.)
15633  *
15634  *		user_arg - Optional argument to be passed along to
15635  *		the user_funcp call.
15636  *
15637  *		failure_code - errno return code to set in the bp if the
15638  *		command is going to be failed.
15639  *
15640  *		retry_delay - Retry delay interval in (clock_t) units. May
15641  *		be zero, which indicates that the command should be retried
15642  *		immediately (i.e., without an intervening delay).
15643  *
15644  *		statp - Ptr to kstat function to be updated if the command
15645  *		is queued for a delayed retry. May be NULL if no kstat
15646  *		update is desired.
15647  *
15648  *     Context: May be called from interrupt context.
15649  */
15650 
15651 static void
15652 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
15653     void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int code),
15654     void *user_arg, int failure_code, clock_t retry_delay,
15655     void (*statp)(kstat_io_t *))
15656 {
15657     struct sd_xbuf *xp;
15658     struct scsi_pkt *pktp;
15659     struct sd_fm_internal *sfip;
15660 
15661     ASSERT(un != NULL);
15662     ASSERT(mutex_owned(SD_MUTEX(un)));
15663     ASSERT(bp != NULL);
15664     xp = SD_GET_XBUF(bp);
15665     ASSERT(xp != NULL);
15666     pktp = SD_GET_PKTP(bp);
15667     ASSERT(pktp != NULL);
15668 
15669     sfip = (struct sd_fm_internal *)un->un_fm_private;
15670     ASSERT(sfip != NULL);
15671 
15672     SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
15673         "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);
15674 
15675     /*
15676      * If we are syncing or dumping, fail the command to avoid
15677      * recursively calling back into scsi_transport().
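     * (For illustration, the recursion being avoided would look
     * like: sd_retry_command() with a zero delay -> sd_start_cmds()
     * -> scsi_transport() -> another failure -> sd_retry_command(),
     * and so on.)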
15678      */
15679     if (ddi_in_panic()) {
15680         goto fail_command_no_log;
15681     }
15682 
15683     /*
15684      * We should never be retrying a command with FLAG_DIAGNOSE set, so
15685      * log an error and fail the command.
15686      */
15687     if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
15688         scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
15689             "ERROR, retrying FLAG_DIAGNOSE command.\n");
15690         sd_dump_memory(un, SD_LOG_IO, "CDB",
15691             (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
15692         sd_dump_memory(un, SD_LOG_IO, "Sense Data",
15693             (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
15694         goto fail_command;
15695     }
15696 
15697     /*
15698      * If we are suspended, then put the command onto the head of
15699      * the wait queue since we don't want to start any more commands,
15700      * and clear un_retry_bp. The next time we are resumed we will
15701      * handle the commands in the wait queue.
15702      */
15703     switch (un->un_state) {
15704     case SD_STATE_SUSPENDED:
15705     case SD_STATE_DUMPING:
15706         bp->av_forw = un->un_waitq_headp;
15707         un->un_waitq_headp = bp;
15708         if (un->un_waitq_tailp == NULL) {
15709             un->un_waitq_tailp = bp;
15710         }
15711         if (bp == un->un_retry_bp) {
15712             un->un_retry_bp = NULL;
15713             un->un_retry_statp = NULL;
15714         }
15715         SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
15716         SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
15717             "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
15718         return;
15719     default:
15720         break;
15721     }
15722 
15723     /*
15724      * If the caller wants us to check FLAG_ISOLATE, then see if that
15725      * is set; if it is, then we do not want to retry the command.
15726      * Normally, FLAG_ISOLATE is only used with USCSI cmds.
15727      */
15728     if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
15729         if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
15730             goto fail_command;
15731         }
15732     }
15733 
15734 
15735     /*
15736      * If SD_RETRIES_FAILFAST is set, it indicates that either a
15737      * command timeout or a selection timeout has occurred. This means
15738      * that we were unable to establish any kind of communication with
15739      * the target, and subsequent retries and/or commands are likely
15740      * to encounter similar results and take a long time to complete.
15741      *
15742      * If this is a failfast error condition, we need to update the
15743      * failfast state, even if this bp does not have B_FAILFAST set.
15744      */
15745     if (retry_check_flag & SD_RETRIES_FAILFAST) {
15746         if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
15747             ASSERT(un->un_failfast_bp == NULL);
15748             /*
15749              * If we are already in the active failfast state, and
15750              * another failfast error condition has been detected,
15751              * then fail this command if it has B_FAILFAST set.
15752              * If B_FAILFAST is clear, then maintain the legacy
15753              * behavior of retrying heroically, even though this
15754              * will take a lot more time to fail the command.
15755              */
15756             if (bp->b_flags & B_FAILFAST) {
15757                 goto fail_command;
15758             }
15759         } else {
15760             /*
15761              * We're not in the active failfast state, but we
15762              * have a failfast error condition, so we must begin
15763              * transition to the next state. We do this regardless
15764              * of whether or not this bp has B_FAILFAST set.
15765              */
15766             if (un->un_failfast_bp == NULL) {
15767                 /*
15768                  * This is the first bp to meet a failfast
15769                  * condition so save it on un_failfast_bp &
15770                  * do normal retry processing. Do not enter
15771                  * active failfast state yet. This marks
15772                  * entry into the "failfast pending" state.
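                 *
                 * (Illustrative summary, condensed from the
                 * logic above and below; "pending" is not a
                 * separate state variable, it is represented
                 * by a non-NULL un_failfast_bp:
                 *
                 *   INACTIVE --first failfast error-->
                 *	pending (un_failfast_bp = bp)
                 *   pending --second failfast error on the
                 *	same bp--> ACTIVE, queues flushed via
                 *	sd_failfast_flushq()
                 *   any --retry without SD_RETRIES_FAILFAST-->
                 *	INACTIVE)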
15773                  */
15774                 un->un_failfast_bp = bp;
15775 
15776             } else if (un->un_failfast_bp == bp) {
15777                 /*
15778                  * This is the second time *this* bp has
15779                  * encountered a failfast error condition,
15780                  * so enter active failfast state & flush
15781                  * queues as appropriate.
15782                  */
15783                 un->un_failfast_state = SD_FAILFAST_ACTIVE;
15784                 un->un_failfast_bp = NULL;
15785                 sd_failfast_flushq(un);
15786 
15787                 /*
15788                  * Fail this bp now if B_FAILFAST set;
15789                  * otherwise continue with retries. (It would
15790                  * be pretty ironic if this bp succeeded on a
15791                  * subsequent retry after we just flushed all
15792                  * the queues).
15793                  */
15794                 if (bp->b_flags & B_FAILFAST) {
15795                     goto fail_command;
15796                 }
15797 
15798 #if !defined(lint) && !defined(__lint)
15799             } else {
15800                 /*
15801                  * If neither of the preceding conditionals
15802                  * was true, it means that there is some
15803                  * *other* bp that has met an initial failfast
15804                  * condition and is currently either being
15805                  * retried or is waiting to be retried. In
15806                  * that case we should perform normal retry
15807                  * processing on *this* bp, since there is a
15808                  * chance that the current failfast condition
15809                  * is transient and recoverable. If that does
15810                  * not turn out to be the case, then retries
15811                  * will be cleared when the wait queue is
15812                  * flushed anyway.
15813                  */
15814 #endif
15815             }
15816         }
15817     } else {
15818         /*
15819          * SD_RETRIES_FAILFAST is clear, which indicates that we
15820          * likely were able to at least establish some level of
15821          * communication with the target and subsequent commands
15822          * and/or retries are likely to get through to the target.
15823          * In this case we want to be aggressive about clearing
15824          * the failfast state. Note that this does not affect
15825          * the "failfast pending" condition.
15826          */
15827         un->un_failfast_state = SD_FAILFAST_INACTIVE;
15828     }
15829 
15830 
15831     /*
15832      * Check the specified retry count to see if we can still do
15833      * any retries with this pkt before we should fail it.
15834      */
15835     switch (retry_check_flag & SD_RETRIES_MASK) {
15836     case SD_RETRIES_VICTIM:
15837         /*
15838          * Check the victim retry count. If exhausted, then fall
15839          * through & check against the standard retry count.
15840          */
15841         if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
15842             /* Increment count & proceed with the retry */
15843             xp->xb_victim_retry_count++;
15844             break;
15845         }
15846         /* Victim retries exhausted, fall back to std. retries... */
15847         /* FALLTHRU */
15848 
15849     case SD_RETRIES_STANDARD:
15850         if (xp->xb_retry_count >= un->un_retry_count) {
15851             /* Retries exhausted, fail the command */
15852             SD_TRACE(SD_LOG_IO_CORE, un,
15853                 "sd_retry_command: retries exhausted!\n");
15854             /*
15855              * update b_resid for failed SCMD_READ & SCMD_WRITE
15856              * commands with nonzero pkt_resid.
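             *
             * (For illustration, assuming SD_UPDATE_B_RESID folds
             * the packet residual into the buf residual, the
             * effect is roughly:
             *
             *	bp->b_resid += pktp->pkt_resid;
             *
             * so the caller can see how much of the transfer
             * never completed.)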
15857              */
15858             if ((pktp->pkt_reason == CMD_CMPLT) &&
15859                 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
15860                 (pktp->pkt_resid != 0)) {
15861                 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
15862                 if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
15863                     SD_UPDATE_B_RESID(bp, pktp);
15864                 }
15865             }
15866             goto fail_command;
15867         }
15868         xp->xb_retry_count++;
15869         SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15870             "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
15871         break;
15872 
15873     case SD_RETRIES_UA:
15874         if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
15875             /* Retries exhausted, fail the command */
15876             scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
15877                 "Unit Attention retries exhausted. "
15878                 "Check the target.\n");
15879             goto fail_command;
15880         }
15881         xp->xb_ua_retry_count++;
15882         SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15883             "sd_retry_command: retry count:%d\n",
15884             xp->xb_ua_retry_count);
15885         break;
15886 
15887     case SD_RETRIES_BUSY:
15888         if (xp->xb_retry_count >= un->un_busy_retry_count) {
15889             /* Retries exhausted, fail the command */
15890             SD_TRACE(SD_LOG_IO_CORE, un,
15891                 "sd_retry_command: retries exhausted!\n");
15892             goto fail_command;
15893         }
15894         xp->xb_retry_count++;
15895         SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15896             "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
15897         break;
15898 
15899     case SD_RETRIES_NOCHECK:
15900     default:
15901         /* No retry count to check. Just proceed with the retry */
15902         break;
15903     }
15904 
15905     xp->xb_pktp->pkt_flags |= FLAG_HEAD;
15906 
15907     /*
15908      * If this is a non-USCSI command that was retried during its
15909      * last execution, we should post an ereport with a
15910      * driver-assessment of the value "retry".
15911      * For partial DMA, request sense and STATUS_QFULL, there are no
15912      * hardware errors, so we bypass ereport posting.
15913      */
15914     if (failure_code != 0) {
15915         if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
15916             sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
15917             sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY);
15918         }
15919     }
15920 
15921     /*
15922      * If we were given a zero timeout, we must attempt to retry the
15923      * command immediately (i.e., without a delay).
15924      */
15925     if (retry_delay == 0) {
15926         /*
15927          * Check some limiting conditions to see if we can actually
15928          * do the immediate retry. If we cannot, then we must
15929          * fall back to queueing up a delayed retry.
15930          */
15931         if (un->un_ncmds_in_transport >= un->un_throttle) {
15932             /*
15933              * We are at the throttle limit for the target,
15934              * fall back to delayed retry.
15935              */
15936             retry_delay = un->un_busy_timeout;
15937             statp = kstat_waitq_enter;
15938             SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15939                 "sd_retry_command: immed. retry hit "
15940                 "throttle!\n");
15941         } else {
15942             /*
15943              * We're clear to proceed with the immediate retry.
15944              * First call the user-provided function (if any)
15945              */
15946             if (user_funcp != NULL) {
15947                 (*user_funcp)(un, bp, user_arg,
15948                     SD_IMMEDIATE_RETRY_ISSUED);
15949 #ifdef __lock_lint
15950                 sd_print_incomplete_msg(un, bp, user_arg,
15951                     SD_IMMEDIATE_RETRY_ISSUED);
15952                 sd_print_cmd_incomplete_msg(un, bp, user_arg,
15953                     SD_IMMEDIATE_RETRY_ISSUED);
15954                 sd_print_sense_failed_msg(un, bp, user_arg,
15955                     SD_IMMEDIATE_RETRY_ISSUED);
15956 #endif
15957             }
15958 
15959             SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15960                 "sd_retry_command: issuing immediate retry\n");
15961 
15962             /*
15963              * Call sd_start_cmds() to transport the command to
15964              * the target.
15965 */ 15966 sd_start_cmds(un, bp); 15967 15968 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15969 "sd_retry_command exit\n"); 15970 return; 15971 } 15972 } 15973 15974 /* 15975 * Set up to retry the command after a delay. 15976 * First call the user-provided function (if any) 15977 */ 15978 if (user_funcp != NULL) { 15979 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 15980 } 15981 15982 sd_set_retry_bp(un, bp, retry_delay, statp); 15983 15984 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15985 return; 15986 15987 fail_command: 15988 15989 if (user_funcp != NULL) { 15990 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 15991 } 15992 15993 fail_command_no_log: 15994 15995 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15996 "sd_retry_command: returning failed command\n"); 15997 15998 sd_return_failed_command(un, bp, failure_code); 15999 16000 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 16001 } 16002 16003 16004 /* 16005 * Function: sd_set_retry_bp 16006 * 16007 * Description: Set up the given bp for retry. 16008 * 16009 * Arguments: un - ptr to associated softstate 16010 * bp - ptr to buf(9S) for the command 16011 * retry_delay - time interval before issuing retry (may be 0) 16012 * statp - optional pointer to kstat function 16013 * 16014 * Context: May be called under interrupt context 16015 */ 16016 16017 static void 16018 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 16019 void (*statp)(kstat_io_t *)) 16020 { 16021 ASSERT(un != NULL); 16022 ASSERT(mutex_owned(SD_MUTEX(un))); 16023 ASSERT(bp != NULL); 16024 16025 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 16026 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 16027 16028 /* 16029 * Indicate that the command is being retried. This will not allow any 16030 * other commands on the wait queue to be transported to the target 16031 * until this command has been completed (success or failure). The 16032 * "retry command" is not transported to the target until the given 16033 * time delay expires, unless the user specified a 0 retry_delay. 16034 * 16035 * Note: the timeout(9F) callback routine is what actually calls 16036 * sd_start_cmds() to transport the command, with the exception of a 16037 * zero retry_delay. The only current implementor of a zero retry delay 16038 * is the case where a START_STOP_UNIT is sent to spin-up a device. 16039 */ 16040 if (un->un_retry_bp == NULL) { 16041 ASSERT(un->un_retry_statp == NULL); 16042 un->un_retry_bp = bp; 16043 16044 /* 16045 * If the user has not specified a delay the command should 16046 * be queued and no timeout should be scheduled. 16047 */ 16048 if (retry_delay == 0) { 16049 /* 16050 * Save the kstat pointer that will be used in the 16051 * call to SD_UPDATE_KSTATS() below, so that 16052 * sd_start_cmds() can correctly decrement the waitq 16053 * count when it is time to transport this command. 16054 */ 16055 un->un_retry_statp = statp; 16056 goto done; 16057 } 16058 } 16059 16060 if (un->un_retry_bp == bp) { 16061 /* 16062 * Save the kstat pointer that will be used in the call to 16063 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 16064 * correctly decrement the waitq count when it is time to 16065 * transport this command. 16066 */ 16067 un->un_retry_statp = statp; 16068 16069 /* 16070 * Schedule a timeout if: 16071 * 1) The user has specified a delay. 16072 * 2) There is not a START_STOP_UNIT callback pending. 
16073 * 16074 * If no delay has been specified, then it is up to the caller 16075 * to ensure that IO processing continues without stalling. 16076 * Effectively, this means that the caller will issue the 16077 * required call to sd_start_cmds(). The START_STOP_UNIT 16078 * callback does this after the START STOP UNIT command has 16079 * completed. In either of these cases we should not schedule 16080 * a timeout callback here. Also don't schedule the timeout if 16081 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 16082 */ 16083 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 16084 (un->un_direct_priority_timeid == NULL)) { 16085 un->un_retry_timeid = 16086 timeout(sd_start_retry_command, un, retry_delay); 16087 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16088 "sd_set_retry_bp: setting timeout: un: 0x%p" 16089 " bp:0x%p un_retry_timeid:0x%p\n", 16090 un, bp, un->un_retry_timeid); 16091 } 16092 } else { 16093 /* 16094 * We only get in here if there is already another command 16095 * waiting to be retried. In this case, we just put the 16096 * given command onto the wait queue, so it can be transported 16097 * after the current retry command has completed. 16098 * 16099 * Also we have to make sure that if the command at the head 16100 * of the wait queue is the un_failfast_bp, that we do not 16101 * put ahead of it any other commands that are to be retried. 16102 */ 16103 if ((un->un_failfast_bp != NULL) && 16104 (un->un_failfast_bp == un->un_waitq_headp)) { 16105 /* 16106 * Enqueue this command AFTER the first command on 16107 * the wait queue (which is also un_failfast_bp). 16108 */ 16109 bp->av_forw = un->un_waitq_headp->av_forw; 16110 un->un_waitq_headp->av_forw = bp; 16111 if (un->un_waitq_headp == un->un_waitq_tailp) { 16112 un->un_waitq_tailp = bp; 16113 } 16114 } else { 16115 /* Enqueue this command at the head of the waitq. */ 16116 bp->av_forw = un->un_waitq_headp; 16117 un->un_waitq_headp = bp; 16118 if (un->un_waitq_tailp == NULL) { 16119 un->un_waitq_tailp = bp; 16120 } 16121 } 16122 16123 if (statp == NULL) { 16124 statp = kstat_waitq_enter; 16125 } 16126 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16127 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 16128 } 16129 16130 done: 16131 if (statp != NULL) { 16132 SD_UPDATE_KSTATS(un, statp, bp); 16133 } 16134 16135 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16136 "sd_set_retry_bp: exit un:0x%p\n", un); 16137 } 16138 16139 16140 /* 16141 * Function: sd_start_retry_command 16142 * 16143 * Description: Start the command that has been waiting on the target's 16144 * retry queue. Called from timeout(9F) context after the 16145 * retry delay interval has expired. 16146 * 16147 * Arguments: arg - pointer to associated softstate for the device. 16148 * 16149 * Context: timeout(9F) thread context. May not sleep. 
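 *
 *		Illustrative sketch of the scheduling side (as done in
 *		sd_set_retry_bp() above; retry_delay is in clock ticks):
 *
 *		    un->un_retry_timeid =
 *			timeout(sd_start_retry_command, un, retry_delay);
 *
 *		A pending callback could be cancelled with untimeout(9F);
 *		whether and where that is done is outside this sketch.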
16150  */
16151 
16152 static void
16153 sd_start_retry_command(void *arg)
16154 {
16155     struct sd_lun *un = arg;
16156 
16157     ASSERT(un != NULL);
16158     ASSERT(!mutex_owned(SD_MUTEX(un)));
16159 
16160     SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16161         "sd_start_retry_command: entry\n");
16162 
16163     mutex_enter(SD_MUTEX(un));
16164 
16165     un->un_retry_timeid = NULL;
16166 
16167     if (un->un_retry_bp != NULL) {
16168         SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16169             "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
16170             un, un->un_retry_bp);
16171         sd_start_cmds(un, un->un_retry_bp);
16172     }
16173 
16174     mutex_exit(SD_MUTEX(un));
16175 
16176     SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16177         "sd_start_retry_command: exit\n");
16178 }
16179 
16180 /*
16181  * Function: sd_rmw_msg_print_handler
16182  *
16183  * Description: If RMW mode is enabled and the warning message is triggered,
16184  *		print the I/O count during a fixed interval.
16185  *
16186  *   Arguments: arg - pointer to associated softstate for the device.
16187  *
16188  *     Context: timeout(9F) thread context. May not sleep.
16189  */
16190 static void
16191 sd_rmw_msg_print_handler(void *arg)
16192 {
16193     struct sd_lun *un = arg;
16194 
16195     ASSERT(un != NULL);
16196     ASSERT(!mutex_owned(SD_MUTEX(un)));
16197 
16198     SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16199         "sd_rmw_msg_print_handler: entry\n");
16200 
16201     mutex_enter(SD_MUTEX(un));
16202 
16203     if (un->un_rmw_incre_count > 0) {
16204         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16205             "%"PRIu64" I/O requests are not aligned with %d disk "
16206             "sector size in %ld seconds. They are handled through "
16207             "Read Modify Write but the performance is very low!\n",
16208             un->un_rmw_incre_count, un->un_tgt_blocksize,
16209             drv_hztousec(SD_RMW_MSG_PRINT_TIMEOUT) / 1000000);
16210         un->un_rmw_incre_count = 0;
16211         un->un_rmw_msg_timeid = timeout(sd_rmw_msg_print_handler,
16212             un, SD_RMW_MSG_PRINT_TIMEOUT);
16213     } else {
16214         un->un_rmw_msg_timeid = NULL;
16215     }
16216 
16217     mutex_exit(SD_MUTEX(un));
16218 
16219     SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16220         "sd_rmw_msg_print_handler: exit\n");
16221 }
16222 
16223 /*
16224  * Function: sd_start_direct_priority_command
16225  *
16226  * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had
16227  *		received TRAN_BUSY when we called scsi_transport() to send it
16228  *		to the underlying HBA. This function is called from timeout(9F)
16229  *		context after the delay interval has expired.
16230  *
16231  *   Arguments: arg - pointer to associated buf(9S) to be restarted.
16232  *
16233  *     Context: timeout(9F) thread context. May not sleep.
16234  */
16235 
16236 static void
16237 sd_start_direct_priority_command(void *arg)
16238 {
16239     struct buf *priority_bp = arg;
16240     struct sd_lun *un;
16241 
16242     ASSERT(priority_bp != NULL);
16243     un = SD_GET_UN(priority_bp);
16244     ASSERT(un != NULL);
16245     ASSERT(!mutex_owned(SD_MUTEX(un)));
16246 
16247     SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16248         "sd_start_direct_priority_command: entry\n");
16249 
16250     mutex_enter(SD_MUTEX(un));
16251     un->un_direct_priority_timeid = NULL;
16252     sd_start_cmds(un, priority_bp);
16253     mutex_exit(SD_MUTEX(un));
16254 
16255     SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16256         "sd_start_direct_priority_command: exit\n");
16257 }
16258 
16259 
16260 /*
16261  * Function: sd_send_request_sense_command
16262  *
16263  * Description: Sends a REQUEST SENSE command to the target
16264  *
16265  *     Context: May be called from interrupt context.
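 *
 *		(For reference, the 6-byte REQUEST SENSE CDB built for
 *		un_rqs_pktp in sd_alloc_rqs() below has the form:
 *
 *		    byte 0: 0x03 (SCMD_REQUEST_SENSE)
 *		    byte 4: allocation length (MAX_SENSE_LENGTH)
 *
 *		with the remaining bytes zero; see the scsi_setup_cdb()
 *		call there.)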
16266 */ 16267 16268 static void 16269 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 16270 struct scsi_pkt *pktp) 16271 { 16272 ASSERT(bp != NULL); 16273 ASSERT(un != NULL); 16274 ASSERT(mutex_owned(SD_MUTEX(un))); 16275 16276 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 16277 "entry: buf:0x%p\n", bp); 16278 16279 /* 16280 * If we are syncing or dumping, then fail the command to avoid a 16281 * recursive callback into scsi_transport(). Also fail the command 16282 * if we are suspended (legacy behavior). 16283 */ 16284 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 16285 (un->un_state == SD_STATE_DUMPING)) { 16286 sd_return_failed_command(un, bp, EIO); 16287 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16288 "sd_send_request_sense_command: syncing/dumping, exit\n"); 16289 return; 16290 } 16291 16292 /* 16293 * Retry the failed command and don't issue the request sense if: 16294 * 1) the sense buf is busy 16295 * 2) we have 1 or more outstanding commands on the target 16296 * (the sense data will be cleared or invalidated anyway) 16297 * 16298 * Note: There could be an issue with not checking a retry limit here; 16299 * the problem is determining which retry limit to check. 16300 */ 16301 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 16302 /* Don't retry if the command is flagged as non-retryable */ 16303 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16304 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 16305 NULL, NULL, 0, un->un_busy_timeout, 16306 kstat_waitq_enter); 16307 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16308 "sd_send_request_sense_command: " 16309 "at full throttle, retrying exit\n"); 16310 } else { 16311 sd_return_failed_command(un, bp, EIO); 16312 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16313 "sd_send_request_sense_command: " 16314 "at full throttle, non-retryable exit\n"); 16315 } 16316 return; 16317 } 16318 16319 sd_mark_rqs_busy(un, bp); 16320 sd_start_cmds(un, un->un_rqs_bp); 16321 16322 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16323 "sd_send_request_sense_command: exit\n"); 16324 } 16325 16326 16327 /* 16328 * Function: sd_mark_rqs_busy 16329 * 16330 * Description: Indicate that the request sense bp for this instance is 16331 * in use. 16332 * 16333 * Context: May be called under interrupt context 16334 */ 16335 16336 static void 16337 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 16338 { 16339 struct sd_xbuf *sense_xp; 16340 16341 ASSERT(un != NULL); 16342 ASSERT(bp != NULL); 16343 ASSERT(mutex_owned(SD_MUTEX(un))); 16344 ASSERT(un->un_sense_isbusy == 0); 16345 16346 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 16347 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 16348 16349 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 16350 ASSERT(sense_xp != NULL); 16351 16352 SD_INFO(SD_LOG_IO, un, 16353 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 16354 16355 ASSERT(sense_xp->xb_pktp != NULL); 16356 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 16357 == (FLAG_SENSING | FLAG_HEAD)); 16358 16359 un->un_sense_isbusy = 1; 16360 un->un_rqs_bp->b_resid = 0; 16361 sense_xp->xb_pktp->pkt_resid = 0; 16362 sense_xp->xb_pktp->pkt_reason = 0; 16363 16364 /* So we can get back the bp at interrupt time! */ 16365 sense_xp->xb_sense_bp = bp; 16366 16367 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 16368 16369 /* 16370 * Mark this buf as awaiting sense data. (This is already set in 16371 * the pkt_flags for the RQS packet.)
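 * Setting FLAG_SENSING on the original command's pkt below marks it as waiting for sense data; sd_mark_rqs_idle() clears the flag once the sense data has been delivered.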
16372 */ 16373 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 16374 16375 /* Request sense down same path */ 16376 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) && 16377 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance) 16378 sense_xp->xb_pktp->pkt_path_instance = 16379 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance; 16380 16381 sense_xp->xb_retry_count = 0; 16382 sense_xp->xb_victim_retry_count = 0; 16383 sense_xp->xb_ua_retry_count = 0; 16384 sense_xp->xb_nr_retry_count = 0; 16385 sense_xp->xb_dma_resid = 0; 16386 16387 /* Clean up the fields for auto-request sense */ 16388 sense_xp->xb_sense_status = 0; 16389 sense_xp->xb_sense_state = 0; 16390 sense_xp->xb_sense_resid = 0; 16391 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 16392 16393 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 16394 } 16395 16396 16397 /* 16398 * Function: sd_mark_rqs_idle 16399 * 16400 * Description: SD_MUTEX must be held continuously through this routine 16401 * to prevent reuse of the rqs struct before the caller can 16402 * complete its processing. 16403 * 16404 * Return Code: Pointer to the RQS buf 16405 * 16406 * Context: May be called under interrupt context 16407 */ 16408 16409 static struct buf * 16410 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 16411 { 16412 struct buf *bp; 16413 ASSERT(un != NULL); 16414 ASSERT(sense_xp != NULL); 16415 ASSERT(mutex_owned(SD_MUTEX(un))); 16416 ASSERT(un->un_sense_isbusy != 0); 16417 16418 un->un_sense_isbusy = 0; 16419 bp = sense_xp->xb_sense_bp; 16420 sense_xp->xb_sense_bp = NULL; 16421 16422 /* This pkt is no longer interested in getting sense data */ 16423 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 16424 16425 return (bp); 16426 } 16427 16428 16429 16430 /* 16431 * Function: sd_alloc_rqs 16432 * 16433 * Description: Set up the unit to receive auto request sense data 16434 * 16435 * Return Code: DDI_SUCCESS or DDI_FAILURE 16436 * 16437 * Context: Called under attach(9E) context 16438 */ 16439 16440 static int 16441 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 16442 { 16443 struct sd_xbuf *xp; 16444 16445 ASSERT(un != NULL); 16446 ASSERT(!mutex_owned(SD_MUTEX(un))); 16447 ASSERT(un->un_rqs_bp == NULL); 16448 ASSERT(un->un_rqs_pktp == NULL); 16449 16450 /* 16451 * First allocate the required buf and scsi_pkt structs, then set up 16452 * the CDB in the scsi_pkt for a REQUEST SENSE command. 16453 */ 16454 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 16455 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 16456 if (un->un_rqs_bp == NULL) { 16457 return (DDI_FAILURE); 16458 } 16459 16460 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 16461 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 16462 16463 if (un->un_rqs_pktp == NULL) { 16464 sd_free_rqs(un); 16465 return (DDI_FAILURE); 16466 } 16467 16468 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 16469 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 16470 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 16471 16472 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 16473 16474 /* Set up the other needed members in the ARQ scsi_pkt. */ 16475 un->un_rqs_pktp->pkt_comp = sdintr; 16476 un->un_rqs_pktp->pkt_time = sd_io_time; 16477 un->un_rqs_pktp->pkt_flags |= 16478 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 16479 16480 /* 16481 * Allocate & init the sd_xbuf struct for the RQS command.
Do not 16482 * provide any initpkt, destroypkt routines as we take care of 16483 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 16484 */ 16485 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 16486 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 16487 xp->xb_pktp = un->un_rqs_pktp; 16488 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16489 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 16490 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 16491 16492 /* 16493 * Save the pointer to the request sense private bp so it can 16494 * be retrieved in sdintr. 16495 */ 16496 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 16497 ASSERT(un->un_rqs_bp->b_private == xp); 16498 16499 /* 16500 * See if the HBA supports auto-request sense for the specified 16501 * target/lun. If it does, then try to enable it (if not already 16502 * enabled). 16503 * 16504 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 16505 * failure, while for other HBAs (pln) scsi_ifsetcap will always 16506 * return success. However, in both of these cases ARQ is always 16507 * enabled and scsi_ifgetcap will always return true. The best approach 16508 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 16509 * 16510 * The 3rd case is an HBA (adp) that always returns enabled on 16511 * scsi_ifgetcap even when it is not enabled; the best approach 16512 * there is to issue a scsi_ifsetcap and then a scsi_ifgetcap. 16513 * Note: this case is to circumvent the Adaptec bug. (x86 only) 16514 */ 16515 16516 if (un->un_f_is_fibre == TRUE) { 16517 un->un_f_arq_enabled = TRUE; 16518 } else { 16519 #if defined(__i386) || defined(__amd64) 16520 /* 16521 * Circumvent the Adaptec bug, remove this code when 16522 * the bug is fixed 16523 */ 16524 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 16525 #endif 16526 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 16527 case 0: 16528 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16529 "sd_alloc_rqs: HBA supports ARQ\n"); 16530 /* 16531 * ARQ is supported by this HBA but currently is not 16532 * enabled. Attempt to enable it and if successful then 16533 * mark this instance as ARQ enabled. 16534 */ 16535 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 16536 == 1) { 16537 /* Successfully enabled ARQ in the HBA */ 16538 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16539 "sd_alloc_rqs: ARQ enabled\n"); 16540 un->un_f_arq_enabled = TRUE; 16541 } else { 16542 /* Could not enable ARQ in the HBA */ 16543 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16544 "sd_alloc_rqs: failed ARQ enable\n"); 16545 un->un_f_arq_enabled = FALSE; 16546 } 16547 break; 16548 case 1: 16549 /* 16550 * ARQ is supported by this HBA and is already enabled. 16551 * Just mark ARQ as enabled for this instance. 16552 */ 16553 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16554 "sd_alloc_rqs: ARQ already enabled\n"); 16555 un->un_f_arq_enabled = TRUE; 16556 break; 16557 default: 16558 /* 16559 * ARQ is not supported by this HBA; disable it for this 16560 * instance. 16561 */ 16562 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16563 "sd_alloc_rqs: HBA does not support ARQ\n"); 16564 un->un_f_arq_enabled = FALSE; 16565 break; 16566 } 16567 } 16568 16569 return (DDI_SUCCESS); 16570 } 16571 16572 16573 /* 16574 * Function: sd_free_rqs 16575 * 16576 * Description: Cleanup for the pre-instance RQS command.
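 * Releases the scsi_pkt and the consistent buffer that were allocated by sd_alloc_rqs(), in that order.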
16577 * 16578 * Context: Kernel thread context 16579 */ 16580 16581 static void 16582 sd_free_rqs(struct sd_lun *un) 16583 { 16584 ASSERT(un != NULL); 16585 16586 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 16587 16588 /* 16589 * If consistent memory is bound to a scsi_pkt, the pkt 16590 * has to be destroyed *before* freeing the consistent memory. 16591 * Don't change the sequence of these operations: 16592 * scsi_destroy_pkt() might access the memory after it has been 16593 * freed by scsi_free_consistent_buf(), which is not allowed. 16594 */ 16595 if (un->un_rqs_pktp != NULL) { 16596 scsi_destroy_pkt(un->un_rqs_pktp); 16597 un->un_rqs_pktp = NULL; 16598 } 16599 16600 if (un->un_rqs_bp != NULL) { 16601 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp); 16602 if (xp != NULL) { 16603 kmem_free(xp, sizeof (struct sd_xbuf)); 16604 } 16605 scsi_free_consistent_buf(un->un_rqs_bp); 16606 un->un_rqs_bp = NULL; 16607 } 16608 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 16609 } 16610 16611 16612 16613 /* 16614 * Function: sd_reduce_throttle 16615 * 16616 * Description: Reduces the maximum # of outstanding commands on a 16617 * target to the current number of outstanding commands. 16618 * Queues a timeout(9F) callback to restore the limit 16619 * after a specified interval has elapsed. 16620 * Typically used when we get a TRAN_BUSY return code 16621 * back from scsi_transport(). 16622 * 16623 * Arguments: un - ptr to the sd_lun softstate struct 16624 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 16625 * 16626 * Context: May be called from interrupt context 16627 */ 16628 16629 static void 16630 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 16631 { 16632 ASSERT(un != NULL); 16633 ASSERT(mutex_owned(SD_MUTEX(un))); 16634 ASSERT(un->un_ncmds_in_transport >= 0); 16635 16636 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 16637 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 16638 un, un->un_throttle, un->un_ncmds_in_transport); 16639 16640 if (un->un_throttle > 1) { 16641 if (un->un_f_use_adaptive_throttle == TRUE) { 16642 switch (throttle_type) { 16643 case SD_THROTTLE_TRAN_BUSY: 16644 if (un->un_busy_throttle == 0) { 16645 un->un_busy_throttle = un->un_throttle; 16646 } 16647 break; 16648 case SD_THROTTLE_QFULL: 16649 un->un_busy_throttle = 0; 16650 break; 16651 default: 16652 ASSERT(FALSE); 16653 } 16654 16655 if (un->un_ncmds_in_transport > 0) { 16656 un->un_throttle = un->un_ncmds_in_transport; 16657 } 16658 16659 } else { 16660 if (un->un_ncmds_in_transport == 0) { 16661 un->un_throttle = 1; 16662 } else { 16663 un->un_throttle = un->un_ncmds_in_transport; 16664 } 16665 } 16666 } 16667 16668 /* Reschedule the timeout if none is currently active */ 16669 if (un->un_reset_throttle_timeid == NULL) { 16670 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 16671 un, SD_THROTTLE_RESET_INTERVAL); 16672 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16673 "sd_reduce_throttle: timeout scheduled!\n"); 16674 } 16675 16676 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 16677 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 16678 } 16679 16680 16681 16682 /* 16683 * Function: sd_restore_throttle 16684 * 16685 * Description: Callback function for timeout(9F). Resets the current 16686 * value of un->un_throttle to its default. 16687 * 16688 * Arguments: arg - pointer to associated softstate for the device.
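 * Note: In the adaptive QFULL case the limit is reopened gradually; each invocation adds max(un_throttle / 10, 1) until un_saved_throttle is reached (e.g. a throttle of 20 steps up 20 -> 22 -> 24 -> ...).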
16689 * 16690 * Context: May be called from interrupt context 16691 */ 16692 16693 static void 16694 sd_restore_throttle(void *arg) 16695 { 16696 struct sd_lun *un = arg; 16697 16698 ASSERT(un != NULL); 16699 ASSERT(!mutex_owned(SD_MUTEX(un))); 16700 16701 mutex_enter(SD_MUTEX(un)); 16702 16703 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 16704 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 16705 16706 un->un_reset_throttle_timeid = NULL; 16707 16708 if (un->un_f_use_adaptive_throttle == TRUE) { 16709 /* 16710 * If un_busy_throttle is nonzero, then it contains the 16711 * value that un_throttle held when we got a TRAN_BUSY back 16712 * from scsi_transport(). We want to revert to this 16713 * value. 16714 * 16715 * In the QFULL case, the throttle limit will incrementally 16716 * increase until it reaches max throttle. 16717 */ 16718 if (un->un_busy_throttle > 0) { 16719 un->un_throttle = un->un_busy_throttle; 16720 un->un_busy_throttle = 0; 16721 } else { 16722 /* 16723 * Increase the throttle by 10% to open the gate 16724 * slowly; schedule another restore if the saved 16725 * throttle has not been reached. 16726 */ 16727 short throttle; 16728 if (sd_qfull_throttle_enable) { 16729 throttle = un->un_throttle + 16730 max((un->un_throttle / 10), 1); 16731 un->un_throttle = 16732 (throttle < un->un_saved_throttle) ? 16733 throttle : un->un_saved_throttle; 16734 if (un->un_throttle < un->un_saved_throttle) { 16735 un->un_reset_throttle_timeid = 16736 timeout(sd_restore_throttle, 16737 un, 16738 SD_QFULL_THROTTLE_RESET_INTERVAL); 16739 } 16740 } 16741 } 16742 16743 /* 16744 * If un_throttle has fallen below the low-water mark, we 16745 * restore the maximum value here (and allow it to ratchet 16746 * down again if necessary). 16747 */ 16748 if (un->un_throttle < un->un_min_throttle) { 16749 un->un_throttle = un->un_saved_throttle; 16750 } 16751 } else { 16752 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 16753 "restoring limit from 0x%x to 0x%x\n", 16754 un->un_throttle, un->un_saved_throttle); 16755 un->un_throttle = un->un_saved_throttle; 16756 } 16757 16758 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 16759 "sd_restore_throttle: calling sd_start_cmds!\n"); 16760 16761 sd_start_cmds(un, NULL); 16762 16763 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 16764 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 16765 un, un->un_throttle); 16766 16767 mutex_exit(SD_MUTEX(un)); 16768 16769 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 16770 } 16771 16772 /* 16773 * Function: sdrunout 16774 * 16775 * Description: Callback routine for scsi_init_pkt when a resource allocation 16776 * fails. 16777 * 16778 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 16779 * soft state instance. 16780 * 16781 * Return Code: The scsi_init_pkt routine allows for the callback function to 16782 * return a 0 indicating the callback should be rescheduled or a 1 16783 * indicating not to reschedule. This routine always returns 1 16784 * because the driver always provides a callback function to 16785 * scsi_init_pkt. This results in a callback always being scheduled 16786 * (via the scsi_init_pkt callback implementation) if a resource 16787 * failure occurs. 16788 * 16789 * Context: This callback function may not block or call routines that block 16790 * 16791 * Note: Using the scsi_init_pkt callback facility can result in an I/O 16792 * request persisting at the head of the list which cannot be 16793 * satisfied even after multiple retries.
In the future the driver 16794 * may implement some type of maximum runout count before failing 16795 * an I/O. 16796 */ 16797 16798 static int 16799 sdrunout(caddr_t arg) 16800 { 16801 struct sd_lun *un = (struct sd_lun *)arg; 16802 16803 ASSERT(un != NULL); 16804 ASSERT(!mutex_owned(SD_MUTEX(un))); 16805 16806 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 16807 16808 mutex_enter(SD_MUTEX(un)); 16809 sd_start_cmds(un, NULL); 16810 mutex_exit(SD_MUTEX(un)); 16811 /* 16812 * This callback routine always returns 1 (i.e. do not reschedule) 16813 * because we always specify sdrunout as the callback handler for 16814 * scsi_init_pkt inside the call to sd_start_cmds. 16815 */ 16816 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 16817 return (1); 16818 } 16819 16820 16821 /* 16822 * Function: sdintr 16823 * 16824 * Description: Completion callback routine for scsi_pkt(9S) structs 16825 * sent to the HBA driver via scsi_transport(9F). 16826 * 16827 * Context: Interrupt context 16828 */ 16829 16830 static void 16831 sdintr(struct scsi_pkt *pktp) 16832 { 16833 struct buf *bp; 16834 struct sd_xbuf *xp; 16835 struct sd_lun *un; 16836 size_t actual_len; 16837 sd_ssc_t *sscp; 16838 16839 ASSERT(pktp != NULL); 16840 bp = (struct buf *)pktp->pkt_private; 16841 ASSERT(bp != NULL); 16842 xp = SD_GET_XBUF(bp); 16843 ASSERT(xp != NULL); 16844 ASSERT(xp->xb_pktp != NULL); 16845 un = SD_GET_UN(bp); 16846 ASSERT(un != NULL); 16847 ASSERT(!mutex_owned(SD_MUTEX(un))); 16848 16849 #ifdef SD_FAULT_INJECTION 16850 16851 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 16852 /* SD FaultInjection */ 16853 sd_faultinjection(pktp); 16854 16855 #endif /* SD_FAULT_INJECTION */ 16856 16857 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 16858 " xp:0x%p, un:0x%p\n", bp, xp, un); 16859 16860 mutex_enter(SD_MUTEX(un)); 16861 16862 ASSERT(un->un_fm_private != NULL); 16863 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 16864 ASSERT(sscp != NULL); 16865 16866 /* Reduce the count of the #commands currently in transport */ 16867 un->un_ncmds_in_transport--; 16868 ASSERT(un->un_ncmds_in_transport >= 0); 16869 16870 /* Increment counter to indicate that the callback routine is active */ 16871 un->un_in_callback++; 16872 16873 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 16874 16875 #ifdef SDDEBUG 16876 if (bp == un->un_retry_bp) { 16877 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 16878 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 16879 un, un->un_retry_bp, un->un_ncmds_in_transport); 16880 } 16881 #endif 16882 16883 /* 16884 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 16885 * state if needed. 16886 */ 16887 if (pktp->pkt_reason == CMD_DEV_GONE) { 16888 /* Prevent multiple console messages for the same failure. */ 16889 if (un->un_last_pkt_reason != CMD_DEV_GONE) { 16890 un->un_last_pkt_reason = CMD_DEV_GONE; 16891 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16892 "Command failed to complete...Device is gone\n"); 16893 } 16894 if (un->un_mediastate != DKIO_DEV_GONE) { 16895 un->un_mediastate = DKIO_DEV_GONE; 16896 cv_broadcast(&un->un_state_cv); 16897 } 16898 /* 16899 * If the command happens to be the REQUEST SENSE command, 16900 * free up the rqs buf and fail the original command.
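 * (sd_mark_rqs_idle() returns the bp of the original command that was waiting on the sense data; that is the command that gets failed below.)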
16901 */ 16902 if (bp == un->un_rqs_bp) { 16903 bp = sd_mark_rqs_idle(un, xp); 16904 } 16905 sd_return_failed_command(un, bp, EIO); 16906 goto exit; 16907 } 16908 16909 if (pktp->pkt_state & STATE_XARQ_DONE) { 16910 SD_TRACE(SD_LOG_COMMON, un, 16911 "sdintr: extra sense data received. pkt=%p\n", pktp); 16912 } 16913 16914 /* 16915 * First see if the pkt has auto-request sense data with it.... 16916 * Look at the packet state first so we don't take a performance 16917 * hit looking at the arq enabled flag unless absolutely necessary. 16918 */ 16919 if ((pktp->pkt_state & STATE_ARQ_DONE) && 16920 (un->un_f_arq_enabled == TRUE)) { 16921 /* 16922 * The HBA did an auto request sense for this command so check 16923 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16924 * driver command that should not be retried. 16925 */ 16926 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16927 /* 16928 * Save the relevant sense info into the xp for the 16929 * original cmd. 16930 */ 16931 struct scsi_arq_status *asp; 16932 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16933 xp->xb_sense_status = 16934 *((uchar_t *)(&(asp->sts_rqpkt_status))); 16935 xp->xb_sense_state = asp->sts_rqpkt_state; 16936 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16937 if (pktp->pkt_state & STATE_XARQ_DONE) { 16938 actual_len = MAX_SENSE_LENGTH - 16939 xp->xb_sense_resid; 16940 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16941 MAX_SENSE_LENGTH); 16942 } else { 16943 if (xp->xb_sense_resid > SENSE_LENGTH) { 16944 actual_len = MAX_SENSE_LENGTH - 16945 xp->xb_sense_resid; 16946 } else { 16947 actual_len = SENSE_LENGTH - 16948 xp->xb_sense_resid; 16949 } 16950 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16951 if ((((struct uscsi_cmd *) 16952 (xp->xb_pktinfo))->uscsi_rqlen) > 16953 actual_len) { 16954 xp->xb_sense_resid = 16955 (((struct uscsi_cmd *) 16956 (xp->xb_pktinfo))-> 16957 uscsi_rqlen) - actual_len; 16958 } else { 16959 xp->xb_sense_resid = 0; 16960 } 16961 } 16962 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16963 SENSE_LENGTH); 16964 } 16965 16966 /* fail the command */ 16967 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16968 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 16969 sd_return_failed_command(un, bp, EIO); 16970 goto exit; 16971 } 16972 16973 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16974 /* 16975 * We want to either retry or fail this command, so free 16976 * the DMA resources here. If we retry the command then 16977 * the DMA resources will be reallocated in sd_start_cmds(). 16978 * Note that when PKT_DMA_PARTIAL is used, this reallocation 16979 * causes the *entire* transfer to start over again from the 16980 * beginning of the request, even for PARTIAL chunks that 16981 * have already transferred successfully. 
16982 */ 16983 if ((un->un_f_is_fibre == TRUE) && 16984 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16985 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16986 scsi_dmafree(pktp); 16987 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16988 } 16989 #endif 16990 16991 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16992 "sdintr: arq done, sd_handle_auto_request_sense\n"); 16993 16994 sd_handle_auto_request_sense(un, bp, xp, pktp); 16995 goto exit; 16996 } 16997 16998 /* Next see if this is the REQUEST SENSE pkt for the instance */ 16999 if (pktp->pkt_flags & FLAG_SENSING) { 17000 /* This pktp is from the unit's REQUEST_SENSE command */ 17001 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17002 "sdintr: sd_handle_request_sense\n"); 17003 sd_handle_request_sense(un, bp, xp, pktp); 17004 goto exit; 17005 } 17006 17007 /* 17008 * Check to see if the command successfully completed as requested; 17009 * this is the most common case (and also the hot performance path). 17010 * 17011 * Requirements for successful completion are: 17012 * pkt_reason is CMD_CMPLT and packet status is status good. 17013 * In addition: 17014 * - A residual of zero indicates successful completion no matter what 17015 * the command is. 17016 * - If the residual is not zero and the command is not a read or 17017 * write, then it's still defined as successful completion. In other 17018 * words, if the command is a read or write the residual must be 17019 * zero for successful completion. 17020 * - If the residual is not zero and the command is a read or 17021 * write, and it's a USCSICMD, then it's still defined as 17022 * successful completion. 17023 */ 17024 if ((pktp->pkt_reason == CMD_CMPLT) && 17025 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 17026 17027 /* 17028 * Since this command is returned with a good status, we 17029 * can reset the count for Sonoma failover. 17030 */ 17031 un->un_sonoma_failure_count = 0; 17032 17033 /* 17034 * Return all USCSI commands on good status 17035 */ 17036 if (pktp->pkt_resid == 0) { 17037 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17038 "sdintr: returning command for resid == 0\n"); 17039 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 17040 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 17041 SD_UPDATE_B_RESID(bp, pktp); 17042 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17043 "sdintr: returning command for resid != 0\n"); 17044 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 17045 SD_UPDATE_B_RESID(bp, pktp); 17046 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17047 "sdintr: returning uscsi command\n"); 17048 } else { 17049 goto not_successful; 17050 } 17051 sd_return_command(un, bp); 17052 17053 /* 17054 * Decrement counter to indicate that the callback routine 17055 * is done. 17056 */ 17057 un->un_in_callback--; 17058 ASSERT(un->un_in_callback >= 0); 17059 mutex_exit(SD_MUTEX(un)); 17060 17061 return; 17062 } 17063 17064 not_successful: 17065 17066 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 17067 /* 17068 * The following is based upon knowledge of the underlying transport 17069 * and its use of DMA resources. This code should be removed when 17070 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 17071 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 17072 * and sd_start_cmds(). 17073 * 17074 * Free any DMA resources associated with this command if there 17075 * is a chance it could be retried or enqueued for later retry. 
17076 * If we keep the DMA binding then mpxio cannot reissue the 17077 * command on another path whenever a path failure occurs. 17078 * 17079 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 17080 * causes the *entire* transfer to start over again from the 17081 * beginning of the request, even for PARTIAL chunks that 17082 * have already transferred successfully. 17083 * 17084 * This is only done for non-uscsi commands (and also skipped for the 17085 * driver's internal RQS command). Also just do this for Fibre Channel 17086 * devices as these are the only ones that support mpxio. 17087 */ 17088 if ((un->un_f_is_fibre == TRUE) && 17089 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 17090 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 17091 scsi_dmafree(pktp); 17092 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 17093 } 17094 #endif 17095 17096 /* 17097 * The command did not successfully complete as requested so check 17098 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 17099 * driver command that should not be retried so just return. If 17100 * FLAG_DIAGNOSE is not set the error will be processed below. 17101 */ 17102 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 17103 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17104 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 17105 /* 17106 * Issue a request sense if a check condition caused the error 17107 * (we handle the auto request sense case above), otherwise 17108 * just fail the command. 17109 */ 17110 if ((pktp->pkt_reason == CMD_CMPLT) && 17111 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 17112 sd_send_request_sense_command(un, bp, pktp); 17113 } else { 17114 sd_return_failed_command(un, bp, EIO); 17115 } 17116 goto exit; 17117 } 17118 17119 /* 17120 * The command did not successfully complete as requested so process 17121 * the error, retry, and/or attempt recovery. 17122 */ 17123 switch (pktp->pkt_reason) { 17124 case CMD_CMPLT: 17125 switch (SD_GET_PKT_STATUS(pktp)) { 17126 case STATUS_GOOD: 17127 /* 17128 * The command completed successfully with a non-zero 17129 * residual 17130 */ 17131 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17132 "sdintr: STATUS_GOOD \n"); 17133 sd_pkt_status_good(un, bp, xp, pktp); 17134 break; 17135 17136 case STATUS_CHECK: 17137 case STATUS_TERMINATED: 17138 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17139 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 17140 sd_pkt_status_check_condition(un, bp, xp, pktp); 17141 break; 17142 17143 case STATUS_BUSY: 17144 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17145 "sdintr: STATUS_BUSY\n"); 17146 sd_pkt_status_busy(un, bp, xp, pktp); 17147 break; 17148 17149 case STATUS_RESERVATION_CONFLICT: 17150 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17151 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 17152 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 17153 break; 17154 17155 case STATUS_QFULL: 17156 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17157 "sdintr: STATUS_QFULL\n"); 17158 sd_pkt_status_qfull(un, bp, xp, pktp); 17159 break; 17160 17161 case STATUS_MET: 17162 case STATUS_INTERMEDIATE: 17163 case STATUS_SCSI2: 17164 case STATUS_INTERMEDIATE_MET: 17165 case STATUS_ACA_ACTIVE: 17166 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17167 "Unexpected SCSI status received: 0x%x\n", 17168 SD_GET_PKT_STATUS(pktp)); 17169 /* 17170 * Mark the ssc_flags when detected invalid status 17171 * code for non-USCSI command. 
17172 */ 17173 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17174 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 17175 0, "stat-code"); 17176 } 17177 sd_return_failed_command(un, bp, EIO); 17178 break; 17179 17180 default: 17181 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17182 "Invalid SCSI status received: 0x%x\n", 17183 SD_GET_PKT_STATUS(pktp)); 17184 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17185 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 17186 0, "stat-code"); 17187 } 17188 sd_return_failed_command(un, bp, EIO); 17189 break; 17190 17191 } 17192 break; 17193 17194 case CMD_INCOMPLETE: 17195 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17196 "sdintr: CMD_INCOMPLETE\n"); 17197 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 17198 break; 17199 case CMD_TRAN_ERR: 17200 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17201 "sdintr: CMD_TRAN_ERR\n"); 17202 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 17203 break; 17204 case CMD_RESET: 17205 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17206 "sdintr: CMD_RESET \n"); 17207 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 17208 break; 17209 case CMD_ABORTED: 17210 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17211 "sdintr: CMD_ABORTED \n"); 17212 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 17213 break; 17214 case CMD_TIMEOUT: 17215 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17216 "sdintr: CMD_TIMEOUT\n"); 17217 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 17218 break; 17219 case CMD_UNX_BUS_FREE: 17220 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17221 "sdintr: CMD_UNX_BUS_FREE \n"); 17222 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 17223 break; 17224 case CMD_TAG_REJECT: 17225 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17226 "sdintr: CMD_TAG_REJECT\n"); 17227 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 17228 break; 17229 default: 17230 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17231 "sdintr: default\n"); 17232 /* 17233 * Mark the ssc_flags for detecting invalid pkt_reason. 17234 */ 17235 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17236 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON, 17237 0, "pkt-reason"); 17238 } 17239 sd_pkt_reason_default(un, bp, xp, pktp); 17240 break; 17241 } 17242 17243 exit: 17244 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 17245 17246 /* Decrement counter to indicate that the callback routine is done. */ 17247 un->un_in_callback--; 17248 ASSERT(un->un_in_callback >= 0); 17249 17250 /* 17251 * At this point, the pkt has been dispatched, i.e., it is either 17252 * being re-tried or has been returned to its caller and should 17253 * not be referenced. 17254 */ 17255 17256 mutex_exit(SD_MUTEX(un)); 17257 } 17258 17259 17260 /* 17261 * Function: sd_print_incomplete_msg 17262 * 17263 * Description: Prints the error message for a CMD_INCOMPLETE error. 17264 * 17265 * Arguments: un - ptr to associated softstate for the device. 17266 * bp - ptr to the buf(9S) for the command. 17267 * arg - message string ptr 17268 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 17269 * or SD_NO_RETRY_ISSUED.
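 * Note: The message is suppressed when FLAG_SILENT is set in the pkt.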
17270 * 17271 * Context: May be called under interrupt context 17272 */ 17273 17274 static void 17275 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 17276 { 17277 struct scsi_pkt *pktp; 17278 char *msgp; 17279 char *cmdp = arg; 17280 17281 ASSERT(un != NULL); 17282 ASSERT(mutex_owned(SD_MUTEX(un))); 17283 ASSERT(bp != NULL); 17284 ASSERT(arg != NULL); 17285 pktp = SD_GET_PKTP(bp); 17286 ASSERT(pktp != NULL); 17287 17288 switch (code) { 17289 case SD_DELAYED_RETRY_ISSUED: 17290 case SD_IMMEDIATE_RETRY_ISSUED: 17291 msgp = "retrying"; 17292 break; 17293 case SD_NO_RETRY_ISSUED: 17294 default: 17295 msgp = "giving up"; 17296 break; 17297 } 17298 17299 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17300 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17301 "incomplete %s- %s\n", cmdp, msgp); 17302 } 17303 } 17304 17305 17306 17307 /* 17308 * Function: sd_pkt_status_good 17309 * 17310 * Description: Processing for a STATUS_GOOD code in pkt_status. 17311 * 17312 * Context: May be called under interrupt context 17313 */ 17314 17315 static void 17316 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 17317 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17318 { 17319 char *cmdp; 17320 17321 ASSERT(un != NULL); 17322 ASSERT(mutex_owned(SD_MUTEX(un))); 17323 ASSERT(bp != NULL); 17324 ASSERT(xp != NULL); 17325 ASSERT(pktp != NULL); 17326 ASSERT(pktp->pkt_reason == CMD_CMPLT); 17327 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 17328 ASSERT(pktp->pkt_resid != 0); 17329 17330 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 17331 17332 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17333 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 17334 case SCMD_READ: 17335 cmdp = "read"; 17336 break; 17337 case SCMD_WRITE: 17338 cmdp = "write"; 17339 break; 17340 default: 17341 SD_UPDATE_B_RESID(bp, pktp); 17342 sd_return_command(un, bp); 17343 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 17344 return; 17345 } 17346 17347 /* 17348 * See if we can retry the read/write, preferably immediately. 17349 * If retries are exhausted, then sd_retry_command() will update 17350 * the b_resid count. 17351 */ 17352 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 17353 cmdp, EIO, (clock_t)0, NULL); 17354 17355 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 17356 } 17357 17358 17359 17360 17361 17362 /* 17363 * Function: sd_handle_request_sense 17364 * 17365 * Description: Processing for non-auto Request Sense command. 17366 * 17367 * Arguments: un - ptr to associated softstate 17368 * sense_bp - ptr to buf(9S) for the RQS command 17369 * sense_xp - ptr to the sd_xbuf for the RQS command 17370 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 17371 * 17372 * Context: May be called under interrupt context 17373 */ 17374 17375 static void 17376 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 17377 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 17378 { 17379 struct buf *cmd_bp; /* buf for the original command */ 17380 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 17381 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 17382 size_t actual_len; /* actual sense data length */ 17383 17384 ASSERT(un != NULL); 17385 ASSERT(mutex_owned(SD_MUTEX(un))); 17386 ASSERT(sense_bp != NULL); 17387 ASSERT(sense_xp != NULL); 17388 ASSERT(sense_pktp != NULL); 17389 17390 /* 17391 * Note the sense_bp, sense_xp, and sense_pktp here are for the 17392 * RQS command and not the original command.
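 * The original command's bp is recovered from sense_xp->xb_sense_bp, where sd_mark_rqs_busy() saved it when the sense command was issued.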
17393 */ 17394 ASSERT(sense_pktp == un->un_rqs_pktp); 17395 ASSERT(sense_bp == un->un_rqs_bp); 17396 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 17397 (FLAG_SENSING | FLAG_HEAD)); 17398 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 17399 FLAG_SENSING) == FLAG_SENSING); 17400 17401 /* These are the bp, xp, and pktp for the original command */ 17402 cmd_bp = sense_xp->xb_sense_bp; 17403 cmd_xp = SD_GET_XBUF(cmd_bp); 17404 cmd_pktp = SD_GET_PKTP(cmd_bp); 17405 17406 if (sense_pktp->pkt_reason != CMD_CMPLT) { 17407 /* 17408 * The REQUEST SENSE command failed. Release the REQUEST 17409 * SENSE command for re-use, get back the bp for the original 17410 * command, and attempt to re-try the original command if 17411 * FLAG_DIAGNOSE is not set in the original packet. 17412 */ 17413 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17414 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 17415 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 17416 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 17417 NULL, NULL, EIO, (clock_t)0, NULL); 17418 return; 17419 } 17420 } 17421 17422 /* 17423 * Save the relevant sense info into the xp for the original cmd. 17424 * 17425 * Note: if the request sense failed the state info will be zero 17426 * as set in sd_mark_rqs_busy() 17427 */ 17428 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 17429 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 17430 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 17431 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 17432 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 17433 SENSE_LENGTH)) { 17434 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 17435 MAX_SENSE_LENGTH); 17436 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 17437 } else { 17438 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 17439 SENSE_LENGTH); 17440 if (actual_len < SENSE_LENGTH) { 17441 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 17442 } else { 17443 cmd_xp->xb_sense_resid = 0; 17444 } 17445 } 17446 17447 /* 17448 * Free up the RQS command.... 17449 * NOTE: 17450 * Must do this BEFORE calling sd_validate_sense_data! 17451 * sd_validate_sense_data may return the original command in 17452 * which case the pkt will be freed and the flags can no 17453 * longer be touched. 17454 * SD_MUTEX is held through this process until the command 17455 * is dispatched based upon the sense data, so there are 17456 * no race conditions. 17457 */ 17458 (void) sd_mark_rqs_idle(un, sense_xp); 17459 17460 /* 17461 * For a retryable command see if we have valid sense data, if so then 17462 * turn it over to sd_decode_sense() to figure out the right course of 17463 * action. Just fail a non-retryable command. 17464 */ 17465 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 17466 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 17467 SD_SENSE_DATA_IS_VALID) { 17468 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 17469 } 17470 } else { 17471 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 17472 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 17473 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 17474 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 17475 sd_return_failed_command(un, cmd_bp, EIO); 17476 } 17477 } 17478 17479 17480 17481 17482 /* 17483 * Function: sd_handle_auto_request_sense 17484 * 17485 * Description: Processing for auto-request sense information. 
17486 * 17487 * Arguments: un - ptr to associated softstate 17488 * bp - ptr to buf(9S) for the command 17489 * xp - ptr to the sd_xbuf for the command 17490 * pktp - ptr to the scsi_pkt(9S) for the command 17491 * 17492 * Context: May be called under interrupt context 17493 */ 17494 17495 static void 17496 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 17497 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17498 { 17499 struct scsi_arq_status *asp; 17500 size_t actual_len; 17501 17502 ASSERT(un != NULL); 17503 ASSERT(mutex_owned(SD_MUTEX(un))); 17504 ASSERT(bp != NULL); 17505 ASSERT(xp != NULL); 17506 ASSERT(pktp != NULL); 17507 ASSERT(pktp != un->un_rqs_pktp); 17508 ASSERT(bp != un->un_rqs_bp); 17509 17510 /* 17511 * For auto-request sense, we get a scsi_arq_status back from 17512 * the HBA, with the sense data in the sts_sensedata member. 17513 * The pkt_scbp of the packet points to this scsi_arq_status. 17514 */ 17515 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 17516 17517 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 17518 /* 17519 * The auto REQUEST SENSE failed; see if we can re-try 17520 * the original command. 17521 */ 17522 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17523 "auto request sense failed (reason=%s)\n", 17524 scsi_rname(asp->sts_rqpkt_reason)); 17525 17526 sd_reset_target(un, pktp); 17527 17528 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17529 NULL, NULL, EIO, (clock_t)0, NULL); 17530 return; 17531 } 17532 17533 /* Save the relevant sense info into the xp for the original cmd. */ 17534 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 17535 xp->xb_sense_state = asp->sts_rqpkt_state; 17536 xp->xb_sense_resid = asp->sts_rqpkt_resid; 17537 if (xp->xb_sense_state & STATE_XARQ_DONE) { 17538 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 17539 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 17540 MAX_SENSE_LENGTH); 17541 } else { 17542 if (xp->xb_sense_resid > SENSE_LENGTH) { 17543 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 17544 } else { 17545 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 17546 } 17547 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 17548 if ((((struct uscsi_cmd *) 17549 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 17550 xp->xb_sense_resid = (((struct uscsi_cmd *) 17551 (xp->xb_pktinfo))->uscsi_rqlen) - 17552 actual_len; 17553 } else { 17554 xp->xb_sense_resid = 0; 17555 } 17556 } 17557 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 17558 } 17559 17560 /* 17561 * See if we have valid sense data, if so then turn it over to 17562 * sd_decode_sense() to figure out the right course of action. 17563 */ 17564 if (sd_validate_sense_data(un, bp, xp, actual_len) == 17565 SD_SENSE_DATA_IS_VALID) { 17566 sd_decode_sense(un, bp, xp, pktp); 17567 } 17568 } 17569 17570 17571 /* 17572 * Function: sd_print_sense_failed_msg 17573 * 17574 * Description: Print log message when RQS has failed. 
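 * The message is logged only when no retry was issued (code == SD_NO_RETRY_ISSUED) and a message string was supplied.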
17575 * 17576 * Arguments: un - ptr to associated softstate 17577 * bp - ptr to buf(9S) for the command 17578 * arg - generic message string ptr 17579 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17580 * or SD_NO_RETRY_ISSUED 17581 * 17582 * Context: May be called from interrupt context 17583 */ 17584 17585 static void 17586 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 17587 int code) 17588 { 17589 char *msgp = arg; 17590 17591 ASSERT(un != NULL); 17592 ASSERT(mutex_owned(SD_MUTEX(un))); 17593 ASSERT(bp != NULL); 17594 17595 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 17596 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 17597 } 17598 } 17599 17600 17601 /* 17602 * Function: sd_validate_sense_data 17603 * 17604 * Description: Check the given sense data for validity. 17605 * If the sense data is not valid, the command will 17606 * be either failed or retried! 17607 * 17608 * Return Code: SD_SENSE_DATA_IS_INVALID 17609 * SD_SENSE_DATA_IS_VALID 17610 * 17611 * Context: May be called from interrupt context 17612 */ 17613 17614 static int 17615 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17616 size_t actual_len) 17617 { 17618 struct scsi_extended_sense *esp; 17619 struct scsi_pkt *pktp; 17620 char *msgp = NULL; 17621 sd_ssc_t *sscp; 17622 17623 ASSERT(un != NULL); 17624 ASSERT(mutex_owned(SD_MUTEX(un))); 17625 ASSERT(bp != NULL); 17626 ASSERT(bp != un->un_rqs_bp); 17627 ASSERT(xp != NULL); 17628 ASSERT(un->un_fm_private != NULL); 17629 17630 pktp = SD_GET_PKTP(bp); 17631 ASSERT(pktp != NULL); 17632 17633 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 17634 ASSERT(sscp != NULL); 17635 17636 /* 17637 * Check the status of the RQS command (auto or manual). 17638 */ 17639 switch (xp->xb_sense_status & STATUS_MASK) { 17640 case STATUS_GOOD: 17641 break; 17642 17643 case STATUS_RESERVATION_CONFLICT: 17644 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 17645 return (SD_SENSE_DATA_IS_INVALID); 17646 17647 case STATUS_BUSY: 17648 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17649 "Busy Status on REQUEST SENSE\n"); 17650 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 17651 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 17652 return (SD_SENSE_DATA_IS_INVALID); 17653 17654 case STATUS_QFULL: 17655 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17656 "QFULL Status on REQUEST SENSE\n"); 17657 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 17658 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 17659 return (SD_SENSE_DATA_IS_INVALID); 17660 17661 case STATUS_CHECK: 17662 case STATUS_TERMINATED: 17663 msgp = "Check Condition on REQUEST SENSE\n"; 17664 goto sense_failed; 17665 17666 default: 17667 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 17668 goto sense_failed; 17669 } 17670 17671 /* 17672 * See if we got the minimum required amount of sense data. 17673 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 17674 * or less. 
17675 */ 17676 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 17677 (actual_len == 0)) { 17678 msgp = "Request Sense couldn't get sense data\n"; 17679 goto sense_failed; 17680 } 17681 17682 if (actual_len < SUN_MIN_SENSE_LENGTH) { 17683 msgp = "Not enough sense information\n"; 17684 /* Mark the ssc_flags for detecting invalid sense data */ 17685 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17686 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17687 "sense-data"); 17688 } 17689 goto sense_failed; 17690 } 17691 17692 /* 17693 * We require the extended sense data 17694 */ 17695 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 17696 if (esp->es_class != CLASS_EXTENDED_SENSE) { 17697 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17698 static char tmp[8]; 17699 static char buf[148]; 17700 char *p = (char *)(xp->xb_sense_data); 17701 int i; 17702 17703 mutex_enter(&sd_sense_mutex); 17704 (void) strcpy(buf, "undecodable sense information:"); 17705 for (i = 0; i < actual_len; i++) { 17706 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 17707 (void) strcpy(&buf[strlen(buf)], tmp); 17708 } 17709 i = strlen(buf); 17710 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 17711 17712 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) { 17713 scsi_log(SD_DEVINFO(un), sd_label, 17714 CE_WARN, buf); 17715 } 17716 mutex_exit(&sd_sense_mutex); 17717 } 17718 17719 /* Mark the ssc_flags for detecting invalid sense data */ 17720 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17721 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17722 "sense-data"); 17723 } 17724 17725 /* Note: Legacy behavior, fail the command with no retry */ 17726 sd_return_failed_command(un, bp, EIO); 17727 return (SD_SENSE_DATA_IS_INVALID); 17728 } 17729 17730 /* 17731 * Check that es_code is valid (es_class concatenated with es_code 17732 * makes up the "response code" field). es_class will always be 7, so 17733 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 17734 * format. 17735 */ 17736 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 17737 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 17738 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 17739 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 17740 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 17741 /* Mark the ssc_flags for detecting invalid sense data */ 17742 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17743 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17744 "sense-data"); 17745 } 17746 goto sense_failed; 17747 } 17748 17749 return (SD_SENSE_DATA_IS_VALID); 17750 17751 sense_failed: 17752 /* 17753 * If the request sense failed (for whatever reason), attempt 17754 * to retry the original command. 17755 */ 17756 #if defined(__i386) || defined(__amd64) 17757 /* 17758 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 17759 * sddef.h for the SPARC platform, while x86 uses one binary 17760 * for both SCSI/FC. 17761 * The SD_RETRY_DELAY value needs to be adjusted here 17762 * whenever SD_RETRY_DELAY changes in sddef.h 17763 */ 17764 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17765 sd_print_sense_failed_msg, msgp, EIO, 17766 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 17767 #else 17768 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17769 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 17770 #endif 17771 17772 return (SD_SENSE_DATA_IS_INVALID); 17773 } 17774 17775 /* 17776 * Function: sd_decode_sense 17777 * 17778 * Description: Take recovery action(s) when SCSI Sense Data is received. 17779 * 17780 * Context: Interrupt context.
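 * Note: The sense key is extracted with scsi_sense_key() and dispatched to the matching sd_sense_key_*() handler below.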
17781 */ 17782 17783 static void 17784 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17785 struct scsi_pkt *pktp) 17786 { 17787 uint8_t sense_key; 17788 17789 ASSERT(un != NULL); 17790 ASSERT(mutex_owned(SD_MUTEX(un))); 17791 ASSERT(bp != NULL); 17792 ASSERT(bp != un->un_rqs_bp); 17793 ASSERT(xp != NULL); 17794 ASSERT(pktp != NULL); 17795 17796 sense_key = scsi_sense_key(xp->xb_sense_data); 17797 17798 switch (sense_key) { 17799 case KEY_NO_SENSE: 17800 sd_sense_key_no_sense(un, bp, xp, pktp); 17801 break; 17802 case KEY_RECOVERABLE_ERROR: 17803 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 17804 bp, xp, pktp); 17805 break; 17806 case KEY_NOT_READY: 17807 sd_sense_key_not_ready(un, xp->xb_sense_data, 17808 bp, xp, pktp); 17809 break; 17810 case KEY_MEDIUM_ERROR: 17811 case KEY_HARDWARE_ERROR: 17812 sd_sense_key_medium_or_hardware_error(un, 17813 xp->xb_sense_data, bp, xp, pktp); 17814 break; 17815 case KEY_ILLEGAL_REQUEST: 17816 sd_sense_key_illegal_request(un, bp, xp, pktp); 17817 break; 17818 case KEY_UNIT_ATTENTION: 17819 sd_sense_key_unit_attention(un, xp->xb_sense_data, 17820 bp, xp, pktp); 17821 break; 17822 case KEY_WRITE_PROTECT: 17823 case KEY_VOLUME_OVERFLOW: 17824 case KEY_MISCOMPARE: 17825 sd_sense_key_fail_command(un, bp, xp, pktp); 17826 break; 17827 case KEY_BLANK_CHECK: 17828 sd_sense_key_blank_check(un, bp, xp, pktp); 17829 break; 17830 case KEY_ABORTED_COMMAND: 17831 sd_sense_key_aborted_command(un, bp, xp, pktp); 17832 break; 17833 case KEY_VENDOR_UNIQUE: 17834 case KEY_COPY_ABORTED: 17835 case KEY_EQUAL: 17836 case KEY_RESERVED: 17837 default: 17838 sd_sense_key_default(un, xp->xb_sense_data, 17839 bp, xp, pktp); 17840 break; 17841 } 17842 } 17843 17844 17845 /* 17846 * Function: sd_dump_memory 17847 * 17848 * Description: Debug logging routine to print the contents of a user provided 17849 * buffer. The output of the buffer is broken up into 256-byte 17850 * segments due to a size constraint of the scsi_log 17851 * implementation. 17852 * 17853 * Arguments: un - ptr to softstate 17854 * comp - component mask 17855 * title - "title" string to precede data when printed 17856 * data - ptr to data block to be printed 17857 * len - size of data block to be printed 17858 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 17859 * 17860 * Context: May be called from interrupt context 17861 */ 17862 17863 #define SD_DUMP_MEMORY_BUF_SIZE 256 17864 17865 static char *sd_dump_format_string[] = { 17866 " 0x%02x", 17867 " %c" 17868 }; 17869 17870 static void 17871 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 17872 int len, int fmt) 17873 { 17874 int i, j; 17875 int avail_count; 17876 int start_offset; 17877 int end_offset; 17878 size_t entry_len; 17879 char *bufp; 17880 char *local_buf; 17881 char *format_string; 17882 17883 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 17884 17885 /* 17886 * In the debug version of the driver, this function is called from a 17887 * number of places which are NOPs in the release driver. 17888 * The debug driver therefore has additional methods of filtering 17889 * debug output. 17890 */ 17891 #ifdef SDDEBUG 17892 /* 17893 * In the debug version of the driver we can reduce the amount of debug 17894 * messages by setting sd_error_level to something other than 17895 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 17896 * sd_component_mask.
17897 */ 17898 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 17899 (sd_error_level != SCSI_ERR_ALL)) { 17900 return; 17901 } 17902 if (((sd_component_mask & comp) == 0) || 17903 (sd_error_level != SCSI_ERR_ALL)) { 17904 return; 17905 } 17906 #else 17907 if (sd_error_level != SCSI_ERR_ALL) { 17908 return; 17909 } 17910 #endif 17911 17912 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 17913 bufp = local_buf; 17914 /* 17915 * Available length is the length of local_buf[], minus the 17916 * length of the title string, minus one for the ":", minus 17917 * one for the newline, minus one for the NULL terminator. 17918 * This gives the #bytes available for holding the printed 17919 * values from the given data buffer. 17920 */ 17921 if (fmt == SD_LOG_HEX) { 17922 format_string = sd_dump_format_string[0]; 17923 } else /* SD_LOG_CHAR */ { 17924 format_string = sd_dump_format_string[1]; 17925 } 17926 /* 17927 * Available count is the number of elements from the given 17928 * data buffer that we can fit into the available length. 17929 * This is based upon the size of the format string used. 17930 * Make one entry and find its size. 17931 */ 17932 (void) sprintf(bufp, format_string, data[0]); 17933 entry_len = strlen(bufp); 17934 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 17935 17936 j = 0; 17937 while (j < len) { 17938 bufp = local_buf; 17939 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 17940 start_offset = j; 17941 17942 end_offset = start_offset + avail_count; 17943 17944 (void) sprintf(bufp, "%s:", title); 17945 bufp += strlen(bufp); 17946 for (i = start_offset; ((i < end_offset) && (j < len)); 17947 i++, j++) { 17948 (void) sprintf(bufp, format_string, data[i]); 17949 bufp += entry_len; 17950 } 17951 (void) sprintf(bufp, "\n"); 17952 17953 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 17954 } 17955 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 17956 } 17957 17958 /* 17959 * Function: sd_print_sense_msg 17960 * 17961 * Description: Log a message based upon the given sense data.
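 * If a retry was issued, the severity is downgraded to SCSI_ERR_RETRYABLE before the message is logged.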
17962 * 17963 * Arguments: un - ptr to associated softstate 17964 * bp - ptr to buf(9S) for the command 17965 * arg - ptr to the associated sd_sense_info struct 17966 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17967 * or SD_NO_RETRY_ISSUED 17968 * 17969 * Context: May be called from interrupt context 17970 */ 17971 17972 static void 17973 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 17974 { 17975 struct sd_xbuf *xp; 17976 struct scsi_pkt *pktp; 17977 uint8_t *sensep; 17978 daddr_t request_blkno; 17979 diskaddr_t err_blkno; 17980 int severity; 17981 int pfa_flag; 17982 extern struct scsi_key_strings scsi_cmds[]; 17983 17984 ASSERT(un != NULL); 17985 ASSERT(mutex_owned(SD_MUTEX(un))); 17986 ASSERT(bp != NULL); 17987 xp = SD_GET_XBUF(bp); 17988 ASSERT(xp != NULL); 17989 pktp = SD_GET_PKTP(bp); 17990 ASSERT(pktp != NULL); 17991 ASSERT(arg != NULL); 17992 17993 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 17994 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 17995 17996 if ((code == SD_DELAYED_RETRY_ISSUED) || 17997 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 17998 severity = SCSI_ERR_RETRYABLE; 17999 } 18000 18001 /* Use absolute block number for the request block number */ 18002 request_blkno = xp->xb_blkno; 18003 18004 /* 18005 * Now try to get the error block number from the sense data 18006 */ 18007 sensep = xp->xb_sense_data; 18008 18009 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 18010 (uint64_t *)&err_blkno)) { 18011 /* 18012 * We retrieved the error block number from the information 18013 * portion of the sense data. 18014 * 18015 * For USCSI commands we are better off using the error 18016 * block no. as the requested block no. (This is the best 18017 * we can estimate.) 18018 */ 18019 if ((SD_IS_BUFIO(xp) == FALSE) && 18020 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 18021 request_blkno = err_blkno; 18022 } 18023 } else { 18024 /* 18025 * Without the es_valid bit set (for fixed format) or an 18026 * information descriptor (for descriptor format) we cannot 18027 * be certain of the error blkno, so just use the 18028 * request_blkno. 18029 */ 18030 err_blkno = (diskaddr_t)request_blkno; 18031 } 18032 18033 /* 18034 * The following will log the buffer contents for the release driver 18035 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 18036 * level is set to verbose.
18037 */ 18038 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 18039 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 18040 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 18041 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 18042 18043 if (pfa_flag == FALSE) { 18044 /* This is normally only set for USCSI */ 18045 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 18046 return; 18047 } 18048 18049 if ((SD_IS_BUFIO(xp) == TRUE) && 18050 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 18051 (severity < sd_error_level))) { 18052 return; 18053 } 18054 } 18055 /* 18056 * Check for Sonoma Failover and keep a count of how many failed I/O's 18057 */ 18058 if ((SD_IS_LSI(un)) && 18059 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 18060 (scsi_sense_asc(sensep) == 0x94) && 18061 (scsi_sense_ascq(sensep) == 0x01)) { 18062 un->un_sonoma_failure_count++; 18063 if (un->un_sonoma_failure_count > 1) { 18064 return; 18065 } 18066 } 18067 18068 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP || 18069 ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) && 18070 (pktp->pkt_resid == 0))) { 18071 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 18072 request_blkno, err_blkno, scsi_cmds, 18073 (struct scsi_extended_sense *)sensep, 18074 un->un_additional_codes, NULL); 18075 } 18076 } 18077 18078 /* 18079 * Function: sd_sense_key_no_sense 18080 * 18081 * Description: Recovery action when sense data was not received. 18082 * 18083 * Context: May be called from interrupt context 18084 */ 18085 18086 static void 18087 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 18088 struct scsi_pkt *pktp) 18089 { 18090 struct sd_sense_info si; 18091 18092 ASSERT(un != NULL); 18093 ASSERT(mutex_owned(SD_MUTEX(un))); 18094 ASSERT(bp != NULL); 18095 ASSERT(xp != NULL); 18096 ASSERT(pktp != NULL); 18097 18098 si.ssi_severity = SCSI_ERR_FATAL; 18099 si.ssi_pfa_flag = FALSE; 18100 18101 SD_UPDATE_ERRSTATS(un, sd_softerrs); 18102 18103 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18104 &si, EIO, (clock_t)0, NULL); 18105 } 18106 18107 18108 /* 18109 * Function: sd_sense_key_recoverable_error 18110 * 18111 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 
18112 * 18113 * Context: May be called from interrupt context 18114 */ 18115 18116 static void 18117 sd_sense_key_recoverable_error(struct sd_lun *un, uint8_t *sense_datap, 18118 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18119 { 18120 struct sd_sense_info si; 18121 uint8_t asc = scsi_sense_asc(sense_datap); 18122 uint8_t ascq = scsi_sense_ascq(sense_datap); 18123 18124 ASSERT(un != NULL); 18125 ASSERT(mutex_owned(SD_MUTEX(un))); 18126 ASSERT(bp != NULL); 18127 ASSERT(xp != NULL); 18128 ASSERT(pktp != NULL); 18129 18130 /* 18131 * 0x00, 0x1D: ATA PASSTHROUGH INFORMATION AVAILABLE 18132 */ 18133 if (asc == 0x00 && ascq == 0x1D) { 18134 sd_return_command(un, bp); 18135 return; 18136 } 18137 18138 /* 18139 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 18140 */ 18141 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 18142 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 18143 si.ssi_severity = SCSI_ERR_INFO; 18144 si.ssi_pfa_flag = TRUE; 18145 } else { 18146 SD_UPDATE_ERRSTATS(un, sd_softerrs); 18147 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 18148 si.ssi_severity = SCSI_ERR_RECOVERED; 18149 si.ssi_pfa_flag = FALSE; 18150 } 18151 18152 if (pktp->pkt_resid == 0) { 18153 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18154 sd_return_command(un, bp); 18155 return; 18156 } 18157 18158 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18159 &si, EIO, (clock_t)0, NULL); 18160 } 18161 18162 18163 18164 18165 /* 18166 * Function: sd_sense_key_not_ready 18167 * 18168 * Description: Recovery actions for a SCSI "Not Ready" sense key. 18169 * 18170 * Context: May be called from interrupt context 18171 */ 18172 18173 static void 18174 sd_sense_key_not_ready(struct sd_lun *un, uint8_t *sense_datap, struct buf *bp, 18175 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18176 { 18177 struct sd_sense_info si; 18178 uint8_t asc = scsi_sense_asc(sense_datap); 18179 uint8_t ascq = scsi_sense_ascq(sense_datap); 18180 18181 ASSERT(un != NULL); 18182 ASSERT(mutex_owned(SD_MUTEX(un))); 18183 ASSERT(bp != NULL); 18184 ASSERT(xp != NULL); 18185 ASSERT(pktp != NULL); 18186 18187 si.ssi_severity = SCSI_ERR_FATAL; 18188 si.ssi_pfa_flag = FALSE; 18189 18190 /* 18191 * Update error stats after first NOT READY error. Disks may have 18192 * been powered down and may need to be restarted. For CDROMs, 18193 * report NOT READY errors only if media is present. 18194 */ 18195 if ((ISCD(un) && (asc == 0x3A)) || 18196 (xp->xb_nr_retry_count > 0)) { 18197 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18198 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 18199 } 18200 18201 /* 18202 * Just fail if the "not ready" retry limit has been reached. 18203 */ 18204 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) { 18205 /* Special check for error message printing for removables. */ 18206 if (un->un_f_has_removable_media && (asc == 0x04) && 18207 (ascq >= 0x04)) { 18208 si.ssi_severity = SCSI_ERR_ALL; 18209 } 18210 goto fail_command; 18211 } 18212 18213 /* 18214 * Check the ASC and ASCQ in the sense data as needed, to determine 18215 * what to do. 18216 */ 18217 switch (asc) { 18218 case 0x04: /* LOGICAL UNIT NOT READY */ 18219 /* 18220 * disk drives that don't spin up result in a very long delay 18221 * in format without warning messages. We will log a message 18222 * if the error level is set to verbose. 
*/ 18224 if (sd_error_level < SCSI_ERR_RETRYABLE) { 18225 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18226 "logical unit not ready, resetting disk\n"); 18227 } 18228 18229 /* 18230 * There are different requirements for CDROMs and disks for 18231 * the number of retries. If a CD-ROM is reporting this, it is 18232 * probably reading TOC and is in the process of getting 18233 * ready, so we should keep on trying for a long time to make 18234 * sure that all types of media are taken into account (for 18235 * some media the drive takes a long time to read TOC). For 18236 * disks we do not want to retry this too many times as this 18237 * can cause a long hang in format when the drive refuses to 18238 * spin up (a very common failure). 18239 */ 18240 switch (ascq) { 18241 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 18242 /* 18243 * Disk drives frequently refuse to spin up which 18244 * results in a very long hang in format without 18245 * warning messages. 18246 * 18247 * Note: This code preserves the legacy behavior of 18248 * comparing xb_nr_retry_count against zero for fibre 18249 * channel targets instead of comparing against the 18250 * un_reset_retry_count value. The reason for this 18251 * discrepancy has been so utterly lost beneath the 18252 * Sands of Time that even Indiana Jones could not 18253 * find it. 18254 */ 18255 if (un->un_f_is_fibre == TRUE) { 18256 if (((sd_level_mask & SD_LOGMASK_DIAG) || 18257 (xp->xb_nr_retry_count > 0)) && 18258 (un->un_startstop_timeid == NULL)) { 18259 scsi_log(SD_DEVINFO(un), sd_label, 18260 CE_WARN, "logical unit not ready, " 18261 "resetting disk\n"); 18262 sd_reset_target(un, pktp); 18263 } 18264 } else { 18265 if (((sd_level_mask & SD_LOGMASK_DIAG) || 18266 (xp->xb_nr_retry_count > 18267 un->un_reset_retry_count)) && 18268 (un->un_startstop_timeid == NULL)) { 18269 scsi_log(SD_DEVINFO(un), sd_label, 18270 CE_WARN, "logical unit not ready, " 18271 "resetting disk\n"); 18272 sd_reset_target(un, pktp); 18273 } 18274 } 18275 break; 18276 18277 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 18278 /* 18279 * If the target is in the process of becoming 18280 * ready, just proceed with the retry. This can 18281 * happen with CD-ROMs that take a long time to 18282 * read TOC after a power cycle or reset. 18283 */ 18284 goto do_retry; 18285 18286 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 18287 break; 18288 18289 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 18290 /* 18291 * Retries cannot help here so just fail right away. 18292 */ 18293 goto fail_command; 18294 18295 case 0x88: 18296 /* 18297 * Vendor-unique code for T3/T4: it indicates a 18298 * path problem in a multipathed config, but as far as 18299 * the target driver is concerned it equates to a fatal 18300 * error, so we should just fail the command right away 18301 * (without printing anything to the console). If this 18302 * is not a T3/T4, fall thru to the default recovery 18303 * action. 18304 * T3/T4 is FC only; no need to check is_fibre. 18305 */ 18306 if (SD_IS_T3(un) || SD_IS_T4(un)) { 18307 sd_return_failed_command(un, bp, EIO); 18308 return; 18309 } 18310 /* FALLTHRU */ 18311 18312 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 18313 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 18314 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 18315 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 18316 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 18317 default: /* Possible future codes in SCSI spec?
*/ 18318 /* 18319 * For removable-media devices, do not retry if 18320 * ASCQ > 2 as these result mostly from USCSI commands 18321 * on MMC devices issued to check status of an 18322 * operation initiated in immediate mode. Also for 18323 * ASCQ >= 4 do not print console messages as these 18324 * mainly represent a user-initiated operation 18325 * instead of a system failure. 18326 */ 18327 if (un->un_f_has_removable_media) { 18328 si.ssi_severity = SCSI_ERR_ALL; 18329 goto fail_command; 18330 } 18331 break; 18332 } 18333 18334 /* 18335 * As part of our recovery attempt for the NOT READY 18336 * condition, we issue a START STOP UNIT command. However 18337 * we want to wait for a short delay before attempting this 18338 * as there may still be more commands coming back from the 18339 * target with the check condition. To do this we use 18340 * timeout(9F) to call sd_start_stop_unit_callback() after 18341 * the delay interval expires. (sd_start_stop_unit_callback() 18342 * dispatches sd_start_stop_unit_task(), which will issue 18343 * the actual START STOP UNIT command.) The delay interval 18344 * is one-half of the delay that we will use to retry the 18345 * command that generated the NOT READY condition. 18346 * 18347 * Note that we could just dispatch sd_start_stop_unit_task() 18348 * from here and allow it to sleep for the delay interval, 18349 * but then we would be tying up the taskq thread 18350 * unnecessarily for the duration of the delay. 18351 * 18352 * Do not issue the START STOP UNIT if the current command 18353 * is already a START STOP UNIT. 18354 */ 18355 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 18356 break; 18357 } 18358 18359 /* 18360 * Do not schedule the timeout if one is already pending. 18361 */ 18362 if (un->un_startstop_timeid != NULL) { 18363 SD_INFO(SD_LOG_ERROR, un, 18364 "sd_sense_key_not_ready: restart already issued to" 18365 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 18366 ddi_get_instance(SD_DEVINFO(un))); 18367 break; 18368 } 18369 18370 /* 18371 * Schedule the START STOP UNIT command, then queue the command 18372 * for a retry. 18373 * 18374 * Note: A timeout is not scheduled for this retry because we 18375 * want the retry to be serial with the START_STOP_UNIT. The 18376 * retry will be started when the START_STOP_UNIT is completed 18377 * in sd_start_stop_unit_task. 18378 */ 18379 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 18380 un, un->un_busy_timeout / 2); 18381 xp->xb_nr_retry_count++; 18382 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 18383 return; 18384 18385 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 18386 if (sd_error_level < SCSI_ERR_RETRYABLE) { 18387 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18388 "unit does not respond to selection\n"); 18389 } 18390 break; 18391 18392 case 0x3A: /* MEDIUM NOT PRESENT */ 18393 if (sd_error_level >= SCSI_ERR_FATAL) { 18394 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18395 "Caddy not inserted in drive\n"); 18396 } 18397 18398 sr_ejected(un); 18399 un->un_mediastate = DKIO_EJECTED; 18400 /* The state has changed, inform the media watch routines */ 18401 cv_broadcast(&un->un_state_cv); 18402 /* Just fail if no media is present in the drive. */ 18403 goto fail_command; 18404 18405 default: 18406 if (sd_error_level < SCSI_ERR_RETRYABLE) { 18407 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 18408 "Unit not Ready.
Additional sense code 0x%x\n", 18409 asc); 18410 } 18411 break; 18412 } 18413 18414 do_retry: 18415 18416 /* 18417 * Retry the command, as some targets may report NOT READY for 18418 * several seconds after being reset. 18419 */ 18420 xp->xb_nr_retry_count++; 18421 si.ssi_severity = SCSI_ERR_RETRYABLE; 18422 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 18423 &si, EIO, un->un_busy_timeout, NULL); 18424 18425 return; 18426 18427 fail_command: 18428 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18429 sd_return_failed_command(un, bp, EIO); 18430 } 18431 18432 18433 18434 /* 18435 * Function: sd_sense_key_medium_or_hardware_error 18436 * 18437 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 18438 * sense key. 18439 * 18440 * Context: May be called from interrupt context 18441 */ 18442 18443 static void 18444 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, uint8_t *sense_datap, 18445 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18446 { 18447 struct sd_sense_info si; 18448 uint8_t sense_key = scsi_sense_key(sense_datap); 18449 uint8_t asc = scsi_sense_asc(sense_datap); 18450 18451 ASSERT(un != NULL); 18452 ASSERT(mutex_owned(SD_MUTEX(un))); 18453 ASSERT(bp != NULL); 18454 ASSERT(xp != NULL); 18455 ASSERT(pktp != NULL); 18456 18457 si.ssi_severity = SCSI_ERR_FATAL; 18458 si.ssi_pfa_flag = FALSE; 18459 18460 if (sense_key == KEY_MEDIUM_ERROR) { 18461 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 18462 } 18463 18464 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18465 18466 if ((un->un_reset_retry_count != 0) && 18467 (xp->xb_retry_count == un->un_reset_retry_count)) { 18468 mutex_exit(SD_MUTEX(un)); 18469 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 18470 if (un->un_f_allow_bus_device_reset == TRUE) { 18471 18472 boolean_t try_resetting_target = B_TRUE; 18473 18474 /* 18475 * We need to be able to handle specific ASC values when 18476 * we are handling a KEY_HARDWARE_ERROR. In particular, 18477 * taking the default action of resetting the target may 18478 * not be the appropriate way to attempt recovery. 18479 * Resetting a target because of a single LUN failure 18480 * victimizes all LUNs on that target. 18481 * 18482 * This is true for the LSI arrays: if an LSI 18483 * array controller returns an ASC of 0x84 (LUN Dead), we 18484 * should trust it. 18485 */ 18486 18487 if (sense_key == KEY_HARDWARE_ERROR) { 18488 switch (asc) { 18489 case 0x84: 18490 if (SD_IS_LSI(un)) { 18491 try_resetting_target = B_FALSE; 18492 } 18493 break; 18494 default: 18495 break; 18496 } 18497 } 18498 18499 if (try_resetting_target == B_TRUE) { 18500 int reset_retval = 0; 18501 if (un->un_f_lun_reset_enabled == TRUE) { 18502 SD_TRACE(SD_LOG_IO_CORE, un, 18503 "sd_sense_key_medium_or_hardware_" 18504 "error: issuing RESET_LUN\n"); 18505 reset_retval = 18506 scsi_reset(SD_ADDRESS(un), 18507 RESET_LUN); 18508 } 18509 if (reset_retval == 0) { 18510 SD_TRACE(SD_LOG_IO_CORE, un, 18511 "sd_sense_key_medium_or_hardware_" 18512 "error: issuing RESET_TARGET\n"); 18513 (void) scsi_reset(SD_ADDRESS(un), 18514 RESET_TARGET); 18515 } 18516 } 18517 } 18518 mutex_enter(SD_MUTEX(un)); 18519 } 18520 18521 /* 18522 * This really ought to be a fatal error, but we will retry anyway 18523 * as some drives report this as a spurious error.
18524 */ 18525 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18526 &si, EIO, (clock_t)0, NULL); 18527 } 18528 18529 18530 18531 /* 18532 * Function: sd_sense_key_illegal_request 18533 * 18534 * Description: Recovery actions for a SCSI "Illegal Request" sense key. 18535 * 18536 * Context: May be called from interrupt context 18537 */ 18538 18539 static void 18540 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 18541 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18542 { 18543 struct sd_sense_info si; 18544 18545 ASSERT(un != NULL); 18546 ASSERT(mutex_owned(SD_MUTEX(un))); 18547 ASSERT(bp != NULL); 18548 ASSERT(xp != NULL); 18549 ASSERT(pktp != NULL); 18550 18551 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 18552 18553 si.ssi_severity = SCSI_ERR_INFO; 18554 si.ssi_pfa_flag = FALSE; 18555 18556 /* Pointless to retry if the target thinks it's an illegal request */ 18557 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18558 sd_return_failed_command(un, bp, EIO); 18559 } 18560 18561 18562 18563 18564 /* 18565 * Function: sd_sense_key_unit_attention 18566 * 18567 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 18568 * 18569 * Context: May be called from interrupt context 18570 */ 18571 18572 static void 18573 sd_sense_key_unit_attention(struct sd_lun *un, uint8_t *sense_datap, 18574 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18575 { 18576 /* 18577 * For UNIT ATTENTION we allow retries for one minute. Devices 18578 * like Sonoma can return UNIT ATTENTION close to a minute 18579 * under certain conditions. 18580 */ 18581 int retry_check_flag = SD_RETRIES_UA; 18582 boolean_t kstat_updated = B_FALSE; 18583 struct sd_sense_info si; 18584 uint8_t asc = scsi_sense_asc(sense_datap); 18585 uint8_t ascq = scsi_sense_ascq(sense_datap); 18586 18587 ASSERT(un != NULL); 18588 ASSERT(mutex_owned(SD_MUTEX(un))); 18589 ASSERT(bp != NULL); 18590 ASSERT(xp != NULL); 18591 ASSERT(pktp != NULL); 18592 18593 si.ssi_severity = SCSI_ERR_INFO; 18594 si.ssi_pfa_flag = FALSE; 18595 18596 18597 switch (asc) { 18598 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 18599 if (sd_report_pfa != 0) { 18600 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 18601 si.ssi_pfa_flag = TRUE; 18602 retry_check_flag = SD_RETRIES_STANDARD; 18603 goto do_retry; 18604 } 18605 18606 break; 18607 18608 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 18609 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 18610 un->un_resvd_status |= 18611 (SD_LOST_RESERVE | SD_WANT_RESERVE); 18612 } 18613 #ifdef _LP64 18614 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 18615 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 18616 un, KM_NOSLEEP) == 0) { 18617 /* 18618 * If we can't dispatch the task we'll just 18619 * live without descriptor sense. We can 18620 * try again on the next "unit attention" 18621 */ 18622 SD_ERROR(SD_LOG_ERROR, un, 18623 "sd_sense_key_unit_attention: " 18624 "Could not dispatch " 18625 "sd_reenable_dsense_task\n"); 18626 } 18627 } 18628 #endif /* _LP64 */ 18629 /* FALLTHRU */ 18630 18631 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 18632 if (!un->un_f_has_removable_media) { 18633 break; 18634 } 18635 18636 /* 18637 * When we get a unit attention from a removable-media device, 18638 * it may be in a state that will take a long time to recover 18639 * (e.g., from a reset). Since we are executing in interrupt 18640 * context here, we cannot wait around for the device to come 18641 * back. 
So hand this command off to sd_media_change_task() 18642 * for deferred processing under taskq thread context. (Note 18643 * that the command still may be failed if a problem is 18644 * encountered at a later time.) 18645 */ 18646 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 18647 KM_NOSLEEP) == 0) { 18648 /* 18649 * Cannot dispatch the request so fail the command. 18650 */ 18651 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18652 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 18653 si.ssi_severity = SCSI_ERR_FATAL; 18654 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18655 sd_return_failed_command(un, bp, EIO); 18656 } 18657 18658 /* 18659 * If the dispatch of sd_media_change_task() failed, we have already 18660 * updated the kstats above. If the dispatch succeeded, the kstats 18661 * will be updated later if the deferred processing encounters an 18662 * error. Either way, set the kstat_updated flag here so the common 18663 * update below is skipped. 18664 */ kstat_updated = B_TRUE; 18665 18666 /* 18667 * Either the command has been successfully dispatched to a 18668 * task Q for retrying, or the dispatch failed. In either case 18669 * do NOT retry again by calling sd_retry_command. Doing so would 18670 * set up two retries of the same command, and when one completed 18671 * and freed the resources the other would access freed memory, 18672 * a bad thing. 18673 */ 18674 return; 18675 18676 default: 18677 break; 18678 } 18679 18680 /* 18681 * ASC ASCQ 18682 * 2A 09 Capacity data has changed 18683 * 2A 01 Mode parameters changed 18684 * 3F 0E Reported luns data has changed 18685 * Arrays that support logical unit expansion should report 18686 * capacity changes (2Ah/09). MODE PARAMETERS CHANGED (2Ah/01) and 18687 * REPORTED LUNS DATA HAS CHANGED (3Fh/0Eh) serve as approximations. 18688 */ 18689 if (((asc == 0x2a) && (ascq == 0x09)) || 18690 ((asc == 0x2a) && (ascq == 0x01)) || 18691 ((asc == 0x3f) && (ascq == 0x0e))) { 18692 if (taskq_dispatch(sd_tq, sd_target_change_task, un, 18693 KM_NOSLEEP) == 0) { 18694 SD_ERROR(SD_LOG_ERROR, un, 18695 "sd_sense_key_unit_attention: " 18696 "Could not dispatch sd_target_change_task\n"); 18697 } 18698 } 18699 18700 /* 18701 * Update kstat if we haven't done that. 18702 */ 18703 if (!kstat_updated) { 18704 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18705 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 18706 } 18707 18708 do_retry: 18709 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 18710 EIO, SD_UA_RETRY_DELAY, NULL); 18711 } 18712 18713 18714 18715 /* 18716 * Function: sd_sense_key_fail_command 18717 * 18718 * Description: Used to fail a command when we don't like the sense key that 18719 * was returned. 18720 * 18721 * Context: May be called from interrupt context 18722 */ 18723 18724 static void 18725 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 18726 struct scsi_pkt *pktp) 18727 { 18728 struct sd_sense_info si; 18729 18730 ASSERT(un != NULL); 18731 ASSERT(mutex_owned(SD_MUTEX(un))); 18732 ASSERT(bp != NULL); 18733 ASSERT(xp != NULL); 18734 ASSERT(pktp != NULL); 18735 18736 si.ssi_severity = SCSI_ERR_FATAL; 18737 si.ssi_pfa_flag = FALSE; 18738 18739 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18740 sd_return_failed_command(un, bp, EIO); 18741 } 18742 18743 18744 18745 /* 18746 * Function: sd_sense_key_blank_check 18747 * 18748 * Description: Recovery actions for a SCSI "Blank Check" sense key. 18749 * Has no monetary connotation.
18750 * 18751 * Context: May be called from interrupt context 18752 */ 18753 18754 static void 18755 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 18756 struct scsi_pkt *pktp) 18757 { 18758 struct sd_sense_info si; 18759 18760 ASSERT(un != NULL); 18761 ASSERT(mutex_owned(SD_MUTEX(un))); 18762 ASSERT(bp != NULL); 18763 ASSERT(xp != NULL); 18764 ASSERT(pktp != NULL); 18765 18766 /* 18767 * Blank check is not fatal for removable devices, therefore 18768 * it does not require a console message. 18769 */ 18770 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 18771 SCSI_ERR_FATAL; 18772 si.ssi_pfa_flag = FALSE; 18773 18774 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18775 sd_return_failed_command(un, bp, EIO); 18776 } 18777 18778 18779 18780 18781 /* 18782 * Function: sd_sense_key_aborted_command 18783 * 18784 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 18785 * 18786 * Context: May be called from interrupt context 18787 */ 18788 18789 static void 18790 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 18791 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18792 { 18793 struct sd_sense_info si; 18794 18795 ASSERT(un != NULL); 18796 ASSERT(mutex_owned(SD_MUTEX(un))); 18797 ASSERT(bp != NULL); 18798 ASSERT(xp != NULL); 18799 ASSERT(pktp != NULL); 18800 18801 si.ssi_severity = SCSI_ERR_FATAL; 18802 si.ssi_pfa_flag = FALSE; 18803 18804 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18805 18806 /* 18807 * This really ought to be a fatal error, but we will retry anyway 18808 * as some drives report this as a spurious error. 18809 */ 18810 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18811 &si, EIO, drv_usectohz(100000), NULL); 18812 } 18813 18814 18815 18816 /* 18817 * Function: sd_sense_key_default 18818 * 18819 * Description: Default recovery action for several SCSI sense keys (basically 18820 * attempts a retry). 18821 * 18822 * Context: May be called from interrupt context 18823 */ 18824 18825 static void 18826 sd_sense_key_default(struct sd_lun *un, uint8_t *sense_datap, struct buf *bp, 18827 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18828 { 18829 struct sd_sense_info si; 18830 uint8_t sense_key = scsi_sense_key(sense_datap); 18831 18832 ASSERT(un != NULL); 18833 ASSERT(mutex_owned(SD_MUTEX(un))); 18834 ASSERT(bp != NULL); 18835 ASSERT(xp != NULL); 18836 ASSERT(pktp != NULL); 18837 18838 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18839 18840 /* 18841 * Undecoded sense key. Attempt retries and hope that will fix 18842 * the problem. Otherwise, we're dead. 18843 */ 18844 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 18845 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18846 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 18847 } 18848 18849 si.ssi_severity = SCSI_ERR_FATAL; 18850 si.ssi_pfa_flag = FALSE; 18851 18852 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18853 &si, EIO, (clock_t)0, NULL); 18854 } 18855 18856 18857 18858 /* 18859 * Function: sd_print_retry_msg 18860 * 18861 * Description: Print a message indicating the retry action being taken. 18862 * 18863 * Arguments: un - ptr to associated softstate 18864 * bp - ptr to buf(9S) for the command 18865 * arg - not used. 
18866 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18867 * or SD_NO_RETRY_ISSUED 18868 * 18869 * Context: May be called from interrupt context 18870 */ 18871 /* ARGSUSED */ 18872 static void 18873 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 18874 { 18875 struct sd_xbuf *xp; 18876 struct scsi_pkt *pktp; 18877 char *reasonp; 18878 char *msgp; 18879 18880 ASSERT(un != NULL); 18881 ASSERT(mutex_owned(SD_MUTEX(un))); 18882 ASSERT(bp != NULL); 18883 pktp = SD_GET_PKTP(bp); 18884 ASSERT(pktp != NULL); 18885 xp = SD_GET_XBUF(bp); 18886 ASSERT(xp != NULL); 18887 18888 ASSERT(!mutex_owned(&un->un_pm_mutex)); 18889 mutex_enter(&un->un_pm_mutex); 18890 if ((un->un_state == SD_STATE_SUSPENDED) || 18891 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 18892 (pktp->pkt_flags & FLAG_SILENT)) { 18893 mutex_exit(&un->un_pm_mutex); 18894 goto update_pkt_reason; 18895 } 18896 mutex_exit(&un->un_pm_mutex); 18897 18898 /* 18899 * Suppress messages if they are all the same pkt_reason; with 18900 * TQ, many (up to 256) are returned with the same pkt_reason. 18901 * If we are in panic, then suppress the retry messages. 18902 */ 18903 switch (flag) { 18904 case SD_NO_RETRY_ISSUED: 18905 msgp = "giving up"; 18906 break; 18907 case SD_IMMEDIATE_RETRY_ISSUED: 18908 case SD_DELAYED_RETRY_ISSUED: 18909 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 18910 ((pktp->pkt_reason == un->un_last_pkt_reason) && 18911 (sd_error_level != SCSI_ERR_ALL))) { 18912 return; 18913 } 18914 msgp = "retrying command"; 18915 break; 18916 default: 18917 goto update_pkt_reason; 18918 } 18919 18920 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 18921 scsi_rname(pktp->pkt_reason)); 18922 18923 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) { 18924 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18925 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 18926 } 18927 18928 update_pkt_reason: 18929 /* 18930 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 18931 * This is to prevent multiple console messages for the same failure 18932 * condition. Note that un->un_last_pkt_reason is NOT restored if & 18933 * when the command is retried successfully because there still may be 18934 * more commands coming back with the same value of pktp->pkt_reason. 18935 */ 18936 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 18937 un->un_last_pkt_reason = pktp->pkt_reason; 18938 } 18939 } 18940 18941 18942 /* 18943 * Function: sd_print_cmd_incomplete_msg 18944 * 18945 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 18946 * 18947 * Arguments: un - ptr to associated softstate 18948 * bp - ptr to buf(9S) for the command 18949 * arg - passed to sd_print_retry_msg() 18950 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18951 * or SD_NO_RETRY_ISSUED 18952 * 18953 * Context: May be called from interrupt context 18954 */ 18955 18956 static void 18957 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 18958 int code) 18959 { 18960 dev_info_t *dip; 18961 18962 ASSERT(un != NULL); 18963 ASSERT(mutex_owned(SD_MUTEX(un))); 18964 ASSERT(bp != NULL); 18965 18966 switch (code) { 18967 case SD_NO_RETRY_ISSUED: 18968 /* Command was failed. Someone turned off this target? 
*/ 18969 if (un->un_state != SD_STATE_OFFLINE) { 18970 /* 18971 * Suppress message if we are detaching and 18972 * device has been disconnected 18973 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 18974 * private interface and not part of the DDI 18975 */ 18976 dip = un->un_sd->sd_dev; 18977 if (!(DEVI_IS_DETACHING(dip) && 18978 DEVI_IS_DEVICE_REMOVED(dip))) { 18979 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18980 "disk not responding to selection\n"); 18981 } 18982 New_state(un, SD_STATE_OFFLINE); 18983 } 18984 break; 18985 18986 case SD_DELAYED_RETRY_ISSUED: 18987 case SD_IMMEDIATE_RETRY_ISSUED: 18988 default: 18989 /* Command was successfully queued for retry */ 18990 sd_print_retry_msg(un, bp, arg, code); 18991 break; 18992 } 18993 } 18994 18995 18996 /* 18997 * Function: sd_pkt_reason_cmd_incomplete 18998 * 18999 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 19000 * 19001 * Context: May be called from interrupt context 19002 */ 19003 19004 static void 19005 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 19006 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19007 { 19008 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 19009 19010 ASSERT(un != NULL); 19011 ASSERT(mutex_owned(SD_MUTEX(un))); 19012 ASSERT(bp != NULL); 19013 ASSERT(xp != NULL); 19014 ASSERT(pktp != NULL); 19015 19016 /* Do not do a reset if selection did not complete */ 19017 /* Note: Should this not just check the bit? */ 19018 if (pktp->pkt_state != STATE_GOT_BUS) { 19019 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19020 sd_reset_target(un, pktp); 19021 } 19022 19023 /* 19024 * If the target was not successfully selected, then set 19025 * SD_RETRIES_FAILFAST to indicate that we lost communication 19026 * with the target, and further retries and/or commands are 19027 * likely to take a long time. 19028 */ 19029 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 19030 flag |= SD_RETRIES_FAILFAST; 19031 } 19032 19033 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19034 19035 sd_retry_command(un, bp, flag, 19036 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19037 } 19038 19039 19040 19041 /* 19042 * Function: sd_pkt_reason_cmd_tran_err 19043 * 19044 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 19045 * 19046 * Context: May be called from interrupt context 19047 */ 19048 19049 static void 19050 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 19051 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19052 { 19053 ASSERT(un != NULL); 19054 ASSERT(mutex_owned(SD_MUTEX(un))); 19055 ASSERT(bp != NULL); 19056 ASSERT(xp != NULL); 19057 ASSERT(pktp != NULL); 19058 19059 /* 19060 * Do not reset if we got a parity error, or if 19061 * selection did not complete. 19062 */ 19063 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19064 /* Note: Should this not just check the bit for pkt_state? */ 19065 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 19066 (pktp->pkt_state != STATE_GOT_BUS)) { 19067 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19068 sd_reset_target(un, pktp); 19069 } 19070 19071 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19072 19073 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 19074 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19075 } 19076 19077 19078 19079 /* 19080 * Function: sd_pkt_reason_cmd_reset 19081 * 19082 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
19083 * 19084 * Context: May be called from interrupt context 19085 */ 19086 19087 static void 19088 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 19089 struct scsi_pkt *pktp) 19090 { 19091 ASSERT(un != NULL); 19092 ASSERT(mutex_owned(SD_MUTEX(un))); 19093 ASSERT(bp != NULL); 19094 ASSERT(xp != NULL); 19095 ASSERT(pktp != NULL); 19096 19097 /* The target may still be running the command, so try to reset. */ 19098 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19099 sd_reset_target(un, pktp); 19100 19101 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19102 19103 /* 19104 * If pkt_reason is CMD_RESET chances are that this pkt got 19105 * reset because another target on this bus caused it. The target 19106 * that caused it should get CMD_TIMEOUT with pkt_statistics 19107 * of STAT_TIMEOUT/STAT_DEV_RESET. 19108 */ 19109 19110 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 19111 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19112 } 19113 19114 19115 19116 19117 /* 19118 * Function: sd_pkt_reason_cmd_aborted 19119 * 19120 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 19121 * 19122 * Context: May be called from interrupt context 19123 */ 19124 19125 static void 19126 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 19127 struct scsi_pkt *pktp) 19128 { 19129 ASSERT(un != NULL); 19130 ASSERT(mutex_owned(SD_MUTEX(un))); 19131 ASSERT(bp != NULL); 19132 ASSERT(xp != NULL); 19133 ASSERT(pktp != NULL); 19134 19135 /* The target may still be running the command, so try to reset. */ 19136 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19137 sd_reset_target(un, pktp); 19138 19139 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19140 19141 /* 19142 * If pkt_reason is CMD_ABORTED chances are that this pkt got 19143 * aborted because another target on this bus caused it. The target 19144 * that caused it should get CMD_TIMEOUT with pkt_statistics 19145 * of STAT_TIMEOUT/STAT_DEV_RESET. 19146 */ 19147 19148 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 19149 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19150 } 19151 19152 19153 19154 /* 19155 * Function: sd_pkt_reason_cmd_timeout 19156 * 19157 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 19158 * 19159 * Context: May be called from interrupt context 19160 */ 19161 19162 static void 19163 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 19164 struct scsi_pkt *pktp) 19165 { 19166 ASSERT(un != NULL); 19167 ASSERT(mutex_owned(SD_MUTEX(un))); 19168 ASSERT(bp != NULL); 19169 ASSERT(xp != NULL); 19170 ASSERT(pktp != NULL); 19171 19172 19173 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19174 sd_reset_target(un, pktp); 19175 19176 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19177 19178 /* 19179 * A command timeout indicates that we could not establish 19180 * communication with the target, so set SD_RETRIES_FAILFAST 19181 * as further retries/commands are likely to take a long time. 19182 */ 19183 sd_retry_command(un, bp, 19184 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 19185 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19186 } 19187 19188 19189 19190 /* 19191 * Function: sd_pkt_reason_cmd_unx_bus_free 19192 * 19193 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
19194 * 19195 * Context: May be called from interrupt context 19196 */ 19197 19198 static void 19199 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 19200 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19201 { 19202 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 19203 19204 ASSERT(un != NULL); 19205 ASSERT(mutex_owned(SD_MUTEX(un))); 19206 ASSERT(bp != NULL); 19207 ASSERT(xp != NULL); 19208 ASSERT(pktp != NULL); 19209 19210 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19211 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19212 19213 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 19214 sd_print_retry_msg : NULL; 19215 19216 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 19217 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19218 } 19219 19220 19221 /* 19222 * Function: sd_pkt_reason_cmd_tag_reject 19223 * 19224 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 19225 * 19226 * Context: May be called from interrupt context 19227 */ 19228 19229 static void 19230 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 19231 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19232 { 19233 ASSERT(un != NULL); 19234 ASSERT(mutex_owned(SD_MUTEX(un))); 19235 ASSERT(bp != NULL); 19236 ASSERT(xp != NULL); 19237 ASSERT(pktp != NULL); 19238 19239 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19240 pktp->pkt_flags = 0; 19241 un->un_tagflags = 0; 19242 if (un->un_f_opt_queueing == TRUE) { 19243 un->un_throttle = min(un->un_throttle, 3); 19244 } else { 19245 un->un_throttle = 1; 19246 } 19247 mutex_exit(SD_MUTEX(un)); 19248 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 19249 mutex_enter(SD_MUTEX(un)); 19250 19251 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19252 19253 /* Legacy behavior not to check retry counts here. */ 19254 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 19255 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19256 } 19257 19258 19259 /* 19260 * Function: sd_pkt_reason_default 19261 * 19262 * Description: Default recovery actions for SCSA pkt_reason values that 19263 * do not have more explicit recovery actions. 19264 * 19265 * Context: May be called from interrupt context 19266 */ 19267 19268 static void 19269 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 19270 struct scsi_pkt *pktp) 19271 { 19272 ASSERT(un != NULL); 19273 ASSERT(mutex_owned(SD_MUTEX(un))); 19274 ASSERT(bp != NULL); 19275 ASSERT(xp != NULL); 19276 ASSERT(pktp != NULL); 19277 19278 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19279 sd_reset_target(un, pktp); 19280 19281 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19282 19283 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 19284 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19285 } 19286 19287 19288 19289 /* 19290 * Function: sd_pkt_status_check_condition 19291 * 19292 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
19293 * 19294 * Context: May be called from interrupt context 19295 */ 19296 19297 static void 19298 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 19299 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19300 { 19301 ASSERT(un != NULL); 19302 ASSERT(mutex_owned(SD_MUTEX(un))); 19303 ASSERT(bp != NULL); 19304 ASSERT(xp != NULL); 19305 ASSERT(pktp != NULL); 19306 19307 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 19308 "entry: buf:0x%p xp:0x%p\n", bp, xp); 19309 19310 /* 19311 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 19312 * command will be retried after the request sense). Otherwise, retry 19313 * the command. Note: we are issuing the request sense even though the 19314 * retry limit may have been reached for the failed command. 19315 */ 19316 if (un->un_f_arq_enabled == FALSE) { 19317 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 19318 "no ARQ, sending request sense command\n"); 19319 sd_send_request_sense_command(un, bp, pktp); 19320 } else { 19321 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 19322 "ARQ, retrying request sense command\n"); 19323 #if defined(__i386) || defined(__amd64) 19324 /* 19325 * The SD_RETRY_DELAY value needs to be adjusted here 19326 * whenever SD_RETRY_DELAY changes in sddef.h. 19327 */ 19328 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 19329 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, 19330 NULL); 19331 #else 19332 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 19333 EIO, SD_RETRY_DELAY, NULL); 19334 #endif 19335 } 19336 19337 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 19338 } 19339 19340 19341 /* 19342 * Function: sd_pkt_status_busy 19343 * 19344 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 19345 * 19346 * Context: May be called from interrupt context 19347 */ 19348 19349 static void 19350 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 19351 struct scsi_pkt *pktp) 19352 { 19353 ASSERT(un != NULL); 19354 ASSERT(mutex_owned(SD_MUTEX(un))); 19355 ASSERT(bp != NULL); 19356 ASSERT(xp != NULL); 19357 ASSERT(pktp != NULL); 19358 19359 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19360 "sd_pkt_status_busy: entry\n"); 19361 19362 /* If retries are exhausted, just fail the command. */ 19363 if (xp->xb_retry_count >= un->un_busy_retry_count) { 19364 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 19365 "device busy too long\n"); 19366 sd_return_failed_command(un, bp, EIO); 19367 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19368 "sd_pkt_status_busy: exit\n"); 19369 return; 19370 } 19371 xp->xb_retry_count++; 19372 19373 /* 19374 * Try to reset the target. However, we do not want to perform 19375 * more than one reset if the device continues to fail. The reset 19376 * will be performed when the retry count reaches the reset 19377 * threshold. This threshold should be set such that at least 19378 * one retry is issued before the reset is performed. 19379 */ 19380 if (xp->xb_retry_count == 19381 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 19382 int rval = 0; 19383 mutex_exit(SD_MUTEX(un)); 19384 if (un->un_f_allow_bus_device_reset == TRUE) { 19385 /* 19386 * First try to reset the LUN; if we cannot then 19387 * try to reset the target.
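 * The escalation below mirrors sd_reset_target(): scsi_reset(9F)
 * is attempted with RESET_LUN first, then RESET_TARGET, and
 * finally RESET_ALL if both of those fail.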
19388 */ 19389 if (un->un_f_lun_reset_enabled == TRUE) { 19390 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19391 "sd_pkt_status_busy: RESET_LUN\n"); 19392 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 19393 } 19394 if (rval == 0) { 19395 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19396 "sd_pkt_status_busy: RESET_TARGET\n"); 19397 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 19398 } 19399 } 19400 if (rval == 0) { 19401 /* 19402 * If the RESET_LUN and/or RESET_TARGET failed, 19403 * try RESET_ALL 19404 */ 19405 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19406 "sd_pkt_status_busy: RESET_ALL\n"); 19407 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 19408 } 19409 mutex_enter(SD_MUTEX(un)); 19410 if (rval == 0) { 19411 /* 19412 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 19413 * At this point we give up & fail the command. 19414 */ 19415 sd_return_failed_command(un, bp, EIO); 19416 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19417 "sd_pkt_status_busy: exit (failed cmd)\n"); 19418 return; 19419 } 19420 } 19421 19422 /* 19423 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 19424 * we have already checked the retry counts above. 19425 */ 19426 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 19427 EIO, un->un_busy_timeout, NULL); 19428 19429 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19430 "sd_pkt_status_busy: exit\n"); 19431 } 19432 19433 19434 /* 19435 * Function: sd_pkt_status_reservation_conflict 19436 * 19437 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 19438 * command status. 19439 * 19440 * Context: May be called from interrupt context 19441 */ 19442 19443 static void 19444 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 19445 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19446 { 19447 ASSERT(un != NULL); 19448 ASSERT(mutex_owned(SD_MUTEX(un))); 19449 ASSERT(bp != NULL); 19450 ASSERT(xp != NULL); 19451 ASSERT(pktp != NULL); 19452 19453 /* 19454 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 19455 * conflict could be due to various reasons like incorrect keys, not 19456 * registered or not reserved etc. So, we return EACCES to the caller. 19457 */ 19458 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 19459 int cmd = SD_GET_PKT_OPCODE(pktp); 19460 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 19461 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 19462 sd_return_failed_command(un, bp, EACCES); 19463 return; 19464 } 19465 } 19466 19467 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 19468 19469 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 19470 if (sd_failfast_enable != 0) { 19471 /* By definition, we must panic here.... */ 19472 sd_panic_for_res_conflict(un); 19473 /*NOTREACHED*/ 19474 } 19475 SD_ERROR(SD_LOG_IO, un, 19476 "sd_handle_resv_conflict: Disk Reserved\n"); 19477 sd_return_failed_command(un, bp, EACCES); 19478 return; 19479 } 19480 19481 /* 19482 * 1147670: retry only if sd_retry_on_reservation_conflict 19483 * property is set (default is 1). Retries will not succeed 19484 * on a disk reserved by another initiator. HA systems 19485 * may reset this via sd.conf to avoid these retries. 19486 * 19487 * Note: The legacy return code for this failure is EIO, however EACCES 19488 * seems more appropriate for a reservation conflict. 
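 * As a sketch of the sd.conf override mentioned above (the exact
 * property name is assumed here; it is read via the DDI property
 * mechanism in the attach path), an HA configuration might contain:
 *
 *	retry-on-reservation-conflict=0;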
19489 */ 19490 if (sd_retry_on_reservation_conflict == 0) { 19491 SD_ERROR(SD_LOG_IO, un, 19492 "sd_handle_resv_conflict: Device Reserved\n"); 19493 sd_return_failed_command(un, bp, EIO); 19494 return; 19495 } 19496 19497 /* 19498 * Retry the command if we can. 19499 * 19500 * Note: The legacy return code for this failure is EIO, however EACCES 19501 * seems more appropriate for a reservation conflict. 19502 */ 19503 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 19504 (clock_t)2, NULL); 19505 } 19506 19507 19508 19509 /* 19510 * Function: sd_pkt_status_qfull 19511 * 19512 * Description: Handle a QUEUE FULL condition from the target. This can 19513 * occur if the HBA does not handle the queue full condition. 19514 * (Basically this means third-party HBAs as Sun HBAs will 19515 * handle the queue full condition.) Note that if there are 19516 * some commands already in the transport, then the queue full 19517 * has occurred because the queue for this nexus is actually 19518 * full. If there are no commands in the transport, then the 19519 * queue full is resulting from some other initiator or lun 19520 * consuming all the resources at the target. 19521 * 19522 * Context: May be called from interrupt context 19523 */ 19524 19525 static void 19526 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 19527 struct scsi_pkt *pktp) 19528 { 19529 ASSERT(un != NULL); 19530 ASSERT(mutex_owned(SD_MUTEX(un))); 19531 ASSERT(bp != NULL); 19532 ASSERT(xp != NULL); 19533 ASSERT(pktp != NULL); 19534 19535 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19536 "sd_pkt_status_qfull: entry\n"); 19537 19538 /* 19539 * Just lower the QFULL throttle and retry the command. Note that 19540 * we do not limit the number of retries here. 19541 */ 19542 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 19543 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 19544 SD_RESTART_TIMEOUT, NULL); 19545 19546 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19547 "sd_pkt_status_qfull: exit\n"); 19548 } 19549 19550 19551 /* 19552 * Function: sd_reset_target 19553 * 19554 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 19555 * RESET_TARGET, or RESET_ALL. 19556 * 19557 * Context: May be called under interrupt context. 19558 */ 19559 19560 static void 19561 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 19562 { 19563 int rval = 0; 19564 19565 ASSERT(un != NULL); 19566 ASSERT(mutex_owned(SD_MUTEX(un))); 19567 ASSERT(pktp != NULL); 19568 19569 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 19570 19571 /* 19572 * No need to reset if the transport layer has already done so. 
19573 */ 19574 if ((pktp->pkt_statistics & 19575 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 19576 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19577 "sd_reset_target: no reset\n"); 19578 return; 19579 } 19580 19581 mutex_exit(SD_MUTEX(un)); 19582 19583 if (un->un_f_allow_bus_device_reset == TRUE) { 19584 if (un->un_f_lun_reset_enabled == TRUE) { 19585 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19586 "sd_reset_target: RESET_LUN\n"); 19587 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 19588 } 19589 if (rval == 0) { 19590 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19591 "sd_reset_target: RESET_TARGET\n"); 19592 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 19593 } 19594 } 19595 19596 if (rval == 0) { 19597 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19598 "sd_reset_target: RESET_ALL\n"); 19599 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 19600 } 19601 19602 mutex_enter(SD_MUTEX(un)); 19603 19604 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 19605 } 19606 19607 /* 19608 * Function: sd_target_change_task 19609 * 19610 * Description: Handle dynamic target change 19611 * 19612 * Context: Executes in a taskq() thread context 19613 */ 19614 static void 19615 sd_target_change_task(void *arg) 19616 { 19617 struct sd_lun *un = arg; 19618 uint64_t capacity; 19619 diskaddr_t label_cap; 19620 uint_t lbasize; 19621 sd_ssc_t *ssc; 19622 19623 ASSERT(un != NULL); 19624 ASSERT(!mutex_owned(SD_MUTEX(un))); 19625 19626 if ((un->un_f_blockcount_is_valid == FALSE) || 19627 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 19628 return; 19629 } 19630 19631 ssc = sd_ssc_init(un); 19632 19633 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity, 19634 &lbasize, SD_PATH_DIRECT) != 0) { 19635 SD_ERROR(SD_LOG_ERROR, un, 19636 "sd_target_change_task: fail to read capacity\n"); 19637 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19638 goto task_exit; 19639 } 19640 19641 mutex_enter(SD_MUTEX(un)); 19642 if (capacity <= un->un_blockcount) { 19643 mutex_exit(SD_MUTEX(un)); 19644 goto task_exit; 19645 } 19646 19647 sd_update_block_info(un, lbasize, capacity); 19648 mutex_exit(SD_MUTEX(un)); 19649 19650 /* 19651 * If lun is EFI labeled and lun capacity is greater than the 19652 * capacity contained in the label, log a sys event. 
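 * (The event posted is EC_dev_status/ESC_DEV_DLE, by way of
 * sd_log_lun_expansion_event() below, so userland consumers can
 * subscribe to dynamic LUN expansion notifications.)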
19653 */ 19654 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 19655 (void*)SD_PATH_DIRECT) == 0) { 19656 mutex_enter(SD_MUTEX(un)); 19657 if (un->un_f_blockcount_is_valid && 19658 un->un_blockcount > label_cap) { 19659 mutex_exit(SD_MUTEX(un)); 19660 sd_log_lun_expansion_event(un, KM_SLEEP); 19661 } else { 19662 mutex_exit(SD_MUTEX(un)); 19663 } 19664 } 19665 19666 task_exit: 19667 sd_ssc_fini(ssc); 19668 } 19669 19670 19671 /* 19672 * Function: sd_log_dev_status_event 19673 * 19674 * Description: Log EC_dev_status sysevent 19675 * 19676 * Context: Never called from interrupt context 19677 */ 19678 static void 19679 sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag) 19680 { 19681 int err; 19682 char *path; 19683 nvlist_t *attr_list; 19684 19685 /* Allocate and build sysevent attribute list */ 19686 err = nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, km_flag); 19687 if (err != 0) { 19688 SD_ERROR(SD_LOG_ERROR, un, 19689 "sd_log_dev_status_event: fail to allocate space\n"); 19690 return; 19691 } 19692 19693 path = kmem_alloc(MAXPATHLEN, km_flag); 19694 if (path == NULL) { 19695 nvlist_free(attr_list); 19696 SD_ERROR(SD_LOG_ERROR, un, 19697 "sd_log_dev_status_event: fail to allocate space\n"); 19698 return; 19699 } 19700 /* 19701 * Add path attribute to identify the lun. 19702 * We are using minor node 'a' as the sysevent attribute. 19703 */ 19704 (void) snprintf(path, MAXPATHLEN, "/devices"); 19705 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path)); 19706 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path), 19707 ":a"); 19708 19709 err = nvlist_add_string(attr_list, DEV_PHYS_PATH, path); 19710 if (err != 0) { 19711 nvlist_free(attr_list); 19712 kmem_free(path, MAXPATHLEN); 19713 SD_ERROR(SD_LOG_ERROR, un, 19714 "sd_log_dev_status_event: fail to add attribute\n"); 19715 return; 19716 } 19717 19718 /* Log dynamic lun expansion sysevent */ 19719 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS, 19720 esc, attr_list, NULL, km_flag); 19721 if (err != DDI_SUCCESS) { 19722 SD_ERROR(SD_LOG_ERROR, un, 19723 "sd_log_dev_status_event: fail to log sysevent\n"); 19724 } 19725 19726 nvlist_free(attr_list); 19727 kmem_free(path, MAXPATHLEN); 19728 } 19729 19730 19731 /* 19732 * Function: sd_log_lun_expansion_event 19733 * 19734 * Description: Log lun expansion sys event 19735 * 19736 * Context: Never called from interrupt context 19737 */ 19738 static void 19739 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag) 19740 { 19741 sd_log_dev_status_event(un, ESC_DEV_DLE, km_flag); 19742 } 19743 19744 19745 /* 19746 * Function: sd_log_eject_request_event 19747 * 19748 * Description: Log eject request sysevent 19749 * 19750 * Context: Never called from interrupt context 19751 */ 19752 static void 19753 sd_log_eject_request_event(struct sd_lun *un, int km_flag) 19754 { 19755 sd_log_dev_status_event(un, ESC_DEV_EJECT_REQUEST, km_flag); 19756 } 19757 19758 19759 /* 19760 * Function: sd_media_change_task 19761 * 19762 * Description: Recovery action for CDROM to become available. 
19763 * 19764 * Context: Executes in a taskq() thread context 19765 */ 19766 19767 static void 19768 sd_media_change_task(void *arg) 19769 { 19770 struct scsi_pkt *pktp = arg; 19771 struct sd_lun *un; 19772 struct buf *bp; 19773 struct sd_xbuf *xp; 19774 int err = 0; 19775 int retry_count = 0; 19776 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 19777 struct sd_sense_info si; 19778 19779 ASSERT(pktp != NULL); 19780 bp = (struct buf *)pktp->pkt_private; 19781 ASSERT(bp != NULL); 19782 xp = SD_GET_XBUF(bp); 19783 ASSERT(xp != NULL); 19784 un = SD_GET_UN(bp); 19785 ASSERT(un != NULL); 19786 ASSERT(!mutex_owned(SD_MUTEX(un))); 19787 ASSERT(un->un_f_monitor_media_state); 19788 19789 si.ssi_severity = SCSI_ERR_INFO; 19790 si.ssi_pfa_flag = FALSE; 19791 19792 /* 19793 * When a reset is issued on a CDROM, it takes a long time to 19794 * recover. First few attempts to read capacity and other things 19795 * related to handling unit attention fail (with an ASC 0x4 and 19796 * ASCQ 0x1). In that case we want to allow enough retries, while 19797 * limiting the retries in other cases of genuine failure, such 19798 * as no media in the drive. 19799 */ 19800 while (retry_count++ < retry_limit) { 19801 if ((err = sd_handle_mchange(un)) == 0) { 19802 break; 19803 } 19804 if (err == EAGAIN) { 19805 retry_limit = SD_UNIT_ATTENTION_RETRY; 19806 } 19807 /* Sleep for 0.5 sec. & try again */ 19808 delay(drv_usectohz(500000)); 19809 } 19810 19811 /* 19812 * Dispatch (retry or fail) the original command here, 19813 * along with appropriate console messages.... 19814 * 19815 * Must grab the mutex before calling sd_retry_command, 19816 * sd_print_sense_msg and sd_return_failed_command. 19817 */ 19818 mutex_enter(SD_MUTEX(un)); 19819 if (err != SD_CMD_SUCCESS) { 19820 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19821 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 19822 si.ssi_severity = SCSI_ERR_FATAL; 19823 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 19824 sd_return_failed_command(un, bp, EIO); 19825 } else { 19826 sd_retry_command(un, bp, SD_RETRIES_UA, sd_print_sense_msg, 19827 &si, EIO, (clock_t)0, NULL); 19828 } 19829 mutex_exit(SD_MUTEX(un)); 19830 } 19831 19832 19833 19834 /* 19835 * Function: sd_handle_mchange 19836 * 19837 * Description: Perform geometry validation & other recovery when CDROM 19838 * has been removed from drive.
19839 * 19840 * Return Code: 0 for success 19841 * errno-type return code of either sd_send_scsi_DOORLOCK() or 19842 * sd_send_scsi_READ_CAPACITY() 19843 * 19844 * Context: Executes in a taskq() thread context 19845 */ 19846 19847 static int 19848 sd_handle_mchange(struct sd_lun *un) 19849 { 19850 uint64_t capacity; 19851 uint32_t lbasize; 19852 int rval; 19853 sd_ssc_t *ssc; 19854 19855 ASSERT(!mutex_owned(SD_MUTEX(un))); 19856 ASSERT(un->un_f_monitor_media_state); 19857 19858 ssc = sd_ssc_init(un); 19859 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 19860 SD_PATH_DIRECT_PRIORITY); 19861 19862 if (rval != 0) 19863 goto failed; 19864 19865 mutex_enter(SD_MUTEX(un)); 19866 sd_update_block_info(un, lbasize, capacity); 19867 19868 if (un->un_errstats != NULL) { 19869 struct sd_errstats *stp = 19870 (struct sd_errstats *)un->un_errstats->ks_data; 19871 stp->sd_capacity.value.ui64 = (uint64_t) 19872 ((uint64_t)un->un_blockcount * 19873 (uint64_t)un->un_tgt_blocksize); 19874 } 19875 19876 /* 19877 * Check if the media in the device is writable or not 19878 */ 19879 if (ISCD(un)) { 19880 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY); 19881 } 19882 19883 /* 19884 * Note: Maybe let the strategy/partitioning chain worry about getting 19885 * valid geometry. 19886 */ 19887 mutex_exit(SD_MUTEX(un)); 19888 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 19889 19890 19891 if (cmlb_validate(un->un_cmlbhandle, 0, 19892 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 19893 sd_ssc_fini(ssc); 19894 return (EIO); 19895 } else { 19896 if (un->un_f_pkstats_enabled) { 19897 sd_set_pstats(un); 19898 SD_TRACE(SD_LOG_IO_PARTITION, un, 19899 "sd_handle_mchange: un:0x%p pstats created and " 19900 "set\n", un); 19901 } 19902 } 19903 19904 /* 19905 * Try to lock the door 19906 */ 19907 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 19908 SD_PATH_DIRECT_PRIORITY); 19909 failed: 19910 if (rval != 0) 19911 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19912 sd_ssc_fini(ssc); 19913 return (rval); 19914 } 19915 19916 19917 /* 19918 * Function: sd_send_scsi_DOORLOCK 19919 * 19920 * Description: Issue the scsi DOOR LOCK command 19921 * 19922 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19923 * structure for this target. 19924 * flag - SD_REMOVAL_ALLOW 19925 * SD_REMOVAL_PREVENT 19926 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19927 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19928 * to use the USCSI "direct" chain and bypass the normal 19929 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19930 * command is issued as part of an error recovery action. 19931 * 19932 * Return Code: 0 - Success 19933 * errno return code from sd_ssc_send() 19934 * 19935 * Context: Can sleep. 19936 */ 19937 19938 static int 19939 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag) 19940 { 19941 struct scsi_extended_sense sense_buf; 19942 union scsi_cdb cdb; 19943 struct uscsi_cmd ucmd_buf; 19944 int status; 19945 struct sd_lun *un; 19946 19947 ASSERT(ssc != NULL); 19948 un = ssc->ssc_un; 19949 ASSERT(un != NULL); 19950 ASSERT(!mutex_owned(SD_MUTEX(un))); 19951 19952 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 19953 19954 /* already determined doorlock is not supported, fake success */ 19955 if (un->un_f_doorlock_supported == FALSE) { 19956 return (0); 19957 } 19958 19959 /* 19960 * If we are ejecting and see an SD_REMOVAL_PREVENT 19961 * ignore the command so we can complete the eject 19962 * operation. 
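 * For reference, the CDB built below is the 6-byte PREVENT ALLOW
 * MEDIUM REMOVAL command (SCMD_DOORLOCK, opcode 0x1E), with the
 * Prevent bit carried in byte 4; assuming SD_REMOVAL_PREVENT == 1,
 * the resulting CDB is:
 *
 *	0x1E 0x00 0x00 0x00 0x01 0x00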
*/ 19964 if (flag == SD_REMOVAL_PREVENT) { 19965 mutex_enter(SD_MUTEX(un)); 19966 if (un->un_f_ejecting == TRUE) { 19967 mutex_exit(SD_MUTEX(un)); 19968 return (EAGAIN); 19969 } 19970 mutex_exit(SD_MUTEX(un)); 19971 } 19972 19973 bzero(&cdb, sizeof (cdb)); 19974 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19975 19976 cdb.scc_cmd = SCMD_DOORLOCK; 19977 cdb.cdb_opaque[4] = (uchar_t)flag; 19978 19979 ucmd_buf.uscsi_cdb = (char *)&cdb; 19980 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19981 ucmd_buf.uscsi_bufaddr = NULL; 19982 ucmd_buf.uscsi_buflen = 0; 19983 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19984 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19985 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19986 ucmd_buf.uscsi_timeout = 15; 19987 19988 SD_TRACE(SD_LOG_IO, un, 19989 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n"); 19990 19991 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19992 UIO_SYSSPACE, path_flag); 19993 19994 if (status == 0) 19995 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19996 19997 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 19998 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19999 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 20000 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 20001 20002 /* fake success and skip subsequent doorlock commands */ 20003 un->un_f_doorlock_supported = FALSE; 20004 return (0); 20005 } 20006 20007 return (status); 20008 } 20009 20010 /* 20011 * Function: sd_send_scsi_READ_CAPACITY 20012 * 20013 * Description: This routine uses the scsi READ CAPACITY command to determine 20014 * the device capacity in number of blocks and the device native 20015 * block size. If this function returns a failure, then the 20016 * values in *capp and *lbap are undefined. If the capacity 20017 * returned is 0xffffffff then the LUN is too large for a 20018 * normal READ CAPACITY command and the results of a 20019 * READ CAPACITY 16 will be used instead. 20020 * 20021 * Arguments: ssc - ssc contains ptr to soft state struct for the target 20022 * capp - ptr to unsigned 64-bit variable to receive the 20023 * capacity value from the command. 20024 * lbap - ptr to unsigned 32-bit variable to receive the 20025 * block size value from the command 20026 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20027 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20028 * to use the USCSI "direct" chain and bypass the normal 20029 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 20030 * command is issued as part of an error recovery action. 20031 * 20032 * Return Code: 0 - Success 20033 * EIO - IO error 20034 * EACCES - Reservation conflict detected 20035 * EAGAIN - Device is becoming ready 20036 * errno return code from sd_ssc_send() 20037 * 20038 * Context: Can sleep. Blocks until command completes.
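 *
 * A minimal calling sketch (illustrative only; assumes an already
 * initialized ssc and elides error handling):
 *
 *	uint64_t cap;
 *	uint32_t lbs;
 *	if (sd_send_scsi_READ_CAPACITY(ssc, &cap, &lbs,
 *	    SD_PATH_DIRECT) == 0) {
 *		cap now holds the block count, lbs the logical block size
 *	}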
20039 */ 20040 20041 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 20042 20043 static int 20044 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap, 20045 int path_flag) 20046 { 20047 struct scsi_extended_sense sense_buf; 20048 struct uscsi_cmd ucmd_buf; 20049 union scsi_cdb cdb; 20050 uint32_t *capacity_buf; 20051 uint64_t capacity; 20052 uint32_t lbasize; 20053 uint32_t pbsize; 20054 int status; 20055 struct sd_lun *un; 20056 20057 ASSERT(ssc != NULL); 20058 20059 un = ssc->ssc_un; 20060 ASSERT(un != NULL); 20061 ASSERT(!mutex_owned(SD_MUTEX(un))); 20062 ASSERT(capp != NULL); 20063 ASSERT(lbap != NULL); 20064 20065 SD_TRACE(SD_LOG_IO, un, 20066 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 20067 20068 /* 20069 * First send a READ_CAPACITY command to the target. 20070 * (This command is mandatory under SCSI-2.) 20071 * 20072 * Set up the CDB for the READ_CAPACITY command. The Partial 20073 * Medium Indicator bit is cleared. The address field must be 20074 * zero if the PMI bit is zero. 20075 */ 20076 bzero(&cdb, sizeof (cdb)); 20077 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20078 20079 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 20080 20081 cdb.scc_cmd = SCMD_READ_CAPACITY; 20082 20083 ucmd_buf.uscsi_cdb = (char *)&cdb; 20084 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20085 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 20086 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 20087 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20088 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 20089 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20090 ucmd_buf.uscsi_timeout = 60; 20091 20092 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20093 UIO_SYSSPACE, path_flag); 20094 20095 switch (status) { 20096 case 0: 20097 /* Return failure if we did not get valid capacity data. */ 20098 if (ucmd_buf.uscsi_resid != 0) { 20099 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20100 "sd_send_scsi_READ_CAPACITY received invalid " 20101 "capacity data"); 20102 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20103 return (EIO); 20104 } 20105 /* 20106 * Read capacity and block size from the READ CAPACITY 10 data. 20107 * This data may be adjusted later due to device specific 20108 * issues. 20109 * 20110 * According to the SCSI spec, the READ CAPACITY 10 20111 * command returns the following: 20112 * 20113 * bytes 0-3: Maximum logical block address available. 20114 * (MSB in byte:0 & LSB in byte:3) 20115 * 20116 * bytes 4-7: Block length in bytes 20117 * (MSB in byte:4 & LSB in byte:7) 20118 * 20119 */ 20120 capacity = BE_32(capacity_buf[0]); 20121 lbasize = BE_32(capacity_buf[1]); 20122 20123 /* 20124 * Done with capacity_buf 20125 */ 20126 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20127 20128 /* 20129 * if the reported capacity is set to all 0xf's, then 20130 * this disk is too large and requires SBC-2 commands. 20131 * Reissue the request using READ CAPACITY 16. 20132 */ 20133 if (capacity == 0xffffffff) { 20134 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 20135 status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, 20136 &lbasize, &pbsize, path_flag); 20137 if (status != 0) { 20138 return (status); 20139 } else { 20140 goto rc16_done; 20141 } 20142 } 20143 break; /* Success! 
*/ 20144 case EIO: 20145 switch (ucmd_buf.uscsi_status) { 20146 case STATUS_RESERVATION_CONFLICT: 20147 status = EACCES; 20148 break; 20149 case STATUS_CHECK: 20150 /* 20151 * Check condition; look for ASC/ASCQ of 0x04/0x01 20152 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 20153 */ 20154 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20155 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 20156 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 20157 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20158 return (EAGAIN); 20159 } 20160 break; 20161 default: 20162 break; 20163 } 20164 /* FALLTHRU */ 20165 default: 20166 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20167 return (status); 20168 } 20169 20170 /* 20171 * Some ATAPI CD-ROM drives report inaccurate LBA size values 20172 * (2352 and 0 are common) so for these devices always force the value 20173 * to 2048 as required by the ATAPI specs. 20174 */ 20175 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 20176 lbasize = 2048; 20177 } 20178 20179 /* 20180 * Get the maximum LBA value from the READ CAPACITY data. 20181 * Here we assume that the Partial Medium Indicator (PMI) bit 20182 * was cleared when issuing the command. This means that the LBA 20183 * returned from the device is the LBA of the last logical block 20184 * on the logical unit. The actual logical block count will be 20185 * this value plus one. 20186 */ 20187 capacity += 1; 20188 20189 /* 20190 * Currently, for removable media, the capacity is saved in terms 20191 * of un->un_sys_blocksize, so scale the capacity value to reflect this. 20192 */ 20193 if (un->un_f_has_removable_media) 20194 capacity *= (lbasize / un->un_sys_blocksize); 20195 20196 rc16_done: 20197 20198 /* 20199 * Copy the values from the READ CAPACITY command into the space 20200 * provided by the caller. 20201 */ 20202 *capp = capacity; 20203 *lbap = lbasize; 20204 20205 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 20206 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 20207 20208 /* 20209 * Both the lbasize and capacity from the device must be nonzero, 20210 * otherwise we assume that the values are not valid and return 20211 * failure to the caller. (4203735) 20212 */ 20213 if ((capacity == 0) || (lbasize == 0)) { 20214 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20215 "sd_send_scsi_READ_CAPACITY received invalid value " 20216 "capacity %llu lbasize %d", capacity, lbasize); 20217 return (EIO); 20218 } 20219 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20220 return (0); 20221 } 20222 20223 /* 20224 * Function: sd_send_scsi_READ_CAPACITY_16 20225 * 20226 * Description: This routine uses the scsi READ CAPACITY 16 command to 20227 * determine the device capacity in number of blocks and the 20228 * device native block size. If this function returns a failure, 20229 * then the values in *capp and *lbap are undefined. 20230 * This routine should be called by sd_send_scsi_READ_CAPACITY, 20231 * which will apply any device specific adjustments to capacity 20232 * and lbasize. One exception: it is also called by 20233 * sd_get_media_info_ext, where there is no need to 20234 * adjust the capacity and lbasize. 20235 * 20236 * Arguments: ssc - ssc contains ptr to soft state struct for the target 20237 * capp - ptr to unsigned 64-bit variable to receive the 20238 * capacity value from the command.
* lbap - ptr to unsigned 32-bit variable to receive the 20240 * block size value from the command 20241 * psp - ptr to unsigned 32-bit variable to receive the 20242 * physical block size value from the command 20243 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20244 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20245 * to use the USCSI "direct" chain and bypass the normal 20246 * command waitq. SD_PATH_DIRECT_PRIORITY is used when 20247 * this command is issued as part of an error recovery 20248 * action. 20249 * 20250 * Return Code: 0 - Success 20251 * EIO - IO error 20252 * EACCES - Reservation conflict detected 20253 * EAGAIN - Device is becoming ready 20254 * errno return code from sd_ssc_send() 20255 * 20256 * Context: Can sleep. Blocks until command completes. 20257 */ 20258 20259 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16) 20260 20261 static int 20262 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap, 20263 uint32_t *psp, int path_flag) 20264 { 20265 struct scsi_extended_sense sense_buf; 20266 struct uscsi_cmd ucmd_buf; 20267 union scsi_cdb cdb; 20268 uint64_t *capacity16_buf; 20269 uint64_t capacity; 20270 uint32_t lbasize; 20271 uint32_t pbsize; 20272 uint32_t lbpb_exp; 20273 int status; 20274 struct sd_lun *un; 20275 20276 ASSERT(ssc != NULL); 20277 20278 un = ssc->ssc_un; 20279 ASSERT(un != NULL); 20280 ASSERT(!mutex_owned(SD_MUTEX(un))); 20281 ASSERT(capp != NULL); 20282 ASSERT(lbap != NULL); 20283 20284 SD_TRACE(SD_LOG_IO, un, 20285 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un); 20286 20287 /* 20288 * First send a READ_CAPACITY_16 command to the target. 20289 * 20290 * Set up the CDB for the READ_CAPACITY_16 command. The Partial 20291 * Medium Indicator bit is cleared. The address field must be 20292 * zero if the PMI bit is zero. 20293 */ 20294 bzero(&cdb, sizeof (cdb)); 20295 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20296 20297 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP); 20298 20299 ucmd_buf.uscsi_cdb = (char *)&cdb; 20300 ucmd_buf.uscsi_cdblen = CDB_GROUP4; 20301 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; 20302 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; 20303 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20304 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 20305 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20306 ucmd_buf.uscsi_timeout = 60; 20307 20308 /* 20309 * Read Capacity (16) is a Service Action In command. One 20310 * command byte (0x9E) is overloaded for multiple operations, 20311 * with the second CDB byte specifying the desired operation 20312 */ 20313 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4; 20314 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4; 20315 20316 /* 20317 * Fill in allocation length field 20318 */ 20319 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen); 20320 20321 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20322 UIO_SYSSPACE, path_flag); 20323 20324 switch (status) { 20325 case 0: 20326 /* Return failure if we did not get valid capacity data. */ 20327 if (ucmd_buf.uscsi_resid > 20) { 20328 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20329 "sd_send_scsi_READ_CAPACITY_16 received invalid " 20330 "capacity data"); 20331 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20332 return (EIO); 20333 } 20334 20335 /* 20336 * Read capacity and block size from the READ CAPACITY 16 data. 20337 * This data may be adjusted later due to device specific 20338 * issues.
20339 * 20340 * According to the SCSI spec, the READ CAPACITY 16 20341 * command returns the following: 20342 * 20343 * bytes 0-7: Maximum logical block address available. 20344 * (MSB in byte:0 & LSB in byte:7) 20345 * 20346 * bytes 8-11: Block length in bytes 20347 * (MSB in byte:8 & LSB in byte:11) 20348 * 20349 * byte 13: LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT 20350 */ 20351 capacity = BE_64(capacity16_buf[0]); 20352 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 20353 lbpb_exp = (BE_64(capacity16_buf[1]) >> 16) & 0x0f; 20354 20355 pbsize = lbasize << lbpb_exp; 20356 20357 /* 20358 * Done with capacity16_buf 20359 */ 20360 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20361 20362 /* 20363 * if the reported capacity is set to all 0xf's, then 20364 * this disk is too large. This could only happen with 20365 * a device that supports LBAs larger than 64 bits which 20366 * are not defined by any current T10 standards. 20367 */ 20368 if (capacity == 0xffffffffffffffff) { 20369 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20370 "disk is too large"); 20371 return (EIO); 20372 } 20373 break; /* Success! */ 20374 case EIO: 20375 switch (ucmd_buf.uscsi_status) { 20376 case STATUS_RESERVATION_CONFLICT: 20377 status = EACCES; 20378 break; 20379 case STATUS_CHECK: 20380 /* 20381 * Check condition; look for ASC/ASCQ of 0x04/0x01 20382 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 20383 */ 20384 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20385 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 20386 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 20387 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20388 return (EAGAIN); 20389 } 20390 break; 20391 default: 20392 break; 20393 } 20394 /* FALLTHRU */ 20395 default: 20396 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20397 return (status); 20398 } 20399 20400 /* 20401 * Some ATAPI CD-ROM drives report inaccurate LBA size values 20402 * (2352 and 0 are common) so for these devices always force the value 20403 * to 2048 as required by the ATAPI specs. 20404 */ 20405 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 20406 lbasize = 2048; 20407 } 20408 20409 /* 20410 * Get the maximum LBA value from the READ CAPACITY 16 data. 20411 * Here we assume that the Partial Medium Indicator (PMI) bit 20412 * was cleared when issuing the command. This means that the LBA 20413 * returned from the device is the LBA of the last logical block 20414 * on the logical unit. The actual logical block count will be 20415 * this value plus one. 20416 */ 20417 capacity += 1; 20418 20419 /* 20420 * Currently, for removable media, the capacity is saved in terms 20421 * of un->un_sys_blocksize, so scale the capacity value to reflect this. 
*/ 20423 if (un->un_f_has_removable_media) 20424 capacity *= (lbasize / un->un_sys_blocksize); 20425 20426 *capp = capacity; 20427 *lbap = lbasize; 20428 *psp = pbsize; 20429 20430 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 20431 "capacity:0x%llx lbasize:0x%x, pbsize: 0x%x\n", 20432 capacity, lbasize, pbsize); 20433 20434 if ((capacity == 0) || (lbasize == 0) || (pbsize == 0)) { 20435 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20436 "sd_send_scsi_READ_CAPACITY_16 received invalid value " 20437 "capacity %llu lbasize %d pbsize %d", capacity, lbasize, pbsize); 20438 return (EIO); 20439 } 20440 20441 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20442 return (0); 20443 } 20444 20445 20446 /* 20447 * Function: sd_send_scsi_START_STOP_UNIT 20448 * 20449 * Description: Issue a scsi START STOP UNIT command to the target. 20450 * 20451 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20452 * structure for this target. 20453 * pc_flag - SD_POWER_CONDITION 20454 * SD_START_STOP 20455 * flag - SD_TARGET_START 20456 * SD_TARGET_STOP 20457 * SD_TARGET_EJECT 20458 * SD_TARGET_CLOSE 20459 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20460 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20461 * to use the USCSI "direct" chain and bypass the normal 20462 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 20463 * command is issued as part of an error recovery action. 20464 * 20465 * Return Code: 0 - Success 20466 * EIO - IO error 20467 * EACCES - Reservation conflict detected 20468 * ENXIO - Not Ready, medium not present 20469 * errno return code from sd_ssc_send() 20470 * 20471 * Context: Can sleep. 20472 */ 20473 20474 static int 20475 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, int flag, 20476 int path_flag) 20477 { 20478 struct scsi_extended_sense sense_buf; 20479 union scsi_cdb cdb; 20480 struct uscsi_cmd ucmd_buf; 20481 int status; 20482 struct sd_lun *un; 20483 20484 ASSERT(ssc != NULL); 20485 un = ssc->ssc_un; 20486 ASSERT(un != NULL); 20487 ASSERT(!mutex_owned(SD_MUTEX(un))); 20488 20489 SD_TRACE(SD_LOG_IO, un, 20490 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 20491 20492 if (un->un_f_check_start_stop && 20493 (pc_flag == SD_START_STOP) && 20494 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 20495 (un->un_f_start_stop_supported != TRUE)) { 20496 return (0); 20497 } 20498 20499 /* 20500 * If we are performing an eject operation and 20501 * we receive any command other than SD_TARGET_EJECT 20502 * we should immediately return. 20503 */ 20504 if (flag != SD_TARGET_EJECT) { 20505 mutex_enter(SD_MUTEX(un)); 20506 if (un->un_f_ejecting == TRUE) { 20507 mutex_exit(SD_MUTEX(un)); 20508 return (EAGAIN); 20509 } 20510 mutex_exit(SD_MUTEX(un)); 20511 } 20512 20513 bzero(&cdb, sizeof (cdb)); 20514 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20515 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20516 20517 cdb.scc_cmd = SCMD_START_STOP; 20518 cdb.cdb_opaque[4] = (pc_flag == SD_POWER_CONDITION) ?
(uchar_t)(flag << 4) : (uchar_t)flag; 20520 20521 ucmd_buf.uscsi_cdb = (char *)&cdb; 20522 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20523 ucmd_buf.uscsi_bufaddr = NULL; 20524 ucmd_buf.uscsi_buflen = 0; 20525 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20526 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20527 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20528 ucmd_buf.uscsi_timeout = 200; 20529 20530 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20531 UIO_SYSSPACE, path_flag); 20532 20533 switch (status) { 20534 case 0: 20535 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20536 break; /* Success! */ 20537 case EIO: 20538 switch (ucmd_buf.uscsi_status) { 20539 case STATUS_RESERVATION_CONFLICT: 20540 status = EACCES; 20541 break; 20542 case STATUS_CHECK: 20543 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 20544 switch (scsi_sense_key( 20545 (uint8_t *)&sense_buf)) { 20546 case KEY_ILLEGAL_REQUEST: 20547 status = ENOTSUP; 20548 break; 20549 case KEY_NOT_READY: 20550 if (scsi_sense_asc( 20551 (uint8_t *)&sense_buf) 20552 == 0x3A) { 20553 status = ENXIO; 20554 } 20555 break; 20556 default: 20557 break; 20558 } 20559 } 20560 break; 20561 default: 20562 break; 20563 } 20564 break; 20565 default: 20566 break; 20567 } 20568 20569 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 20570 20571 return (status); 20572 } 20573 20574 20575 /* 20576 * Function: sd_start_stop_unit_callback 20577 * 20578 * Description: timeout(9F) callback to begin recovery process for a 20579 * device that has spun down. 20580 * 20581 * Arguments: arg - pointer to associated softstate struct. 20582 * 20583 * Context: Executes in a timeout(9F) thread context 20584 */ 20585 20586 static void 20587 sd_start_stop_unit_callback(void *arg) 20588 { 20589 struct sd_lun *un = arg; 20590 ASSERT(un != NULL); 20591 ASSERT(!mutex_owned(SD_MUTEX(un))); 20592 20593 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 20594 20595 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 20596 } 20597 20598 20599 /* 20600 * Function: sd_start_stop_unit_task 20601 * 20602 * Description: Recovery procedure when a drive is spun down. 20603 * 20604 * Arguments: arg - pointer to associated softstate struct. 20605 * 20606 * Context: Executes in a taskq() thread context 20607 */ 20608 20609 static void 20610 sd_start_stop_unit_task(void *arg) 20611 { 20612 struct sd_lun *un = arg; 20613 sd_ssc_t *ssc; 20614 int power_level; 20615 int rval; 20616 20617 ASSERT(un != NULL); 20618 ASSERT(!mutex_owned(SD_MUTEX(un))); 20619 20620 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 20621 20622 /* 20623 * Some unformatted drives report a not-ready error; there is no need 20624 * to restart if a format has been initiated. 20625 */ 20626 mutex_enter(SD_MUTEX(un)); 20627 if (un->un_f_format_in_progress == TRUE) { 20628 mutex_exit(SD_MUTEX(un)); 20629 return; 20630 } 20631 mutex_exit(SD_MUTEX(un)); 20632 20633 ssc = sd_ssc_init(un); 20634 /* 20635 * When a START STOP command is issued from here, it is part of a 20636 * failure recovery operation and must be issued before any other 20637 * commands, including any pending retries. Thus it must be sent 20638 * using SD_PATH_DIRECT_PRIORITY. Whether or not the spin-up 20639 * succeeds, we will start I/O after the attempt. 20640 * If power condition is supported and the current power level 20641 * is capable of performing I/O, we should set the power condition 20642 * to that level. Otherwise, set the power condition to ACTIVE.
*/ 20644 if (un->un_f_power_condition_supported) { 20645 mutex_enter(SD_MUTEX(un)); 20646 ASSERT(SD_PM_IS_LEVEL_VALID(un, un->un_power_level)); 20647 power_level = sd_pwr_pc.ran_perf[un->un_power_level] 20648 > 0 ? un->un_power_level : SD_SPINDLE_ACTIVE; 20649 mutex_exit(SD_MUTEX(un)); 20650 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION, 20651 sd_pl2pc[power_level], SD_PATH_DIRECT_PRIORITY); 20652 } else { 20653 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 20654 SD_TARGET_START, SD_PATH_DIRECT_PRIORITY); 20655 } 20656 20657 if (rval != 0) 20658 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 20659 sd_ssc_fini(ssc); 20660 /* 20661 * The above call blocks until the START_STOP_UNIT command completes. 20662 * Now that it has completed, we must re-try the original IO that 20663 * received the NOT READY condition in the first place. There are 20664 * three possible conditions here: 20665 * 20666 * (1) The original IO is on un_retry_bp. 20667 * (2) The original IO is on the regular wait queue, and un_retry_bp 20668 * is NULL. 20669 * (3) The original IO is on the regular wait queue, and un_retry_bp 20670 * points to some other, unrelated bp. 20671 * 20672 * For each case, we must call sd_start_cmds() with un_retry_bp 20673 * as the argument. If un_retry_bp is NULL, this will initiate 20674 * processing of the regular wait queue. If un_retry_bp is not NULL, 20675 * then this will process the bp on un_retry_bp. That may or may not 20676 * be the original IO, but that does not matter: the important thing 20677 * is to keep the IO processing going at this point. 20678 * 20679 * Note: This is a very specific error recovery sequence associated 20680 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 20681 * serialize the I/O with completion of the spin-up. 20682 */ 20683 mutex_enter(SD_MUTEX(un)); 20684 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 20685 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 20686 un, un->un_retry_bp); 20687 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 20688 sd_start_cmds(un, un->un_retry_bp); 20689 mutex_exit(SD_MUTEX(un)); 20690 20691 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 20692 } 20693 20694 20695 /* 20696 * Function: sd_send_scsi_INQUIRY 20697 * 20698 * Description: Issue the scsi INQUIRY command. 20699 * 20700 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20701 * structure for this target. 20702 * bufaddr - buffer for the INQUIRY data returned by the target 20703 * buflen - allocation length / size of the caller's buffer 20704 * evpd - EVPD bit for CDB byte 1; set to request a VPD page 20705 * page_code - VPD page code for CDB byte 2 20706 * residp - optional pointer to receive the command residual 20707 * 20708 * Return Code: 0 - Success 20709 * errno return code from sd_ssc_send() 20710 * 20711 * Context: Can sleep. Does not return until command is completed.
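 *
 * Illustrative sketch (the choice of the Unit Serial Number VPD page,
 * page code 0x80, is an assumption; error handling elided):
 *
 *	uchar_t inqbuf[0xff];
 *	size_t resid;
 *	if (sd_send_scsi_INQUIRY(ssc, inqbuf, sizeof (inqbuf),
 *	    0x01, 0x80, &resid) == 0) {
 *		valid data is sizeof (inqbuf) - resid bytes
 *	}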
20712 */ 20713 20714 static int 20715 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen, 20716 uchar_t evpd, uchar_t page_code, size_t *residp) 20717 { 20718 union scsi_cdb cdb; 20719 struct uscsi_cmd ucmd_buf; 20720 int status; 20721 struct sd_lun *un; 20722 20723 ASSERT(ssc != NULL); 20724 un = ssc->ssc_un; 20725 ASSERT(un != NULL); 20726 ASSERT(!mutex_owned(SD_MUTEX(un))); 20727 ASSERT(bufaddr != NULL); 20728 20729 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 20730 20731 bzero(&cdb, sizeof (cdb)); 20732 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20733 bzero(bufaddr, buflen); 20734 20735 cdb.scc_cmd = SCMD_INQUIRY; 20736 cdb.cdb_opaque[1] = evpd; 20737 cdb.cdb_opaque[2] = page_code; 20738 FORMG0COUNT(&cdb, buflen); 20739 20740 ucmd_buf.uscsi_cdb = (char *)&cdb; 20741 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20742 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20743 ucmd_buf.uscsi_buflen = buflen; 20744 ucmd_buf.uscsi_rqbuf = NULL; 20745 ucmd_buf.uscsi_rqlen = 0; 20746 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 20747 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 20748 20749 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20750 UIO_SYSSPACE, SD_PATH_DIRECT); 20751 20752 /* 20753 * Only handle status == 0, the upper-level caller 20754 * will put different assessment based on the context. 20755 */ 20756 if (status == 0) 20757 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20758 20759 if ((status == 0) && (residp != NULL)) { 20760 *residp = ucmd_buf.uscsi_resid; 20761 } 20762 20763 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 20764 20765 return (status); 20766 } 20767 20768 20769 /* 20770 * Function: sd_send_scsi_TEST_UNIT_READY 20771 * 20772 * Description: Issue the scsi TEST UNIT READY command. 20773 * This routine can be told to set the flag USCSI_DIAGNOSE to 20774 * prevent retrying failed commands. Use this when the intent 20775 * is either to check for device readiness, to clear a Unit 20776 * Attention, or to clear any outstanding sense data. 20777 * However under specific conditions the expected behavior 20778 * is for retries to bring a device ready, so use the flag 20779 * with caution. 20780 * 20781 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20782 * structure for this target. 20783 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 20784 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 20785 * 0: dont check for media present, do retries on cmd. 20786 * 20787 * Return Code: 0 - Success 20788 * EIO - IO error 20789 * EACCES - Reservation conflict detected 20790 * ENXIO - Not Ready, medium not present 20791 * errno return code from sd_ssc_send() 20792 * 20793 * Context: Can sleep. Does not return until command is completed. 20794 */ 20795 20796 static int 20797 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag) 20798 { 20799 struct scsi_extended_sense sense_buf; 20800 union scsi_cdb cdb; 20801 struct uscsi_cmd ucmd_buf; 20802 int status; 20803 struct sd_lun *un; 20804 20805 ASSERT(ssc != NULL); 20806 un = ssc->ssc_un; 20807 ASSERT(un != NULL); 20808 ASSERT(!mutex_owned(SD_MUTEX(un))); 20809 20810 SD_TRACE(SD_LOG_IO, un, 20811 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 20812 20813 /* 20814 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 20815 * timeouts when they receive a TUR and the queue is not empty. 
Check 20816 * the configuration flag set during attach (indicating the drive has 20817 * this firmware bug) and un_ncmds_in_transport before issuing the 20818 * TUR. If there are 20819 * pending commands, return success; this is a bit arbitrary but is OK 20820 * for non-removables (i.e. the eliteI disks) and non-clustering 20821 * configurations. 20822 */ 20823 if (un->un_f_cfg_tur_check == TRUE) { 20824 mutex_enter(SD_MUTEX(un)); 20825 if (un->un_ncmds_in_transport != 0) { 20826 mutex_exit(SD_MUTEX(un)); 20827 return (0); 20828 } 20829 mutex_exit(SD_MUTEX(un)); 20830 } 20831 20832 bzero(&cdb, sizeof (cdb)); 20833 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20834 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20835 20836 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 20837 20838 ucmd_buf.uscsi_cdb = (char *)&cdb; 20839 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20840 ucmd_buf.uscsi_bufaddr = NULL; 20841 ucmd_buf.uscsi_buflen = 0; 20842 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20843 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20844 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20845 20846 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 20847 if ((flag & SD_DONT_RETRY_TUR) != 0) { 20848 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 20849 } 20850 ucmd_buf.uscsi_timeout = 60; 20851 20852 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20853 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 20854 SD_PATH_STANDARD)); 20855 20856 switch (status) { 20857 case 0: 20858 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20859 break; /* Success! */ 20860 case EIO: 20861 switch (ucmd_buf.uscsi_status) { 20862 case STATUS_RESERVATION_CONFLICT: 20863 status = EACCES; 20864 break; 20865 case STATUS_CHECK: 20866 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 20867 break; 20868 } 20869 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20870 (scsi_sense_key((uint8_t *)&sense_buf) == 20871 KEY_NOT_READY) && 20872 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 20873 status = ENXIO; 20874 } 20875 break; 20876 default: 20877 break; 20878 } 20879 break; 20880 default: 20881 break; 20882 } 20883 20884 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 20885 20886 return (status); 20887 } 20888 20889 /* 20890 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 20891 * 20892 * Description: Issue the scsi PERSISTENT RESERVE IN command. 20893 * 20894 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20895 * structure for this target. 20896 * 20897 * Return Code: 0 - Success 20898 * EACCES 20899 * ENOTSUP 20900 * errno return code from sd_ssc_send() 20901 * 20902 * Context: Can sleep. Does not return until command is completed.
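 *
 * Illustrative call (a sketch; the buffer sizing, here room for
 * eight keys, is an assumption):
 *
 *	uchar_t keybuf[MHIOC_RESV_KEY_SIZE * 8];
 *	status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS,
 *	    sizeof (keybuf), keybuf);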
20903 */ 20904 20905 static int 20906 sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd, 20907 uint16_t data_len, uchar_t *data_bufp) 20908 { 20909 struct scsi_extended_sense sense_buf; 20910 union scsi_cdb cdb; 20911 struct uscsi_cmd ucmd_buf; 20912 int status; 20913 int no_caller_buf = FALSE; 20914 struct sd_lun *un; 20915 20916 ASSERT(ssc != NULL); 20917 un = ssc->ssc_un; 20918 ASSERT(un != NULL); 20919 ASSERT(!mutex_owned(SD_MUTEX(un))); 20920 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 20921 20922 SD_TRACE(SD_LOG_IO, un, 20923 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 20924 20925 bzero(&cdb, sizeof (cdb)); 20926 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20927 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20928 if (data_bufp == NULL) { 20929 /* Allocate a default buf if the caller did not give one */ 20930 ASSERT(data_len == 0); 20931 data_len = MHIOC_RESV_KEY_SIZE; 20932 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 20933 no_caller_buf = TRUE; 20934 } 20935 20936 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 20937 cdb.cdb_opaque[1] = usr_cmd; 20938 FORMG1COUNT(&cdb, data_len); 20939 20940 ucmd_buf.uscsi_cdb = (char *)&cdb; 20941 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20942 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 20943 ucmd_buf.uscsi_buflen = data_len; 20944 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20945 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20946 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20947 ucmd_buf.uscsi_timeout = 60; 20948 20949 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20950 UIO_SYSSPACE, SD_PATH_STANDARD); 20951 20952 switch (status) { 20953 case 0: 20954 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20955 20956 break; /* Success! */ 20957 case EIO: 20958 switch (ucmd_buf.uscsi_status) { 20959 case STATUS_RESERVATION_CONFLICT: 20960 status = EACCES; 20961 break; 20962 case STATUS_CHECK: 20963 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20964 (scsi_sense_key((uint8_t *)&sense_buf) == 20965 KEY_ILLEGAL_REQUEST)) { 20966 status = ENOTSUP; 20967 } 20968 break; 20969 default: 20970 break; 20971 } 20972 break; 20973 default: 20974 break; 20975 } 20976 20977 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 20978 20979 if (no_caller_buf == TRUE) { 20980 kmem_free(data_bufp, data_len); 20981 } 20982 20983 return (status); 20984 } 20985 20986 20987 /* 20988 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 20989 * 20990 * Description: This routine is the driver entry point for handling CD-ROM 20991 * multi-host persistent reservation requests (MHIOCGRP_INKEYS, 20992 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the 20993 * device. 20994 * 20995 * Arguments: ssc - ssc contains un - pointer to soft state struct 20996 * for the target. 20997 * usr_cmd SCSI-3 reservation facility command (one of 20998 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 20999 * SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_CLEAR) 21000 * usr_bufp - user provided pointer register, reserve descriptor or 21001 * preempt and abort structure (mhioc_register_t, 21002 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 21003 * 21004 * Return Code: 0 - Success 21005 * EACCES 21006 * ENOTSUP 21007 * errno return code from sd_ssc_send() 21008 * 21009 * Context: Can sleep. Does not return until command is completed. 
21010 */ 21011 21012 static int 21013 sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd, 21014 uchar_t *usr_bufp) 21015 { 21016 struct scsi_extended_sense sense_buf; 21017 union scsi_cdb cdb; 21018 struct uscsi_cmd ucmd_buf; 21019 int status; 21020 uchar_t data_len = sizeof (sd_prout_t); 21021 sd_prout_t *prp; 21022 struct sd_lun *un; 21023 21024 ASSERT(ssc != NULL); 21025 un = ssc->ssc_un; 21026 ASSERT(un != NULL); 21027 ASSERT(!mutex_owned(SD_MUTEX(un))); 21028 ASSERT(data_len == 24); /* required by scsi spec */ 21029 21030 SD_TRACE(SD_LOG_IO, un, 21031 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 21032 21033 if (usr_bufp == NULL) { 21034 return (EINVAL); 21035 } 21036 21037 bzero(&cdb, sizeof (cdb)); 21038 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21039 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21040 prp = kmem_zalloc(data_len, KM_SLEEP); 21041 21042 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 21043 cdb.cdb_opaque[1] = usr_cmd; 21044 FORMG1COUNT(&cdb, data_len); 21045 21046 ucmd_buf.uscsi_cdb = (char *)&cdb; 21047 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 21048 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 21049 ucmd_buf.uscsi_buflen = data_len; 21050 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21051 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21052 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 21053 ucmd_buf.uscsi_timeout = 60; 21054 21055 switch (usr_cmd) { 21056 case SD_SCSI3_REGISTER: { 21057 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 21058 21059 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 21060 bcopy(ptr->newkey.key, prp->service_key, 21061 MHIOC_RESV_KEY_SIZE); 21062 prp->aptpl = ptr->aptpl; 21063 break; 21064 } 21065 case SD_SCSI3_CLEAR: { 21066 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 21067 21068 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 21069 break; 21070 } 21071 case SD_SCSI3_RESERVE: 21072 case SD_SCSI3_RELEASE: { 21073 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 21074 21075 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 21076 prp->scope_address = BE_32(ptr->scope_specific_addr); 21077 cdb.cdb_opaque[2] = ptr->type; 21078 break; 21079 } 21080 case SD_SCSI3_PREEMPTANDABORT: { 21081 mhioc_preemptandabort_t *ptr = 21082 (mhioc_preemptandabort_t *)usr_bufp; 21083 21084 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 21085 bcopy(ptr->victim_key.key, prp->service_key, 21086 MHIOC_RESV_KEY_SIZE); 21087 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 21088 cdb.cdb_opaque[2] = ptr->resvdesc.type; 21089 ucmd_buf.uscsi_flags |= USCSI_HEAD; 21090 break; 21091 } 21092 case SD_SCSI3_REGISTERANDIGNOREKEY: 21093 { 21094 mhioc_registerandignorekey_t *ptr; 21095 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 21096 bcopy(ptr->newkey.key, 21097 prp->service_key, MHIOC_RESV_KEY_SIZE); 21098 prp->aptpl = ptr->aptpl; 21099 break; 21100 } 21101 default: 21102 ASSERT(FALSE); 21103 break; 21104 } 21105 21106 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21107 UIO_SYSSPACE, SD_PATH_STANDARD); 21108 21109 switch (status) { 21110 case 0: 21111 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21112 break; /* Success! 
*/ 21113 case EIO: 21114 switch (ucmd_buf.uscsi_status) { 21115 case STATUS_RESERVATION_CONFLICT: 21116 status = EACCES; 21117 break; 21118 case STATUS_CHECK: 21119 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 21120 (scsi_sense_key((uint8_t *)&sense_buf) == 21121 KEY_ILLEGAL_REQUEST)) { 21122 status = ENOTSUP; 21123 } 21124 break; 21125 default: 21126 break; 21127 } 21128 break; 21129 default: 21130 break; 21131 } 21132 21133 kmem_free(prp, data_len); 21134 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 21135 return (status); 21136 } 21137 21138 21139 /* 21140 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 21141 * 21142 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 21143 * 21144 * Arguments: un - pointer to the target's soft state struct 21145 * dkc - pointer to the callback structure 21146 * 21147 * Return Code: 0 - success 21148 * errno-type error code 21149 * 21150 * Context: kernel thread context only. 21151 * 21152 * _______________________________________________________________ 21153 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 21154 * |FLUSH_VOLATILE| | operation | 21155 * |______________|______________|_________________________________| 21156 * | 0 | NULL | Synchronous flush on both | 21157 * | | | volatile and non-volatile cache | 21158 * |______________|______________|_________________________________| 21159 * | 1 | NULL | Synchronous flush on volatile | 21160 * | | | cache; disk drivers may suppress| 21161 * | | | flush if disk table indicates | 21162 * | | | non-volatile cache | 21163 * |______________|______________|_________________________________| 21164 * | 0 | !NULL | Asynchronous flush on both | 21165 * | | | volatile and non-volatile cache;| 21166 * |______________|______________|_________________________________| 21167 * | 1 | !NULL | Asynchronous flush on volatile | 21168 * | | | cache; disk drivers may suppress| 21169 * | | | flush if disk table indicates | 21170 * | | | non-volatile cache | 21171 * |______________|______________|_________________________________| 21172 * 21173 */ 21174 21175 static int 21176 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 21177 { 21178 struct sd_uscsi_info *uip; 21179 struct uscsi_cmd *uscmd; 21180 union scsi_cdb *cdb; 21181 struct buf *bp; 21182 int rval = 0; 21183 int is_async; 21184 21185 SD_TRACE(SD_LOG_IO, un, 21186 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 21187 21188 ASSERT(un != NULL); 21189 ASSERT(!mutex_owned(SD_MUTEX(un))); 21190 21191 if (dkc == NULL || dkc->dkc_callback == NULL) { 21192 is_async = FALSE; 21193 } else { 21194 is_async = TRUE; 21195 } 21196 21197 mutex_enter(SD_MUTEX(un)); 21198 /* check whether cache flush should be suppressed */ 21199 if (un->un_f_suppress_cache_flush == TRUE) { 21200 mutex_exit(SD_MUTEX(un)); 21201 /* 21202 * suppress the cache flush if the device is told to do 21203 * so by sd.conf or disk table 21204 */ 21205 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 21206 skip the cache flush since suppress_cache_flush is %d!\n", 21207 un->un_f_suppress_cache_flush); 21208 21209 if (is_async == TRUE) { 21210 /* invoke callback for asynchronous flush */ 21211 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 21212 } 21213 return (rval); 21214 } 21215 mutex_exit(SD_MUTEX(un)); 21216 21217 /* 21218 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 21219 * set properly 21220 */ 21221 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 21222 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 21223 21224 
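	/*
	 * Illustrative async usage of this routine (a sketch; the callback
	 * and cookie names here are assumptions, not part of this file):
	 *
	 *	struct dk_callback dkc;
	 *	dkc.dkc_callback = my_flush_done;
	 *	dkc.dkc_cookie = my_cookie;
	 *	dkc.dkc_flag = FLUSH_VOLATILE;
	 *	(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, &dkc);
	 */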
mutex_enter(SD_MUTEX(un)); 21225 if (dkc != NULL && un->un_f_sync_nv_supported && 21226 (dkc->dkc_flag & FLUSH_VOLATILE)) { 21227 /* 21228 * if the device supports SYNC_NV bit, turn on 21229 * the SYNC_NV bit to only flush volatile cache 21230 */ 21231 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 21232 } 21233 mutex_exit(SD_MUTEX(un)); 21234 21235 /* 21236 * First get some memory for the uscsi_cmd struct and cdb 21237 * and initialize for SYNCHRONIZE_CACHE cmd. 21238 */ 21239 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 21240 uscmd->uscsi_cdblen = CDB_GROUP1; 21241 uscmd->uscsi_cdb = (caddr_t)cdb; 21242 uscmd->uscsi_bufaddr = NULL; 21243 uscmd->uscsi_buflen = 0; 21244 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 21245 uscmd->uscsi_rqlen = SENSE_LENGTH; 21246 uscmd->uscsi_rqresid = SENSE_LENGTH; 21247 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 21248 uscmd->uscsi_timeout = sd_io_time; 21249 21250 /* 21251 * Allocate an sd_uscsi_info struct and fill it with the info 21252 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 21253 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 21254 * since we allocate the buf here in this function, we do not 21255 * need to preserve the prior contents of b_private. 21256 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 21257 */ 21258 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 21259 uip->ui_flags = SD_PATH_DIRECT; 21260 uip->ui_cmdp = uscmd; 21261 21262 bp = getrbuf(KM_SLEEP); 21263 bp->b_private = uip; 21264 21265 /* 21266 * Setup buffer to carry uscsi request. 21267 */ 21268 bp->b_flags = B_BUSY; 21269 bp->b_bcount = 0; 21270 bp->b_blkno = 0; 21271 21272 if (is_async == TRUE) { 21273 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 21274 uip->ui_dkc = *dkc; 21275 } 21276 21277 bp->b_edev = SD_GET_DEV(un); 21278 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 21279 21280 /* 21281 * Unset un_f_sync_cache_required flag 21282 */ 21283 mutex_enter(SD_MUTEX(un)); 21284 un->un_f_sync_cache_required = FALSE; 21285 mutex_exit(SD_MUTEX(un)); 21286 21287 (void) sd_uscsi_strategy(bp); 21288 21289 /* 21290 * If synchronous request, wait for completion 21291 * If async just return and let b_iodone callback 21292 * cleanup. 21293 * NOTE: On return, un_ncmds_in_driver will be decremented, 21294 * but it was also incremented in sd_uscsi_strategy(), so 21295 * we should be ok. 21296 */ 21297 if (is_async == FALSE) { 21298 (void) biowait(bp); 21299 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 21300 } 21301 21302 return (rval); 21303 } 21304 21305 21306 static int 21307 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 21308 { 21309 struct sd_uscsi_info *uip; 21310 struct uscsi_cmd *uscmd; 21311 uint8_t *sense_buf; 21312 struct sd_lun *un; 21313 int status; 21314 union scsi_cdb *cdb; 21315 21316 uip = (struct sd_uscsi_info *)(bp->b_private); 21317 ASSERT(uip != NULL); 21318 21319 uscmd = uip->ui_cmdp; 21320 ASSERT(uscmd != NULL); 21321 21322 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 21323 ASSERT(sense_buf != NULL); 21324 21325 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 21326 ASSERT(un != NULL); 21327 21328 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 21329 21330 status = geterror(bp); 21331 switch (status) { 21332 case 0: 21333 break; /* Success!
*/ 21334 case EIO: 21335 switch (uscmd->uscsi_status) { 21336 case STATUS_RESERVATION_CONFLICT: 21337 /* Ignore reservation conflict */ 21338 status = 0; 21339 goto done; 21340 21341 case STATUS_CHECK: 21342 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 21343 (scsi_sense_key(sense_buf) == 21344 KEY_ILLEGAL_REQUEST)) { 21345 /* Ignore Illegal Request error */ 21346 if (cdb->cdb_un.tag & SD_SYNC_NV_BIT) { 21347 mutex_enter(SD_MUTEX(un)); 21348 un->un_f_sync_nv_supported = FALSE; 21349 mutex_exit(SD_MUTEX(un)); 21350 status = 0; 21351 SD_TRACE(SD_LOG_IO, un, 21352 "un_f_sync_nv_supported \ 21353 is set to false.\n"); 21354 goto done; 21355 } 21356 21357 mutex_enter(SD_MUTEX(un)); 21358 un->un_f_sync_cache_supported = FALSE; 21359 mutex_exit(SD_MUTEX(un)); 21360 SD_TRACE(SD_LOG_IO, un, 21361 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \ 21362 un_f_sync_cache_supported set to false \ 21363 with asc = %x, ascq = %x\n", 21364 scsi_sense_asc(sense_buf), 21365 scsi_sense_ascq(sense_buf)); 21366 status = ENOTSUP; 21367 goto done; 21368 } 21369 break; 21370 default: 21371 break; 21372 } 21373 /* FALLTHRU */ 21374 default: 21375 /* 21376 * Turn on the un_f_sync_cache_required flag 21377 * since the SYNC CACHE command failed 21378 */ 21379 mutex_enter(SD_MUTEX(un)); 21380 un->un_f_sync_cache_required = TRUE; 21381 mutex_exit(SD_MUTEX(un)); 21382 21383 /* 21384 * Don't log an error message if this device 21385 * has removable media. 21386 */ 21387 if (!un->un_f_has_removable_media) { 21388 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 21389 "SYNCHRONIZE CACHE command failed (%d)\n", status); 21390 } 21391 break; 21392 } 21393 21394 done: 21395 if (uip->ui_dkc.dkc_callback != NULL) { 21396 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 21397 } 21398 21399 ASSERT((bp->b_flags & B_REMAPPED) == 0); 21400 freerbuf(bp); 21401 kmem_free(uip, sizeof (struct sd_uscsi_info)); 21402 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 21403 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 21404 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 21405 21406 return (status); 21407 } 21408 21409 21410 /* 21411 * Function: sd_send_scsi_GET_CONFIGURATION 21412 * 21413 * Description: Issues the get configuration command to the device. 21414 * Called from sd_check_for_writable_cd & sd_get_media_info; 21415 * the caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN. 21416 * Arguments: ssc 21417 * ucmdbuf 21418 * rqbuf 21419 * rqbuflen 21420 * bufaddr 21421 * buflen 21422 * path_flag 21423 * 21424 * Return Code: 0 - Success 21425 * errno return code from sd_ssc_send() 21426 * 21427 * Context: Can sleep. Does not return until command is completed. 21428 * 21429 */ 21430 21431 static int 21432 sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf, 21433 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 21434 int path_flag) 21435 { 21436 char cdb[CDB_GROUP1]; 21437 int status; 21438 struct sd_lun *un; 21439 21440 ASSERT(ssc != NULL); 21441 un = ssc->ssc_un; 21442 ASSERT(un != NULL); 21443 ASSERT(!mutex_owned(SD_MUTEX(un))); 21444 ASSERT(bufaddr != NULL); 21445 ASSERT(ucmdbuf != NULL); 21446 ASSERT(rqbuf != NULL); 21447 21448 SD_TRACE(SD_LOG_IO, un, 21449 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 21450 21451 bzero(cdb, sizeof (cdb)); 21452 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 21453 bzero(rqbuf, rqbuflen); 21454 bzero(bufaddr, buflen); 21455 21456 /* 21457 * Set up cdb field for the get configuration command.
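 * Byte 1 (the Requested Type field, 0x02) limits the response to the
 * feature named by the Starting Feature Number field; byte 8 is the
 * allocation length (SD_PROFILE_HEADER_LEN here).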
21458 */ 21459 cdb[0] = SCMD_GET_CONFIGURATION; 21460 cdb[1] = 0x02; /* Requested Type */ 21461 cdb[8] = SD_PROFILE_HEADER_LEN; 21462 ucmdbuf->uscsi_cdb = cdb; 21463 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 21464 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 21465 ucmdbuf->uscsi_buflen = buflen; 21466 ucmdbuf->uscsi_timeout = sd_io_time; 21467 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 21468 ucmdbuf->uscsi_rqlen = rqbuflen; 21469 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 21470 21471 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 21472 UIO_SYSSPACE, path_flag); 21473 21474 switch (status) { 21475 case 0: 21476 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21477 break; /* Success! */ 21478 case EIO: 21479 switch (ucmdbuf->uscsi_status) { 21480 case STATUS_RESERVATION_CONFLICT: 21481 status = EACCES; 21482 break; 21483 default: 21484 break; 21485 } 21486 break; 21487 default: 21488 break; 21489 } 21490 21491 if (status == 0) { 21492 SD_DUMP_MEMORY(un, SD_LOG_IO, 21493 "sd_send_scsi_GET_CONFIGURATION: data", 21494 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 21495 } 21496 21497 SD_TRACE(SD_LOG_IO, un, 21498 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 21499 21500 return (status); 21501 } 21502 21503 /* 21504 * Function: sd_send_scsi_feature_GET_CONFIGURATION 21505 * 21506 * Description: Issues the get configuration command to the device to 21507 * retrieve a specific feature. Called from 21508 * sd_check_for_writable_cd & sd_set_mmc_caps. 21509 * Arguments: ssc 21510 * ucmdbuf 21511 * rqbuf 21512 * rqbuflen 21513 * bufaddr 21514 * buflen 21515 * feature 21516 * 21517 * Return Code: 0 - Success 21518 * errno return code from sd_ssc_send() 21519 * 21520 * Context: Can sleep. Does not return until command is completed. 21521 * 21522 */ 21523 static int 21524 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf, 21525 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 21526 char feature, int path_flag) 21527 { 21528 char cdb[CDB_GROUP1]; 21529 int status; 21530 struct sd_lun *un; 21531 21532 ASSERT(ssc != NULL); 21533 un = ssc->ssc_un; 21534 ASSERT(un != NULL); 21535 ASSERT(!mutex_owned(SD_MUTEX(un))); 21536 ASSERT(bufaddr != NULL); 21537 ASSERT(ucmdbuf != NULL); 21538 ASSERT(rqbuf != NULL); 21539 21540 SD_TRACE(SD_LOG_IO, un, 21541 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 21542 21543 bzero(cdb, sizeof (cdb)); 21544 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 21545 bzero(rqbuf, rqbuflen); 21546 bzero(bufaddr, buflen); 21547 21548 /* 21549 * Set up cdb field for the get configuration command. 21550 */ 21551 cdb[0] = SCMD_GET_CONFIGURATION; 21552 cdb[1] = 0x02; /* Requested Type */ 21553 cdb[3] = feature; 21554 cdb[8] = buflen; 21555 ucmdbuf->uscsi_cdb = cdb; 21556 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 21557 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 21558 ucmdbuf->uscsi_buflen = buflen; 21559 ucmdbuf->uscsi_timeout = sd_io_time; 21560 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 21561 ucmdbuf->uscsi_rqlen = rqbuflen; 21562 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 21563 21564 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 21565 UIO_SYSSPACE, path_flag); 21566 21567 switch (status) { 21568 case 0: 21569 21570 break; /* Success! 
*/ 21571 case EIO: 21572 switch (ucmdbuf->uscsi_status) { 21573 case STATUS_RESERVATION_CONFLICT: 21574 status = EACCES; 21575 break; 21576 default: 21577 break; 21578 } 21579 break; 21580 default: 21581 break; 21582 } 21583 21584 if (status == 0) { 21585 SD_DUMP_MEMORY(un, SD_LOG_IO, 21586 "sd_send_scsi_feature_GET_CONFIGURATION: data", 21587 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 21588 } 21589 21590 SD_TRACE(SD_LOG_IO, un, 21591 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 21592 21593 return (status); 21594 } 21595 21596 21597 /* 21598 * Function: sd_send_scsi_MODE_SENSE 21599 * 21600 * Description: Utility function for issuing a scsi MODE SENSE command. 21601 * Note: This routine uses a consistent implementation for Group0, 21602 * Group1, and Group2 commands across all platforms. ATAPI devices 21603 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 21604 * 21605 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21606 * structure for this target. 21607 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or 21608 * CDB_GROUP[1|2] (10 byte)). 21609 * bufaddr - buffer for page data retrieved from the target. 21610 * buflen - size of page to be retrieved. 21611 * page_code - page code of data to be retrieved from the target. 21612 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 21613 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 21614 * to use the USCSI "direct" chain and bypass the normal 21615 * command waitq. 21616 * 21617 * Return Code: 0 - Success 21618 * errno return code from sd_ssc_send() 21619 * 21620 * Context: Can sleep. Does not return until command is completed. 21621 */ 21622 21623 static int 21624 sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 21625 size_t buflen, uchar_t page_code, int path_flag) 21626 { 21627 struct scsi_extended_sense sense_buf; 21628 union scsi_cdb cdb; 21629 struct uscsi_cmd ucmd_buf; 21630 int status; 21631 int headlen; 21632 struct sd_lun *un; 21633 21634 ASSERT(ssc != NULL); 21635 un = ssc->ssc_un; 21636 ASSERT(un != NULL); 21637 ASSERT(!mutex_owned(SD_MUTEX(un))); 21638 ASSERT(bufaddr != NULL); 21639 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 21640 (cdbsize == CDB_GROUP2)); 21641 21642 SD_TRACE(SD_LOG_IO, un, 21643 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 21644 21645 bzero(&cdb, sizeof (cdb)); 21646 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21647 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21648 bzero(bufaddr, buflen); 21649 21650 if (cdbsize == CDB_GROUP0) { 21651 cdb.scc_cmd = SCMD_MODE_SENSE; 21652 cdb.cdb_opaque[2] = page_code; 21653 FORMG0COUNT(&cdb, buflen); 21654 headlen = MODE_HEADER_LENGTH; 21655 } else { 21656 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 21657 cdb.cdb_opaque[2] = page_code; 21658 FORMG1COUNT(&cdb, buflen); 21659 headlen = MODE_HEADER_LENGTH_GRP2; 21660 } 21661 21662 ASSERT(headlen <= buflen); 21663 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21664 21665 ucmd_buf.uscsi_cdb = (char *)&cdb; 21666 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21667 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21668 ucmd_buf.uscsi_buflen = buflen; 21669 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21670 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21671 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 21672 ucmd_buf.uscsi_timeout = 60; 21673 21674 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21675 UIO_SYSSPACE, path_flag); 21676 21677 switch (status) { 21678 case 0: 21679 /* 21680 * sr_check_wp() uses the
0x3f page code and checks the header of the 21681 * mode page to determine if the target device is write-protected. 21682 * But some USB devices return 0 bytes for the 0x3f page code. For 21683 * this case, make sure that at least the mode page header is 21684 * returned. 21685 */ 21686 if (buflen - ucmd_buf.uscsi_resid < headlen) { 21687 status = EIO; 21688 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 21689 "mode page header is not returned"); 21690 } 21691 break; /* Success! */ 21692 case EIO: 21693 switch (ucmd_buf.uscsi_status) { 21694 case STATUS_RESERVATION_CONFLICT: 21695 status = EACCES; 21696 break; 21697 default: 21698 break; 21699 } 21700 break; 21701 default: 21702 break; 21703 } 21704 21705 if (status == 0) { 21706 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data", 21707 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21708 } 21709 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n"); 21710 21711 return (status); 21712 } 21713 21714 21715 /* 21716 * Function: sd_send_scsi_MODE_SELECT 21717 * 21718 * Description: Utility function for issuing a scsi MODE SELECT command. 21719 * Note: This routine uses a consistent implementation for Group0, 21720 * Group1, and Group2 commands across all platforms. ATAPI devices 21721 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 21722 * 21723 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21724 * structure for this target. 21725 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or 21726 * CDB_GROUP[1|2] (10 byte)). 21727 * bufaddr - buffer containing the page data to be sent to the target. 21728 * buflen - size of the page data to be transferred. 21729 * save_page - boolean to determine if SP bit should be set. 21730 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 21731 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 21732 * to use the USCSI "direct" chain and bypass the normal 21733 * command waitq. 21734 * 21735 * Return Code: 0 - Success 21736 * errno return code from sd_ssc_send() 21737 * 21738 * Context: Can sleep. Does not return until command is completed.
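 *
 * Illustrative sketch (writing back a page previously fetched with
 * sd_send_scsi_MODE_SENSE and persisting it via the SP bit; mbuf and
 * mlen are assumed caller-owned):
 *
 *	status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, mbuf, mlen,
 *	    SD_SAVE_PAGE, SD_PATH_DIRECT);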
21739 */ 21740 21741 static int 21742 sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 21743 size_t buflen, uchar_t save_page, int path_flag) 21744 { 21745 struct scsi_extended_sense sense_buf; 21746 union scsi_cdb cdb; 21747 struct uscsi_cmd ucmd_buf; 21748 int status; 21749 struct sd_lun *un; 21750 21751 ASSERT(ssc != NULL); 21752 un = ssc->ssc_un; 21753 ASSERT(un != NULL); 21754 ASSERT(!mutex_owned(SD_MUTEX(un))); 21755 ASSERT(bufaddr != NULL); 21756 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 21757 (cdbsize == CDB_GROUP2)); 21758 21759 SD_TRACE(SD_LOG_IO, un, 21760 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un); 21761 21762 bzero(&cdb, sizeof (cdb)); 21763 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21764 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21765 21766 /* Set the PF bit for many third party drives */ 21767 cdb.cdb_opaque[1] = 0x10; 21768 21769 /* Set the savepage(SP) bit if given */ 21770 if (save_page == SD_SAVE_PAGE) { 21771 cdb.cdb_opaque[1] |= 0x01; 21772 } 21773 21774 if (cdbsize == CDB_GROUP0) { 21775 cdb.scc_cmd = SCMD_MODE_SELECT; 21776 FORMG0COUNT(&cdb, buflen); 21777 } else { 21778 cdb.scc_cmd = SCMD_MODE_SELECT_G1; 21779 FORMG1COUNT(&cdb, buflen); 21780 } 21781 21782 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21783 21784 ucmd_buf.uscsi_cdb = (char *)&cdb; 21785 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21786 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21787 ucmd_buf.uscsi_buflen = buflen; 21788 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21789 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21790 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 21791 ucmd_buf.uscsi_timeout = 60; 21792 21793 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21794 UIO_SYSSPACE, path_flag); 21795 21796 switch (status) { 21797 case 0: 21798 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21799 break; /* Success! */ 21800 case EIO: 21801 switch (ucmd_buf.uscsi_status) { 21802 case STATUS_RESERVATION_CONFLICT: 21803 status = EACCES; 21804 break; 21805 default: 21806 break; 21807 } 21808 break; 21809 default: 21810 break; 21811 } 21812 21813 if (status == 0) { 21814 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data", 21815 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21816 } 21817 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n"); 21818 21819 return (status); 21820 } 21821 21822 21823 /* 21824 * Function: sd_send_scsi_RDWR 21825 * 21826 * Description: Issue a scsi READ or WRITE command with the given parameters. 21827 * 21828 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21829 * structure for this target. 21830 * cmd: SCMD_READ or SCMD_WRITE 21831 * bufaddr: Address of caller's buffer to receive the RDWR data 21832 * buflen: Length of caller's buffer receive the RDWR data. 21833 * start_block: Block number for the start of the RDWR operation. 21834 * (Assumes target-native block size.) 21835 * residp: Pointer to variable to receive the redisual of the 21836 * RDWR operation (may be NULL of no residual requested). 21837 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 21838 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 21839 * to use the USCSI "direct" chain and bypass the normal 21840 * command waitq. 21841 * 21842 * Return Code: 0 - Success 21843 * errno return code from sd_ssc_send() 21844 * 21845 * Context: Can sleep. Does not return until command is completed. 
21846 */ 21847 21848 static int 21849 sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 21850 size_t buflen, daddr_t start_block, int path_flag) 21851 { 21852 struct scsi_extended_sense sense_buf; 21853 union scsi_cdb cdb; 21854 struct uscsi_cmd ucmd_buf; 21855 uint32_t block_count; 21856 int status; 21857 int cdbsize; 21858 uchar_t flag; 21859 struct sd_lun *un; 21860 21861 ASSERT(ssc != NULL); 21862 un = ssc->ssc_un; 21863 ASSERT(un != NULL); 21864 ASSERT(!mutex_owned(SD_MUTEX(un))); 21865 ASSERT(bufaddr != NULL); 21866 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 21867 21868 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 21869 21870 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 21871 return (EINVAL); 21872 } 21873 21874 mutex_enter(SD_MUTEX(un)); 21875 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 21876 mutex_exit(SD_MUTEX(un)); 21877 21878 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 21879 21880 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 21881 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 21882 bufaddr, buflen, start_block, block_count); 21883 21884 bzero(&cdb, sizeof (cdb)); 21885 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21886 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21887 21888 /* Compute CDB size to use */ 21889 if (start_block > 0xffffffff) 21890 cdbsize = CDB_GROUP4; 21891 else if ((start_block & 0xFFE00000) || 21892 (un->un_f_cfg_is_atapi == TRUE)) 21893 cdbsize = CDB_GROUP1; 21894 else 21895 cdbsize = CDB_GROUP0; 21896 21897 switch (cdbsize) { 21898 case CDB_GROUP0: /* 6-byte CDBs */ 21899 cdb.scc_cmd = cmd; 21900 FORMG0ADDR(&cdb, start_block); 21901 FORMG0COUNT(&cdb, block_count); 21902 break; 21903 case CDB_GROUP1: /* 10-byte CDBs */ 21904 cdb.scc_cmd = cmd | SCMD_GROUP1; 21905 FORMG1ADDR(&cdb, start_block); 21906 FORMG1COUNT(&cdb, block_count); 21907 break; 21908 case CDB_GROUP4: /* 16-byte CDBs */ 21909 cdb.scc_cmd = cmd | SCMD_GROUP4; 21910 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 21911 FORMG4COUNT(&cdb, block_count); 21912 break; 21913 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 21914 default: 21915 /* All others reserved */ 21916 return (EINVAL); 21917 } 21918 21919 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 21920 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21921 21922 ucmd_buf.uscsi_cdb = (char *)&cdb; 21923 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21924 ucmd_buf.uscsi_bufaddr = bufaddr; 21925 ucmd_buf.uscsi_buflen = buflen; 21926 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21927 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21928 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 21929 ucmd_buf.uscsi_timeout = 60; 21930 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21931 UIO_SYSSPACE, path_flag); 21932 21933 switch (status) { 21934 case 0: 21935 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21936 break; /* Success! */ 21937 case EIO: 21938 switch (ucmd_buf.uscsi_status) { 21939 case STATUS_RESERVATION_CONFLICT: 21940 status = EACCES; 21941 break; 21942 default: 21943 break; 21944 } 21945 break; 21946 default: 21947 break; 21948 } 21949 21950 if (status == 0) { 21951 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 21952 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21953 } 21954 21955 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 21956 21957 return (status); 21958 } 21959 21960 21961 /* 21962 * Function: sd_send_scsi_LOG_SENSE 21963 * 21964 * Description: Issue a scsi LOG_SENSE command with the given parameters. 
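 *		The command is built as the 10-byte Group 1 LOG SENSE CDB
 *		(SCMD_LOG_SENSE_G1): page_code and page_control are packed
 *		into CDB byte 2, and param_ptr, the starting parameter
 *		pointer, into CDB bytes 5 and 6. The remaining arguments
 *		follow the conventions of the other sd_send_scsi_*()
 *		helpers: bufaddr/buflen describe the caller's return
 *		buffer and path_flag selects the USCSI command chain.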
21965 * 21966 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21967 * structure for this target. 21968 * 21969 * Return Code: 0 - Success 21970 * errno return code from sd_ssc_send() 21971 * 21972 * Context: Can sleep. Does not return until command is completed. 21973 */ 21974 21975 static int 21976 sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen, 21977 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, int path_flag) 21978 { 21979 struct scsi_extended_sense sense_buf; 21980 union scsi_cdb cdb; 21981 struct uscsi_cmd ucmd_buf; 21982 int status; 21983 struct sd_lun *un; 21984 21985 ASSERT(ssc != NULL); 21986 un = ssc->ssc_un; 21987 ASSERT(un != NULL); 21988 ASSERT(!mutex_owned(SD_MUTEX(un))); 21989 21990 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 21991 21992 bzero(&cdb, sizeof (cdb)); 21993 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21994 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21995 21996 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 21997 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 21998 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 21999 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 22000 FORMG1COUNT(&cdb, buflen); 22001 22002 ucmd_buf.uscsi_cdb = (char *)&cdb; 22003 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 22004 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 22005 ucmd_buf.uscsi_buflen = buflen; 22006 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 22007 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 22008 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 22009 ucmd_buf.uscsi_timeout = 60; 22010 22011 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 22012 UIO_SYSSPACE, path_flag); 22013 22014 switch (status) { 22015 case 0: 22016 break; 22017 case EIO: 22018 switch (ucmd_buf.uscsi_status) { 22019 case STATUS_RESERVATION_CONFLICT: 22020 status = EACCES; 22021 break; 22022 case STATUS_CHECK: 22023 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 22024 (scsi_sense_key((uint8_t *)&sense_buf) == 22025 KEY_ILLEGAL_REQUEST) && 22026 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 22027 /* 22028 * ASC 0x24: INVALID FIELD IN CDB 22029 */ 22030 switch (page_code) { 22031 case START_STOP_CYCLE_PAGE: 22032 /* 22033 * The start stop cycle counter is 22034 * implemented as page 0x31 in earlier 22035 * generation disks. In new generation 22036 * disks the start stop cycle counter is 22037 * implemented as page 0xE. To properly 22038 * handle this case if an attempt for 22039 * log page 0xE is made and fails we 22040 * will try again using page 0x31. 22041 * 22042 * Network storage BU committed to 22043 * maintain the page 0x31 for this 22044 * purpose and will not have any other 22045 * page implemented with page code 0x31 22046 * until all disks transition to the 22047 * standard page. 
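 *
 * (The fallback page code is also recorded in
 * un_start_stop_cycle_page under SD_MUTEX before the CDB is
 * rebuilt and the command reissued below, so the choice is
 * remembered in the soft state.)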
22048 */
22049 mutex_enter(SD_MUTEX(un));
22050 un->un_start_stop_cycle_page =
22051 START_STOP_CYCLE_VU_PAGE;
22052 cdb.cdb_opaque[2] =
22053 (char)(page_control << 6) |
22054 un->un_start_stop_cycle_page;
22055 mutex_exit(SD_MUTEX(un));
22056 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
22057 status = sd_ssc_send(
22058 ssc, &ucmd_buf, FKIOCTL,
22059 UIO_SYSSPACE, path_flag);
22060
22061 break;
22062 case TEMPERATURE_PAGE:
22063 status = ENOTTY;
22064 break;
22065 default:
22066 break;
22067 }
22068 }
22069 break;
22070 default:
22071 break;
22072 }
22073 break;
22074 default:
22075 break;
22076 }
22077
22078 if (status == 0) {
22079 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
22080 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
22081 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
22082 }
22083
22084 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");
22085
22086 return (status);
22087 }
22088
22089
22090 /*
22091 * Function: sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
22092 *
22093 * Description: Issue the scsi GET EVENT STATUS NOTIFICATION command.
22094 *
22095 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
22096 * structure for this target.
22097 * bufaddr - buffer in which the event status data is returned.
22098 * buflen - size of the caller's buffer.
22099 * class_req - requested notification class mask (CDB byte 4).
22100 *
22101 * Return Code: 0 - Success
22102 * errno return code from sd_ssc_send()
22103 *
22104 * Context: Can sleep. Does not return until command is completed.
22105 */
22106
22107 static int
22108 sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc, uchar_t *bufaddr,
22109 size_t buflen, uchar_t class_req)
22110 {
22111 union scsi_cdb cdb;
22112 struct uscsi_cmd ucmd_buf;
22113 int status;
22114 struct sd_lun *un;
22115
22116 ASSERT(ssc != NULL);
22117 un = ssc->ssc_un;
22118 ASSERT(un != NULL);
22119 ASSERT(!mutex_owned(SD_MUTEX(un)));
22120 ASSERT(bufaddr != NULL);
22121
22122 SD_TRACE(SD_LOG_IO, un,
22123 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: entry: un:0x%p\n", un);
22124
22125 bzero(&cdb, sizeof (cdb));
22126 bzero(&ucmd_buf, sizeof (ucmd_buf));
22127 bzero(bufaddr, buflen);
22128
22129 cdb.scc_cmd = SCMD_GET_EVENT_STATUS_NOTIFICATION;
22130 cdb.cdb_opaque[1] = 1; /* polled */
22131 cdb.cdb_opaque[4] = class_req;
22132 FORMG1COUNT(&cdb, buflen);
22133
22134 ucmd_buf.uscsi_cdb = (char *)&cdb;
22135 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
22136 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
22137 ucmd_buf.uscsi_buflen = buflen;
22138 ucmd_buf.uscsi_rqbuf = NULL;
22139 ucmd_buf.uscsi_rqlen = 0;
22140 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
22141 ucmd_buf.uscsi_timeout = 60;
22142
22143 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
22144 UIO_SYSSPACE, SD_PATH_DIRECT);
22145
22146 /*
22147 * Only handle status == 0; the upper-level caller
22148 * will put a different assessment based on the context.
22149 */
22150 if (status == 0) {
22151 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
22152
22153 if (ucmd_buf.uscsi_resid != 0) {
22154 status = EIO;
22155 }
22156 }
22157
22158 SD_TRACE(SD_LOG_IO, un,
22159 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: exit\n");
22160
22161 return (status);
22162 }
22163
22164
22165 static boolean_t
22166 sd_gesn_media_data_valid(uchar_t *data)
22167 {
22168 uint16_t len;
22169
22170 len = (data[0] << 8) | data[1]; /* event descriptor length is big-endian per MMC */
22171 return ((len >= 6) &&
22172 ((data[2] & SD_GESN_HEADER_NEA) == 0) &&
22173 ((data[2] & SD_GESN_HEADER_CLASS) == SD_GESN_MEDIA_CLASS) &&
22174 ((data[3] & (1 << SD_GESN_MEDIA_CLASS)) != 0));
22175 }
22176
22177
22178 /*
22179 * Function: sdioctl
22180 *
22181 * Description: Driver's ioctl(9e) entry point function.
22182 *
22183 * Arguments: dev - device number
22184 * cmd - ioctl operation to be performed
22185 * arg - user argument, contains data to be set or reference
22186 * parameter for get
22187 * flag - bit flag, indicating open settings, 32/64 bit type
22188 * cred_p - user credential pointer
22189 * rval_p - calling process return value (OPT)
22190 *
22191 * Return Code: EINVAL
22192 * ENOTTY
22193 * ENXIO
22194 * EIO
22195 * EFAULT
22196 * ENOTSUP
22197 * EPERM
22198 *
22199 * Context: Called from the device switch at normal priority.
22200 */
22201
22202 static int
22203 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p)
22204 {
22205 struct sd_lun *un = NULL;
22206 int err = 0;
22207 int i = 0;
22208 cred_t *cr;
22209 int tmprval = EINVAL;
22210 boolean_t is_valid;
22211 sd_ssc_t *ssc;
22212
22213 /*
22214 * All device accesses go thru sdstrategy where we check on suspend
22215 * status.
22216 */
22217 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
22218 return (ENXIO);
22219 }
22220
22221 ASSERT(!mutex_owned(SD_MUTEX(un)));
22222
22223 /* Initialize sd_ssc_t for internal uscsi commands */
22224 ssc = sd_ssc_init(un);
22225
22226 is_valid = SD_IS_VALID_LABEL(un);
22227
22228 /*
22229 * Moved this wait from sd_uscsi_strategy to here for
22230 * reasons of deadlock prevention. Internal driver commands,
22231 * specifically those to change a device's power level, result
22232 * in a call to sd_uscsi_strategy.
22233 */
22234 mutex_enter(SD_MUTEX(un));
22235 while ((un->un_state == SD_STATE_SUSPENDED) ||
22236 (un->un_state == SD_STATE_PM_CHANGING)) {
22237 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
22238 }
22239 /*
22240 * Twiddling the counter here protects commands from now
22241 * through to the top of sd_uscsi_strategy. Without the
22242 * counter increment, a power down, for example, could get in
22243 * after the above check for state is made and before
22244 * execution gets to the top of sd_uscsi_strategy.
22245 * That would cause problems.
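 *
 * (The matching decrements sit on this function's common exit
 * paths: after the main ioctl switch completes and at the
 * done_with_assess label further below.)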
22246 */ 22247 un->un_ncmds_in_driver++; 22248 22249 if (!is_valid && 22250 (flag & (FNDELAY | FNONBLOCK))) { 22251 switch (cmd) { 22252 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 22253 case DKIOCGVTOC: 22254 case DKIOCGEXTVTOC: 22255 case DKIOCGAPART: 22256 case DKIOCPARTINFO: 22257 case DKIOCEXTPARTINFO: 22258 case DKIOCSGEOM: 22259 case DKIOCSAPART: 22260 case DKIOCGETEFI: 22261 case DKIOCPARTITION: 22262 case DKIOCSVTOC: 22263 case DKIOCSEXTVTOC: 22264 case DKIOCSETEFI: 22265 case DKIOCGMBOOT: 22266 case DKIOCSMBOOT: 22267 case DKIOCG_PHYGEOM: 22268 case DKIOCG_VIRTGEOM: 22269 #if defined(__i386) || defined(__amd64) 22270 case DKIOCSETEXTPART: 22271 #endif 22272 /* let cmlb handle it */ 22273 goto skip_ready_valid; 22274 22275 case CDROMPAUSE: 22276 case CDROMRESUME: 22277 case CDROMPLAYMSF: 22278 case CDROMPLAYTRKIND: 22279 case CDROMREADTOCHDR: 22280 case CDROMREADTOCENTRY: 22281 case CDROMSTOP: 22282 case CDROMSTART: 22283 case CDROMVOLCTRL: 22284 case CDROMSUBCHNL: 22285 case CDROMREADMODE2: 22286 case CDROMREADMODE1: 22287 case CDROMREADOFFSET: 22288 case CDROMSBLKMODE: 22289 case CDROMGBLKMODE: 22290 case CDROMGDRVSPEED: 22291 case CDROMSDRVSPEED: 22292 case CDROMCDDA: 22293 case CDROMCDXA: 22294 case CDROMSUBCODE: 22295 if (!ISCD(un)) { 22296 un->un_ncmds_in_driver--; 22297 ASSERT(un->un_ncmds_in_driver >= 0); 22298 mutex_exit(SD_MUTEX(un)); 22299 err = ENOTTY; 22300 goto done_without_assess; 22301 } 22302 break; 22303 case FDEJECT: 22304 case DKIOCEJECT: 22305 case CDROMEJECT: 22306 if (!un->un_f_eject_media_supported) { 22307 un->un_ncmds_in_driver--; 22308 ASSERT(un->un_ncmds_in_driver >= 0); 22309 mutex_exit(SD_MUTEX(un)); 22310 err = ENOTTY; 22311 goto done_without_assess; 22312 } 22313 break; 22314 case DKIOCFLUSHWRITECACHE: 22315 mutex_exit(SD_MUTEX(un)); 22316 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 22317 if (err != 0) { 22318 mutex_enter(SD_MUTEX(un)); 22319 un->un_ncmds_in_driver--; 22320 ASSERT(un->un_ncmds_in_driver >= 0); 22321 mutex_exit(SD_MUTEX(un)); 22322 err = EIO; 22323 goto done_quick_assess; 22324 } 22325 mutex_enter(SD_MUTEX(un)); 22326 /* FALLTHROUGH */ 22327 case DKIOCREMOVABLE: 22328 case DKIOCHOTPLUGGABLE: 22329 case DKIOCINFO: 22330 case DKIOCGMEDIAINFO: 22331 case DKIOCGMEDIAINFOEXT: 22332 case DKIOCSOLIDSTATE: 22333 case MHIOCENFAILFAST: 22334 case MHIOCSTATUS: 22335 case MHIOCTKOWN: 22336 case MHIOCRELEASE: 22337 case MHIOCGRP_INKEYS: 22338 case MHIOCGRP_INRESV: 22339 case MHIOCGRP_REGISTER: 22340 case MHIOCGRP_CLEAR: 22341 case MHIOCGRP_RESERVE: 22342 case MHIOCGRP_PREEMPTANDABORT: 22343 case MHIOCGRP_REGISTERANDIGNOREKEY: 22344 case CDROMCLOSETRAY: 22345 case USCSICMD: 22346 case USCSIMAXXFER: 22347 goto skip_ready_valid; 22348 default: 22349 break; 22350 } 22351 22352 mutex_exit(SD_MUTEX(un)); 22353 err = sd_ready_and_valid(ssc, SDPART(dev)); 22354 mutex_enter(SD_MUTEX(un)); 22355 22356 if (err != SD_READY_VALID) { 22357 switch (cmd) { 22358 case DKIOCSTATE: 22359 case CDROMGDRVSPEED: 22360 case CDROMSDRVSPEED: 22361 case FDEJECT: /* for eject command */ 22362 case DKIOCEJECT: 22363 case CDROMEJECT: 22364 case DKIOCREMOVABLE: 22365 case DKIOCHOTPLUGGABLE: 22366 break; 22367 default: 22368 if (un->un_f_has_removable_media) { 22369 err = ENXIO; 22370 } else { 22371 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 22372 if (err == SD_RESERVED_BY_OTHERS) { 22373 err = EACCES; 22374 } else { 22375 err = EIO; 22376 } 22377 } 22378 un->un_ncmds_in_driver--; 22379 ASSERT(un->un_ncmds_in_driver >= 0); 22380 mutex_exit(SD_MUTEX(un)); 22381 22382 goto 
done_without_assess; 22383 } 22384 } 22385 } 22386 22387 skip_ready_valid: 22388 mutex_exit(SD_MUTEX(un)); 22389 22390 switch (cmd) { 22391 case DKIOCINFO: 22392 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 22393 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 22394 break; 22395 22396 case DKIOCGMEDIAINFO: 22397 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 22398 err = sd_get_media_info(dev, (caddr_t)arg, flag); 22399 break; 22400 22401 case DKIOCGMEDIAINFOEXT: 22402 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFOEXT\n"); 22403 err = sd_get_media_info_ext(dev, (caddr_t)arg, flag); 22404 break; 22405 22406 case DKIOCGGEOM: 22407 case DKIOCGVTOC: 22408 case DKIOCGEXTVTOC: 22409 case DKIOCGAPART: 22410 case DKIOCPARTINFO: 22411 case DKIOCEXTPARTINFO: 22412 case DKIOCSGEOM: 22413 case DKIOCSAPART: 22414 case DKIOCGETEFI: 22415 case DKIOCPARTITION: 22416 case DKIOCSVTOC: 22417 case DKIOCSEXTVTOC: 22418 case DKIOCSETEFI: 22419 case DKIOCGMBOOT: 22420 case DKIOCSMBOOT: 22421 case DKIOCG_PHYGEOM: 22422 case DKIOCG_VIRTGEOM: 22423 #if defined(__i386) || defined(__amd64) 22424 case DKIOCSETEXTPART: 22425 #endif 22426 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 22427 22428 /* TUR should spin up */ 22429 22430 if (un->un_f_has_removable_media) 22431 err = sd_send_scsi_TEST_UNIT_READY(ssc, 22432 SD_CHECK_FOR_MEDIA); 22433 22434 else 22435 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 22436 22437 if (err != 0) 22438 goto done_with_assess; 22439 22440 err = cmlb_ioctl(un->un_cmlbhandle, dev, 22441 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 22442 22443 if ((err == 0) && 22444 ((cmd == DKIOCSETEFI) || 22445 ((un->un_f_pkstats_enabled) && 22446 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC || 22447 cmd == DKIOCSEXTVTOC)))) { 22448 22449 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 22450 (void *)SD_PATH_DIRECT); 22451 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 22452 sd_set_pstats(un); 22453 SD_TRACE(SD_LOG_IO_PARTITION, un, 22454 "sd_ioctl: un:0x%p pstats created and " 22455 "set\n", un); 22456 } 22457 } 22458 22459 if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) || 22460 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 22461 22462 mutex_enter(SD_MUTEX(un)); 22463 if (un->un_f_devid_supported && 22464 (un->un_f_opt_fab_devid == TRUE)) { 22465 if (un->un_devid == NULL) { 22466 sd_register_devid(ssc, SD_DEVINFO(un), 22467 SD_TARGET_IS_UNRESERVED); 22468 } else { 22469 /* 22470 * The device id for this disk 22471 * has been fabricated. The 22472 * device id must be preserved 22473 * by writing it back out to 22474 * disk. 
22475 */ 22476 if (sd_write_deviceid(ssc) != 0) { 22477 ddi_devid_free(un->un_devid); 22478 un->un_devid = NULL; 22479 } 22480 } 22481 } 22482 mutex_exit(SD_MUTEX(un)); 22483 } 22484 22485 break; 22486 22487 case DKIOCLOCK: 22488 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 22489 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 22490 SD_PATH_STANDARD); 22491 goto done_with_assess; 22492 22493 case DKIOCUNLOCK: 22494 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 22495 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 22496 SD_PATH_STANDARD); 22497 goto done_with_assess; 22498 22499 case DKIOCSTATE: { 22500 enum dkio_state state; 22501 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 22502 22503 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 22504 err = EFAULT; 22505 } else { 22506 err = sd_check_media(dev, state); 22507 if (err == 0) { 22508 if (ddi_copyout(&un->un_mediastate, (void *)arg, 22509 sizeof (int), flag) != 0) 22510 err = EFAULT; 22511 } 22512 } 22513 break; 22514 } 22515 22516 case DKIOCREMOVABLE: 22517 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 22518 i = un->un_f_has_removable_media ? 1 : 0; 22519 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22520 err = EFAULT; 22521 } else { 22522 err = 0; 22523 } 22524 break; 22525 22526 case DKIOCSOLIDSTATE: 22527 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSOLIDSTATE\n"); 22528 i = un->un_f_is_solid_state ? 1 : 0; 22529 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22530 err = EFAULT; 22531 } else { 22532 err = 0; 22533 } 22534 break; 22535 22536 case DKIOCHOTPLUGGABLE: 22537 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 22538 i = un->un_f_is_hotpluggable ? 1 : 0; 22539 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22540 err = EFAULT; 22541 } else { 22542 err = 0; 22543 } 22544 break; 22545 22546 case DKIOCREADONLY: 22547 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREADONLY\n"); 22548 i = 0; 22549 if ((ISCD(un) && !un->un_f_mmc_writable_media) || 22550 (sr_check_wp(dev) != 0)) { 22551 i = 1; 22552 } 22553 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22554 err = EFAULT; 22555 } else { 22556 err = 0; 22557 } 22558 break; 22559 22560 case DKIOCGTEMPERATURE: 22561 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 22562 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 22563 break; 22564 22565 case MHIOCENFAILFAST: 22566 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 22567 if ((err = drv_priv(cred_p)) == 0) { 22568 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 22569 } 22570 break; 22571 22572 case MHIOCTKOWN: 22573 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 22574 if ((err = drv_priv(cred_p)) == 0) { 22575 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 22576 } 22577 break; 22578 22579 case MHIOCRELEASE: 22580 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 22581 if ((err = drv_priv(cred_p)) == 0) { 22582 err = sd_mhdioc_release(dev); 22583 } 22584 break; 22585 22586 case MHIOCSTATUS: 22587 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 22588 if ((err = drv_priv(cred_p)) == 0) { 22589 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) { 22590 case 0: 22591 err = 0; 22592 break; 22593 case EACCES: 22594 *rval_p = 1; 22595 err = 0; 22596 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22597 break; 22598 default: 22599 err = EIO; 22600 goto done_with_assess; 22601 } 22602 } 22603 break; 22604 22605 case MHIOCQRESERVE: 22606 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 22607 if ((err = drv_priv(cred_p)) == 0) { 22608 err = sd_reserve_release(dev, SD_RESERVE); 22609 } 22610 break; 22611 22612 
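/*
 * The MHIOCGRP_REGISTER, MHIOCGRP_CLEAR, MHIOCGRP_RESERVE,
 * MHIOCGRP_PREEMPTANDABORT and MHIOCGRP_REGISTERANDIGNOREKEY cases
 * below share one pattern: check privilege with drv_priv(), fail
 * SCSI-2 reservation mode with ENOTSUP, copy in the caller's
 * parameter block, and hand it to
 * sd_send_scsi_PERSISTENT_RESERVE_OUT() with the matching
 * SD_SCSI3_* service code.
 */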
case MHIOCREREGISTERDEVID: 22613 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 22614 if (drv_priv(cred_p) == EPERM) { 22615 err = EPERM; 22616 } else if (!un->un_f_devid_supported) { 22617 err = ENOTTY; 22618 } else { 22619 err = sd_mhdioc_register_devid(dev); 22620 } 22621 break; 22622 22623 case MHIOCGRP_INKEYS: 22624 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 22625 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 22626 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22627 err = ENOTSUP; 22628 } else { 22629 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 22630 flag); 22631 } 22632 } 22633 break; 22634 22635 case MHIOCGRP_INRESV: 22636 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 22637 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 22638 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22639 err = ENOTSUP; 22640 } else { 22641 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 22642 } 22643 } 22644 break; 22645 22646 case MHIOCGRP_REGISTER: 22647 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 22648 if ((err = drv_priv(cred_p)) != EPERM) { 22649 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22650 err = ENOTSUP; 22651 } else if (arg != NULL) { 22652 mhioc_register_t reg; 22653 if (ddi_copyin((void *)arg, ®, 22654 sizeof (mhioc_register_t), flag) != 0) { 22655 err = EFAULT; 22656 } else { 22657 err = 22658 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22659 ssc, SD_SCSI3_REGISTER, 22660 (uchar_t *)®); 22661 if (err != 0) 22662 goto done_with_assess; 22663 } 22664 } 22665 } 22666 break; 22667 22668 case MHIOCGRP_CLEAR: 22669 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_CLEAR\n"); 22670 if ((err = drv_priv(cred_p)) != EPERM) { 22671 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22672 err = ENOTSUP; 22673 } else if (arg != NULL) { 22674 mhioc_register_t reg; 22675 if (ddi_copyin((void *)arg, ®, 22676 sizeof (mhioc_register_t), flag) != 0) { 22677 err = EFAULT; 22678 } else { 22679 err = 22680 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22681 ssc, SD_SCSI3_CLEAR, 22682 (uchar_t *)®); 22683 if (err != 0) 22684 goto done_with_assess; 22685 } 22686 } 22687 } 22688 break; 22689 22690 case MHIOCGRP_RESERVE: 22691 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 22692 if ((err = drv_priv(cred_p)) != EPERM) { 22693 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22694 err = ENOTSUP; 22695 } else if (arg != NULL) { 22696 mhioc_resv_desc_t resv_desc; 22697 if (ddi_copyin((void *)arg, &resv_desc, 22698 sizeof (mhioc_resv_desc_t), flag) != 0) { 22699 err = EFAULT; 22700 } else { 22701 err = 22702 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22703 ssc, SD_SCSI3_RESERVE, 22704 (uchar_t *)&resv_desc); 22705 if (err != 0) 22706 goto done_with_assess; 22707 } 22708 } 22709 } 22710 break; 22711 22712 case MHIOCGRP_PREEMPTANDABORT: 22713 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 22714 if ((err = drv_priv(cred_p)) != EPERM) { 22715 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22716 err = ENOTSUP; 22717 } else if (arg != NULL) { 22718 mhioc_preemptandabort_t preempt_abort; 22719 if (ddi_copyin((void *)arg, &preempt_abort, 22720 sizeof (mhioc_preemptandabort_t), 22721 flag) != 0) { 22722 err = EFAULT; 22723 } else { 22724 err = 22725 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22726 ssc, SD_SCSI3_PREEMPTANDABORT, 22727 (uchar_t *)&preempt_abort); 22728 if (err != 0) 22729 goto done_with_assess; 22730 } 22731 } 22732 } 22733 break; 22734 22735 case MHIOCGRP_REGISTERANDIGNOREKEY: 22736 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 22737 if 
((err = drv_priv(cred_p)) != EPERM) { 22738 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22739 err = ENOTSUP; 22740 } else if (arg != NULL) { 22741 mhioc_registerandignorekey_t r_and_i; 22742 if (ddi_copyin((void *)arg, (void *)&r_and_i, 22743 sizeof (mhioc_registerandignorekey_t), 22744 flag) != 0) { 22745 err = EFAULT; 22746 } else { 22747 err = 22748 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22749 ssc, SD_SCSI3_REGISTERANDIGNOREKEY, 22750 (uchar_t *)&r_and_i); 22751 if (err != 0) 22752 goto done_with_assess; 22753 } 22754 } 22755 } 22756 break; 22757 22758 case USCSICMD: 22759 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 22760 cr = ddi_get_cred(); 22761 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 22762 err = EPERM; 22763 } else { 22764 enum uio_seg uioseg; 22765 22766 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : 22767 UIO_USERSPACE; 22768 if (un->un_f_format_in_progress == TRUE) { 22769 err = EAGAIN; 22770 break; 22771 } 22772 22773 err = sd_ssc_send(ssc, 22774 (struct uscsi_cmd *)arg, 22775 flag, uioseg, SD_PATH_STANDARD); 22776 if (err != 0) 22777 goto done_with_assess; 22778 else 22779 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 22780 } 22781 break; 22782 22783 case USCSIMAXXFER: 22784 SD_TRACE(SD_LOG_IOCTL, un, "USCSIMAXXFER\n"); 22785 cr = ddi_get_cred(); 22786 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 22787 err = EPERM; 22788 } else { 22789 const uscsi_xfer_t xfer = un->un_max_xfer_size; 22790 22791 if (ddi_copyout(&xfer, (void *)arg, sizeof (xfer), 22792 flag) != 0) { 22793 err = EFAULT; 22794 } else { 22795 err = 0; 22796 } 22797 } 22798 break; 22799 22800 case CDROMPAUSE: 22801 case CDROMRESUME: 22802 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 22803 if (!ISCD(un)) { 22804 err = ENOTTY; 22805 } else { 22806 err = sr_pause_resume(dev, cmd); 22807 } 22808 break; 22809 22810 case CDROMPLAYMSF: 22811 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 22812 if (!ISCD(un)) { 22813 err = ENOTTY; 22814 } else { 22815 err = sr_play_msf(dev, (caddr_t)arg, flag); 22816 } 22817 break; 22818 22819 case CDROMPLAYTRKIND: 22820 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 22821 #if defined(__i386) || defined(__amd64) 22822 /* 22823 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 22824 */ 22825 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 22826 #else 22827 if (!ISCD(un)) { 22828 #endif 22829 err = ENOTTY; 22830 } else { 22831 err = sr_play_trkind(dev, (caddr_t)arg, flag); 22832 } 22833 break; 22834 22835 case CDROMREADTOCHDR: 22836 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 22837 if (!ISCD(un)) { 22838 err = ENOTTY; 22839 } else { 22840 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 22841 } 22842 break; 22843 22844 case CDROMREADTOCENTRY: 22845 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 22846 if (!ISCD(un)) { 22847 err = ENOTTY; 22848 } else { 22849 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 22850 } 22851 break; 22852 22853 case CDROMSTOP: 22854 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 22855 if (!ISCD(un)) { 22856 err = ENOTTY; 22857 } else { 22858 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 22859 SD_TARGET_STOP, SD_PATH_STANDARD); 22860 goto done_with_assess; 22861 } 22862 break; 22863 22864 case CDROMSTART: 22865 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 22866 if (!ISCD(un)) { 22867 err = ENOTTY; 22868 } else { 22869 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 22870 SD_TARGET_START, SD_PATH_STANDARD); 22871 goto done_with_assess; 22872 } 22873 break; 22874 22875 case CDROMCLOSETRAY: 22876 
SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
22877 if (!ISCD(un)) {
22878 err = ENOTTY;
22879 } else {
22880 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
22881 SD_TARGET_CLOSE, SD_PATH_STANDARD);
22882 goto done_with_assess;
22883 }
22884 break;
22885
22886 case FDEJECT: /* for eject command */
22887 case DKIOCEJECT:
22888 case CDROMEJECT:
22889 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
22890 if (!un->un_f_eject_media_supported) {
22891 err = ENOTTY;
22892 } else {
22893 err = sr_eject(dev);
22894 }
22895 break;
22896
22897 case CDROMVOLCTRL:
22898 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
22899 if (!ISCD(un)) {
22900 err = ENOTTY;
22901 } else {
22902 err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
22903 }
22904 break;
22905
22906 case CDROMSUBCHNL:
22907 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
22908 if (!ISCD(un)) {
22909 err = ENOTTY;
22910 } else {
22911 err = sr_read_subchannel(dev, (caddr_t)arg, flag);
22912 }
22913 break;
22914
22915 case CDROMREADMODE2:
22916 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
22917 if (!ISCD(un)) {
22918 err = ENOTTY;
22919 } else if (un->un_f_cfg_is_atapi == TRUE) {
22920 /*
22921 * If the drive supports READ CD, use that instead of
22922 * switching the LBA size via a MODE SELECT
22923 * Block Descriptor.
22924 */
22925 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
22926 } else {
22927 err = sr_read_mode2(dev, (caddr_t)arg, flag);
22928 }
22929 break;
22930
22931 case CDROMREADMODE1:
22932 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
22933 if (!ISCD(un)) {
22934 err = ENOTTY;
22935 } else {
22936 err = sr_read_mode1(dev, (caddr_t)arg, flag);
22937 }
22938 break;
22939
22940 case CDROMREADOFFSET:
22941 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
22942 if (!ISCD(un)) {
22943 err = ENOTTY;
22944 } else {
22945 err = sr_read_sony_session_offset(dev, (caddr_t)arg,
22946 flag);
22947 }
22948 break;
22949
22950 case CDROMSBLKMODE:
22951 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
22952 /*
22953 * There is no means of changing the block size on ATAPI
22954 * drives, thus return ENOTTY if the drive type is ATAPI.
22955 */
22956 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
22957 err = ENOTTY;
22958 } else if (un->un_f_mmc_cap == TRUE) {
22959
22960 /*
22961 * MMC Devices do not support changing the
22962 * logical block size.
22963 *
22964 * Note: EINVAL is being returned instead of ENOTTY to
22965 * maintain consistency with the original mmc
22966 * driver update.
22967 */
22968 err = EINVAL;
22969 } else {
22970 mutex_enter(SD_MUTEX(un));
22971 if ((!(un->un_exclopen & (1<<SDPART(dev)))) ||
22972 (un->un_ncmds_in_transport > 0)) {
22973 mutex_exit(SD_MUTEX(un));
22974 err = EINVAL;
22975 } else {
22976 mutex_exit(SD_MUTEX(un));
22977 err = sr_change_blkmode(dev, cmd, arg, flag);
22978 }
22979 }
22980 break;
22981
22982 case CDROMGBLKMODE:
22983 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
22984 if (!ISCD(un)) {
22985 err = ENOTTY;
22986 } else if ((un->un_f_cfg_is_atapi != FALSE) &&
22987 (un->un_f_blockcount_is_valid != FALSE)) {
22988 /*
22989 * Drive is an ATAPI drive so return target block
22990 * size for ATAPI drives since we cannot change the
22991 * blocksize on ATAPI drives. Used primarily to detect
22992 * if an ATAPI cdrom is present.
22993 */
22994 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
22995 sizeof (int), flag) != 0) {
22996 err = EFAULT;
22997 } else {
22998 err = 0;
22999 }
23000
23001 } else {
23002 /*
23003 * Drive supports changing block sizes via a Mode
23004 * Select.
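 *
 * (Per the CDROMREADMODE2 note above, the change is made by
 * sending a MODE SELECT with a block descriptor;
 * sr_change_blkmode(), called below, builds that request.)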
23005 */
23006 err = sr_change_blkmode(dev, cmd, arg, flag);
23007 }
23008 break;
23009
23010 case CDROMGDRVSPEED:
23011 case CDROMSDRVSPEED:
23012 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
23013 if (!ISCD(un)) {
23014 err = ENOTTY;
23015 } else if (un->un_f_mmc_cap == TRUE) {
23016 /*
23017 * Note: In the future the driver implementation
23018 * for getting and
23019 * setting cd speed should entail:
23020 * 1) If non-mmc try the Toshiba mode page
23021 * (sr_change_speed)
23022 * 2) If mmc but no support for Real Time Streaming try
23023 * the SET CD SPEED (0xBB) command
23024 * (sr_atapi_change_speed)
23025 * 3) If mmc and support for Real Time Streaming
23026 * try the GET PERFORMANCE and SET STREAMING
23027 * commands (not yet implemented, 4380808)
23028 */
23029 /*
23030 * As per the recent MMC spec, CD-ROM speed is variable
23031 * and changes with LBA. Since there is no such
23032 * thing as drive speed now, fail this ioctl.
23033 *
23034 * Note: EINVAL is returned for consistency with the
23035 * original implementation, which included support for
23036 * getting the drive speed of mmc devices but not
23037 * setting the drive speed. Thus EINVAL would be returned
23038 * if a set request was made for an mmc device.
23039 * We no longer support get or set speed for
23040 * mmc but need to remain consistent with regard
23041 * to the error code returned.
23042 */
23043 err = EINVAL;
23044 } else if (un->un_f_cfg_is_atapi == TRUE) {
23045 err = sr_atapi_change_speed(dev, cmd, arg, flag);
23046 } else {
23047 err = sr_change_speed(dev, cmd, arg, flag);
23048 }
23049 break;
23050
23051 case CDROMCDDA:
23052 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n");
23053 if (!ISCD(un)) {
23054 err = ENOTTY;
23055 } else {
23056 err = sr_read_cdda(dev, (void *)arg, flag);
23057 }
23058 break;
23059
23060 case CDROMCDXA:
23061 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n");
23062 if (!ISCD(un)) {
23063 err = ENOTTY;
23064 } else {
23065 err = sr_read_cdxa(dev, (caddr_t)arg, flag);
23066 }
23067 break;
23068
23069 case CDROMSUBCODE:
23070 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n");
23071 if (!ISCD(un)) {
23072 err = ENOTTY;
23073 } else {
23074 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag);
23075 }
23076 break;
23077
23078
23079 #ifdef SDDEBUG
23080 /* RESET/ABORTS testing ioctls */
23081 case DKIOCRESET: {
23082 int reset_level;
23083
23084 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) {
23085 err = EFAULT;
23086 } else {
23087 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: "
23088 "reset_level = 0x%x\n", reset_level);
23089 if (scsi_reset(SD_ADDRESS(un), reset_level)) {
23090 err = 0;
23091 } else {
23092 err = EIO;
23093 }
23094 }
23095 break;
23096 }
23097
23098 case DKIOCABORT:
23099 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n");
23100 if (scsi_abort(SD_ADDRESS(un), NULL)) {
23101 err = 0;
23102 } else {
23103 err = EIO;
23104 }
23105 break;
23106 #endif
23107
23108 #ifdef SD_FAULT_INJECTION
23109 /* SDIOC FaultInjection testing ioctls */
23110 case SDIOCSTART:
23111 case SDIOCSTOP:
23112 case SDIOCINSERTPKT:
23113 case SDIOCINSERTXB:
23114 case SDIOCINSERTUN:
23115 case SDIOCINSERTARQ:
23116 case SDIOCPUSH:
23117 case SDIOCRETRIEVE:
23118 case SDIOCRUN:
23119 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:"
23120 "SDIOC detected cmd:0x%X:\n", cmd);
23121 /* call error generator */
23122 sd_faultinjection_ioctl(cmd, arg, un);
23123 err = 0;
23124 break;
23125
23126 #endif /* SD_FAULT_INJECTION */
23127
23128 case DKIOCFLUSHWRITECACHE:
23129 {
23130 struct dk_callback *dkc = (struct
dk_callback *)arg; 23131 23132 mutex_enter(SD_MUTEX(un)); 23133 if (!un->un_f_sync_cache_supported || 23134 !un->un_f_write_cache_enabled) { 23135 err = un->un_f_sync_cache_supported ? 23136 0 : ENOTSUP; 23137 mutex_exit(SD_MUTEX(un)); 23138 if ((flag & FKIOCTL) && dkc != NULL && 23139 dkc->dkc_callback != NULL) { 23140 (*dkc->dkc_callback)(dkc->dkc_cookie, 23141 err); 23142 /* 23143 * Did callback and reported error. 23144 * Since we did a callback, ioctl 23145 * should return 0. 23146 */ 23147 err = 0; 23148 } 23149 break; 23150 } 23151 mutex_exit(SD_MUTEX(un)); 23152 23153 if ((flag & FKIOCTL) && dkc != NULL && 23154 dkc->dkc_callback != NULL) { 23155 /* async SYNC CACHE request */ 23156 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 23157 } else { 23158 /* synchronous SYNC CACHE request */ 23159 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 23160 } 23161 } 23162 break; 23163 23164 case DKIOCGETWCE: { 23165 23166 int wce; 23167 23168 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) { 23169 break; 23170 } 23171 23172 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 23173 err = EFAULT; 23174 } 23175 break; 23176 } 23177 23178 case DKIOCSETWCE: { 23179 23180 int wce, sync_supported; 23181 int cur_wce = 0; 23182 23183 if (!un->un_f_cache_mode_changeable) { 23184 err = EINVAL; 23185 break; 23186 } 23187 23188 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 23189 err = EFAULT; 23190 break; 23191 } 23192 23193 /* 23194 * Synchronize multiple threads trying to enable 23195 * or disable the cache via the un_f_wcc_cv 23196 * condition variable. 23197 */ 23198 mutex_enter(SD_MUTEX(un)); 23199 23200 /* 23201 * Don't allow the cache to be enabled if the 23202 * config file has it disabled. 23203 */ 23204 if (un->un_f_opt_disable_cache && wce) { 23205 mutex_exit(SD_MUTEX(un)); 23206 err = EINVAL; 23207 break; 23208 } 23209 23210 /* 23211 * Wait for write cache change in progress 23212 * bit to be clear before proceeding. 23213 */ 23214 while (un->un_f_wcc_inprog) 23215 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 23216 23217 un->un_f_wcc_inprog = 1; 23218 23219 mutex_exit(SD_MUTEX(un)); 23220 23221 /* 23222 * Get the current write cache state 23223 */ 23224 if ((err = sd_get_write_cache_enabled(ssc, &cur_wce)) != 0) { 23225 mutex_enter(SD_MUTEX(un)); 23226 un->un_f_wcc_inprog = 0; 23227 cv_broadcast(&un->un_wcc_cv); 23228 mutex_exit(SD_MUTEX(un)); 23229 break; 23230 } 23231 23232 mutex_enter(SD_MUTEX(un)); 23233 un->un_f_write_cache_enabled = (cur_wce != 0); 23234 23235 if (un->un_f_write_cache_enabled && wce == 0) { 23236 /* 23237 * Disable the write cache. Don't clear 23238 * un_f_write_cache_enabled until after 23239 * the mode select and flush are complete. 23240 */ 23241 sync_supported = un->un_f_sync_cache_supported; 23242 23243 /* 23244 * If cache flush is suppressed, we assume that the 23245 * controller firmware will take care of managing the 23246 * write cache for us: no need to explicitly 23247 * disable it. 
23248 */ 23249 if (!un->un_f_suppress_cache_flush) { 23250 mutex_exit(SD_MUTEX(un)); 23251 if ((err = sd_cache_control(ssc, 23252 SD_CACHE_NOCHANGE, 23253 SD_CACHE_DISABLE)) == 0 && 23254 sync_supported) { 23255 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 23256 NULL); 23257 } 23258 } else { 23259 mutex_exit(SD_MUTEX(un)); 23260 } 23261 23262 mutex_enter(SD_MUTEX(un)); 23263 if (err == 0) { 23264 un->un_f_write_cache_enabled = 0; 23265 } 23266 23267 } else if (!un->un_f_write_cache_enabled && wce != 0) { 23268 /* 23269 * Set un_f_write_cache_enabled first, so there is 23270 * no window where the cache is enabled, but the 23271 * bit says it isn't. 23272 */ 23273 un->un_f_write_cache_enabled = 1; 23274 23275 /* 23276 * If cache flush is suppressed, we assume that the 23277 * controller firmware will take care of managing the 23278 * write cache for us: no need to explicitly 23279 * enable it. 23280 */ 23281 if (!un->un_f_suppress_cache_flush) { 23282 mutex_exit(SD_MUTEX(un)); 23283 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE, 23284 SD_CACHE_ENABLE); 23285 } else { 23286 mutex_exit(SD_MUTEX(un)); 23287 } 23288 23289 mutex_enter(SD_MUTEX(un)); 23290 23291 if (err) { 23292 un->un_f_write_cache_enabled = 0; 23293 } 23294 } 23295 23296 un->un_f_wcc_inprog = 0; 23297 cv_broadcast(&un->un_wcc_cv); 23298 mutex_exit(SD_MUTEX(un)); 23299 break; 23300 } 23301 23302 default: 23303 err = ENOTTY; 23304 break; 23305 } 23306 mutex_enter(SD_MUTEX(un)); 23307 un->un_ncmds_in_driver--; 23308 ASSERT(un->un_ncmds_in_driver >= 0); 23309 mutex_exit(SD_MUTEX(un)); 23310 23311 23312 done_without_assess: 23313 sd_ssc_fini(ssc); 23314 23315 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 23316 return (err); 23317 23318 done_with_assess: 23319 mutex_enter(SD_MUTEX(un)); 23320 un->un_ncmds_in_driver--; 23321 ASSERT(un->un_ncmds_in_driver >= 0); 23322 mutex_exit(SD_MUTEX(un)); 23323 23324 done_quick_assess: 23325 if (err != 0) 23326 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23327 /* Uninitialize sd_ssc_t pointer */ 23328 sd_ssc_fini(ssc); 23329 23330 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 23331 return (err); 23332 } 23333 23334 23335 /* 23336 * Function: sd_dkio_ctrl_info 23337 * 23338 * Description: This routine is the driver entry point for handling controller 23339 * information ioctl requests (DKIOCINFO). 23340 * 23341 * Arguments: dev - the device number 23342 * arg - pointer to user provided dk_cinfo structure 23343 * specifying the controller type and attributes. 23344 * flag - this argument is a pass through to ddi_copyxxx() 23345 * directly from the mode argument of ioctl(). 
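 *		For reference, a userland consumer would issue this ioctl
 *		roughly as follows (an illustrative sketch only; the device
 *		path is an arbitrary example):
 *
 *			struct dk_cinfo info;
 *			int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY);
 *			if (fd >= 0 && ioctl(fd, DKIOCINFO, &info) == 0)
 *				(void) printf("ctype %d\n", info.dki_ctype);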
23346 * 23347 * Return Code: 0 23348 * EFAULT 23349 * ENXIO 23350 */ 23351 23352 static int 23353 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 23354 { 23355 struct sd_lun *un = NULL; 23356 struct dk_cinfo *info; 23357 dev_info_t *pdip; 23358 int lun, tgt; 23359 23360 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23361 return (ENXIO); 23362 } 23363 23364 info = (struct dk_cinfo *) 23365 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 23366 23367 switch (un->un_ctype) { 23368 case CTYPE_CDROM: 23369 info->dki_ctype = DKC_CDROM; 23370 break; 23371 default: 23372 info->dki_ctype = DKC_SCSI_CCS; 23373 break; 23374 } 23375 pdip = ddi_get_parent(SD_DEVINFO(un)); 23376 info->dki_cnum = ddi_get_instance(pdip); 23377 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 23378 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 23379 } else { 23380 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 23381 DK_DEVLEN - 1); 23382 } 23383 23384 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 23385 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 23386 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 23387 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 23388 23389 /* Unit Information */ 23390 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 23391 info->dki_slave = ((tgt << 3) | lun); 23392 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 23393 DK_DEVLEN - 1); 23394 info->dki_flags = DKI_FMTVOL; 23395 info->dki_partition = SDPART(dev); 23396 23397 /* Max Transfer size of this device in blocks */ 23398 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 23399 info->dki_addr = 0; 23400 info->dki_space = 0; 23401 info->dki_prio = 0; 23402 info->dki_vec = 0; 23403 23404 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 23405 kmem_free(info, sizeof (struct dk_cinfo)); 23406 return (EFAULT); 23407 } else { 23408 kmem_free(info, sizeof (struct dk_cinfo)); 23409 return (0); 23410 } 23411 } 23412 23413 /* 23414 * Function: sd_get_media_info_com 23415 * 23416 * Description: This routine returns the information required to populate 23417 * the fields for the dk_minfo/dk_minfo_ext structures. 
23418 *
23419 * Arguments: dev - the device number
23420 * dki_media_type - media type
23421 * dki_lbsize - logical block size
23422 * dki_capacity - capacity in blocks
23423 * dki_pbsize - physical block size (if requested)
23424 *
23425 * Return Code: 0
23426 * EACCES
23427 * EFAULT
23428 * ENXIO
23429 * EIO
23430 */
23431 static int
23432 sd_get_media_info_com(dev_t dev, uint_t *dki_media_type, uint_t *dki_lbsize,
23433 diskaddr_t *dki_capacity, uint_t *dki_pbsize)
23434 {
23435 struct sd_lun *un = NULL;
23436 struct uscsi_cmd com;
23437 struct scsi_inquiry *sinq;
23438 u_longlong_t media_capacity;
23439 uint64_t capacity;
23440 uint_t lbasize;
23441 uint_t pbsize;
23442 uchar_t *out_data;
23443 uchar_t *rqbuf;
23444 int rval = 0;
23445 int rtn;
23446 sd_ssc_t *ssc;
23447
23448 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
23449 (un->un_state == SD_STATE_OFFLINE)) {
23450 return (ENXIO);
23451 }
23452
23453 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info_com: entry\n");
23454
23455 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
23456 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
23457 ssc = sd_ssc_init(un);
23458
23459 /* Issue a TUR to determine if the drive is ready with media present */
23460 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA);
23461 if (rval == ENXIO) {
23462 goto done;
23463 } else if (rval != 0) {
23464 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23465 }
23466
23467 /* Now get configuration data */
23468 if (ISCD(un)) {
23469 *dki_media_type = DK_CDROM;
23470
23471 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */
23472 if (un->un_f_mmc_cap == TRUE) {
23473 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf,
23474 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
23475 SD_PATH_STANDARD);
23476
23477 if (rtn) {
23478 /*
23479 * We ignore all failures for CD and need to
23480 * put the assessment before processing code
23481 * to avoid missing assessment for FMA.
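 *
 * (The convention in this driver is that every sd_ssc_send()
 * is followed by an sd_ssc_assessment() before the ssc is
 * reused, so the IGNORE assessment is issued here even though
 * the failure itself is tolerated.)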
23482 */ 23483 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23484 /* 23485 * Failed for other than an illegal request 23486 * or command not supported 23487 */ 23488 if ((com.uscsi_status == STATUS_CHECK) && 23489 (com.uscsi_rqstatus == STATUS_GOOD)) { 23490 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 23491 (rqbuf[12] != 0x20)) { 23492 rval = EIO; 23493 goto no_assessment; 23494 } 23495 } 23496 } else { 23497 /* 23498 * The GET CONFIGURATION command succeeded 23499 * so set the media type according to the 23500 * returned data 23501 */ 23502 *dki_media_type = out_data[6]; 23503 *dki_media_type <<= 8; 23504 *dki_media_type |= out_data[7]; 23505 } 23506 } 23507 } else { 23508 /* 23509 * The profile list is not available, so we attempt to identify 23510 * the media type based on the inquiry data 23511 */ 23512 sinq = un->un_sd->sd_inq; 23513 if ((sinq->inq_dtype == DTYPE_DIRECT) || 23514 (sinq->inq_dtype == DTYPE_OPTICAL)) { 23515 /* This is a direct access device or optical disk */ 23516 *dki_media_type = DK_FIXED_DISK; 23517 23518 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 23519 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 23520 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 23521 *dki_media_type = DK_ZIP; 23522 } else if ( 23523 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 23524 *dki_media_type = DK_JAZ; 23525 } 23526 } 23527 } else { 23528 /* 23529 * Not a CD, direct access or optical disk so return 23530 * unknown media 23531 */ 23532 *dki_media_type = DK_UNKNOWN; 23533 } 23534 } 23535 23536 /* 23537 * Now read the capacity so we can provide the lbasize, 23538 * pbsize and capacity. 23539 */ 23540 if (dki_pbsize && un->un_f_descr_format_supported) { 23541 rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize, 23542 &pbsize, SD_PATH_DIRECT); 23543 23544 /* 23545 * Override the physical blocksize if the instance already 23546 * has a larger value. 23547 */ 23548 pbsize = MAX(pbsize, un->un_phy_blocksize); 23549 } 23550 23551 if (dki_pbsize == NULL || rval != 0 || 23552 !un->un_f_descr_format_supported) { 23553 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 23554 SD_PATH_DIRECT); 23555 23556 switch (rval) { 23557 case 0: 23558 if (un->un_f_enable_rmw && 23559 un->un_phy_blocksize != 0) { 23560 pbsize = un->un_phy_blocksize; 23561 } else { 23562 pbsize = lbasize; 23563 } 23564 media_capacity = capacity; 23565 23566 /* 23567 * sd_send_scsi_READ_CAPACITY() reports capacity in 23568 * un->un_sys_blocksize chunks. So we need to convert 23569 * it into cap.lbsize chunks. 23570 */ 23571 if (un->un_f_has_removable_media) { 23572 media_capacity *= un->un_sys_blocksize; 23573 media_capacity /= lbasize; 23574 } 23575 break; 23576 case EACCES: 23577 rval = EACCES; 23578 goto done; 23579 default: 23580 rval = EIO; 23581 goto done; 23582 } 23583 } else { 23584 if (un->un_f_enable_rmw && 23585 !ISP2(pbsize % DEV_BSIZE)) { 23586 pbsize = SSD_SECSIZE; 23587 } else if (!ISP2(lbasize % DEV_BSIZE) || 23588 !ISP2(pbsize % DEV_BSIZE)) { 23589 pbsize = lbasize = DEV_BSIZE; 23590 } 23591 media_capacity = capacity; 23592 } 23593 23594 /* 23595 * If lun is expanded dynamically, update the un structure. 
23596 */
23597 mutex_enter(SD_MUTEX(un));
23598 if ((un->un_f_blockcount_is_valid == TRUE) &&
23599 (un->un_f_tgt_blocksize_is_valid == TRUE) &&
23600 (capacity > un->un_blockcount)) {
23601 un->un_f_expnevent = B_FALSE;
23602 sd_update_block_info(un, lbasize, capacity);
23603 }
23604 mutex_exit(SD_MUTEX(un));
23605
23606 *dki_lbsize = lbasize;
23607 *dki_capacity = media_capacity;
23608 if (dki_pbsize)
23609 *dki_pbsize = pbsize;
23610
23611 done:
23612 if (rval != 0) {
23613 if (rval == EIO)
23614 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
23615 else
23616 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23617 }
23618 no_assessment:
23619 sd_ssc_fini(ssc);
23620 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
23621 kmem_free(rqbuf, SENSE_LENGTH);
23622 return (rval);
23623 }
23624
23625 /*
23626 * Function: sd_get_media_info
23627 *
23628 * Description: This routine is the driver entry point for handling ioctl
23629 * requests for the media type or command set profile used by the
23630 * drive to operate on the media (DKIOCGMEDIAINFO).
23631 *
23632 * Arguments: dev - the device number
23633 * arg - pointer to user provided dk_minfo structure
23634 * specifying the media type, logical block size and
23635 * drive capacity.
23636 * flag - this argument is a pass through to ddi_copyxxx()
23637 * directly from the mode argument of ioctl().
23638 *
23639 * Return Code: returns the value from sd_get_media_info_com
23640 */
23641 static int
23642 sd_get_media_info(dev_t dev, caddr_t arg, int flag)
23643 {
23644 struct dk_minfo mi;
23645 int rval;
23646
23647 rval = sd_get_media_info_com(dev, &mi.dki_media_type,
23648 &mi.dki_lbsize, &mi.dki_capacity, NULL);
23649
23650 if (rval)
23651 return (rval);
23652 if (ddi_copyout(&mi, arg, sizeof (struct dk_minfo), flag))
23653 rval = EFAULT;
23654 return (rval);
23655 }
23656
23657 /*
23658 * Function: sd_get_media_info_ext
23659 *
23660 * Description: This routine is the driver entry point for handling ioctl
23661 * requests for the media type or command set profile used by the
23662 * drive to operate on the media (DKIOCGMEDIAINFOEXT). The
23663 * difference between this ioctl and DKIOCGMEDIAINFO is that the
23664 * return value of this ioctl contains both the logical block size
23665 * and the physical block size.
23666 *
23667 *
23668 * Arguments: dev - the device number
23669 * arg - pointer to user provided dk_minfo_ext structure
23670 * specifying the media type, logical block size,
23671 * physical block size and disk capacity.
23672 * flag - this argument is a pass through to ddi_copyxxx()
23673 * directly from the mode argument of ioctl().
23674 *
23675 * Return Code: returns the value from sd_get_media_info_com
23676 */
23677 static int
23678 sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag)
23679 {
23680 struct dk_minfo_ext mie;
23681 int rval = 0;
23682
23683 rval = sd_get_media_info_com(dev, &mie.dki_media_type,
23684 &mie.dki_lbsize, &mie.dki_capacity, &mie.dki_pbsize);
23685
23686 if (rval)
23687 return (rval);
23688 if (ddi_copyout(&mie, arg, sizeof (struct dk_minfo_ext), flag))
23689 rval = EFAULT;
23690 return (rval);
23691
23692 }
23693
23694 /*
23695 * Function: sd_watch_request_submit
23696 *
23697 * Description: Call scsi_watch_request_submit or scsi_mmc_watch_request_submit
23698 * depending on which is supported by the device.
23699 */
23700 static opaque_t
23701 sd_watch_request_submit(struct sd_lun *un)
23702 {
23703 dev_t dev;
23704
23705 /* All submissions are unified to use the same device number */
23706 dev = sd_make_device(SD_DEVINFO(un));
23707
23708 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) {
23709 return (scsi_mmc_watch_request_submit(SD_SCSI_DEVP(un),
23710 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
23711 (caddr_t)dev));
23712 } else {
23713 return (scsi_watch_request_submit(SD_SCSI_DEVP(un),
23714 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
23715 (caddr_t)dev));
23716 }
23717 }
23718
23719
23720 /*
23721 * Function: sd_check_media
23722 *
23723 * Description: This utility routine implements the functionality for the
23724 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the
23725 * driver state changes from that specified by the user
23726 * (inserted or ejected). For example, if the user specifies
23727 * DKIO_EJECTED and the current media state is inserted this
23728 * routine will immediately return DKIO_INSERTED. However, if the
23729 * current media state is not inserted the user thread will be
23730 * blocked until the drive state changes. If DKIO_NONE is specified
23731 * the user thread will block until a drive state change occurs.
23732 *
23733 * Arguments: dev - the device number
23734 * state - user pointer to a dkio_state, updated with the current
23735 * drive state at return.
23736 *
23737 * Return Code: ENXIO
23738 * EIO
23739 * EAGAIN
23740 * EINTR
23741 */
23742
23743 static int
23744 sd_check_media(dev_t dev, enum dkio_state state)
23745 {
23746 struct sd_lun *un = NULL;
23747 enum dkio_state prev_state;
23748 opaque_t token = NULL;
23749 int rval = 0;
23750 sd_ssc_t *ssc;
23751
23752 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23753 return (ENXIO);
23754 }
23755
23756 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n");
23757
23758 ssc = sd_ssc_init(un);
23759
23760 mutex_enter(SD_MUTEX(un));
23761
23762 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: "
23763 "state=%x, mediastate=%x\n", state, un->un_mediastate);
23764
23765 prev_state = un->un_mediastate;
23766
23767 /* is there anything to do? */
23768 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) {
23769 /*
23770 * submit the request to the scsi_watch service;
23771 * scsi_media_watch_cb() does the real work
23772 */
23773 mutex_exit(SD_MUTEX(un));
23774
23775 /*
23776 * This change handles the case where a scsi watch request is
23777 * added to a device that is powered down. To accomplish this
23778 * we power up the device before adding the scsi watch request,
23779 * since the scsi watch sends a TUR directly to the device
23780 * which the device cannot handle if it is powered down.
23781 */
23782 if (sd_pm_entry(un) != DDI_SUCCESS) {
23783 mutex_enter(SD_MUTEX(un));
23784 goto done;
23785 }
23786
23787 token = sd_watch_request_submit(un);
23788
23789 sd_pm_exit(un);
23790
23791 mutex_enter(SD_MUTEX(un));
23792 if (token == NULL) {
23793 rval = EAGAIN;
23794 goto done;
23795 }
23796
23797 /*
23798 * This is a special case IOCTL that doesn't return
23799 * until the media state changes. Routine sdpower
23800 * knows about and handles this, so don't count it
23801 * as an active cmd in the driver, which would
23802 * keep the device busy to the pm framework.
23803 * If the count isn't decremented the device can't
23804 * be powered down.
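 *
 * (The counter is incremented again below, both when the wait
 * is interrupted and when a media state change is finally
 * observed, so the decrement on sdioctl's common exit path
 * stays balanced.)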
23805 */
23806 un->un_ncmds_in_driver--;
23807 ASSERT(un->un_ncmds_in_driver >= 0);
23808
23809 /*
23810 * If a prior request had been made, this will be the same
23811 * token, as scsi_watch was designed that way.
23812 */
23813 un->un_swr_token = token;
23814 un->un_specified_mediastate = state;
23815
23816 /*
23817 * Now wait for the media change.
23818 * We will not be signalled unless mediastate == state, but it is
23819 * still better to test for this condition, since there is a
23820 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED
23821 */
23822 SD_TRACE(SD_LOG_COMMON, un,
23823 "sd_check_media: waiting for media state change\n");
23824 while (un->un_mediastate == state) {
23825 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) {
23826 SD_TRACE(SD_LOG_COMMON, un,
23827 "sd_check_media: waiting for media state "
23828 "was interrupted\n");
23829 un->un_ncmds_in_driver++;
23830 rval = EINTR;
23831 goto done;
23832 }
23833 SD_TRACE(SD_LOG_COMMON, un,
23834 "sd_check_media: received signal, state=%x\n",
23835 un->un_mediastate);
23836 }
23837 /*
23838 * Increment the counter to indicate the device once again
23839 * has an active outstanding cmd.
23840 */
23841 un->un_ncmds_in_driver++;
23842 }
23843
23844 /* invalidate geometry */
23845 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) {
23846 sr_ejected(un);
23847 }
23848
23849 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) {
23850 uint64_t capacity;
23851 uint_t lbasize;
23852
23853 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n");
23854 mutex_exit(SD_MUTEX(un));
23855 /*
23856 * Since the following routines use SD_PATH_DIRECT, we must
23857 * call PM directly before the upcoming disk accesses. This
23858 * may cause the disk to be powered up and spun up.
23859 */
23860
23861 if (sd_pm_entry(un) == DDI_SUCCESS) {
23862 rval = sd_send_scsi_READ_CAPACITY(ssc,
23863 &capacity, &lbasize, SD_PATH_DIRECT);
23864 if (rval != 0) {
23865 sd_pm_exit(un);
23866 if (rval == EIO)
23867 sd_ssc_assessment(ssc,
23868 SD_FMT_STATUS_CHECK);
23869 else
23870 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23871 mutex_enter(SD_MUTEX(un));
23872 goto done;
23873 }
23874 } else {
23875 rval = EIO;
23876 mutex_enter(SD_MUTEX(un));
23877 goto done;
23878 }
23879 mutex_enter(SD_MUTEX(un));
23880
23881 sd_update_block_info(un, lbasize, capacity);
23882
23883 /*
23884 * Check if the media in the device is writable or not
23885 */
23886 if (ISCD(un)) {
23887 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
23888 }
23889
23890 mutex_exit(SD_MUTEX(un));
23891 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
23892 if ((cmlb_validate(un->un_cmlbhandle, 0,
23893 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) {
23894 sd_set_pstats(un);
23895 SD_TRACE(SD_LOG_IO_PARTITION, un,
23896 "sd_check_media: un:0x%p pstats created and "
23897 "set\n", un);
23898 }
23899
23900 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
23901 SD_PATH_DIRECT);
23902
23903 sd_pm_exit(un);
23904
23905 if (rval != 0) {
23906 if (rval == EIO)
23907 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
23908 else
23909 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23910 }
23911
23912 mutex_enter(SD_MUTEX(un));
23913 }
23914 done:
23915 sd_ssc_fini(ssc);
23916 un->un_f_watcht_stopped = FALSE;
23917 if (token != NULL && un->un_swr_token != NULL) {
23918 /*
23919 * Use of this local token and the mutex ensures that we avoid
23920 * some race conditions associated with terminating the
23921 * scsi watch.
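 *
 * (SD_MUTEX is dropped before calling
 * scsi_watch_request_terminate(), which may block; un_swr_token
 * is cleared only if the watch's reference count has dropped
 * to zero.)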
23922 */ 23923 token = un->un_swr_token; 23924 mutex_exit(SD_MUTEX(un)); 23925 (void) scsi_watch_request_terminate(token, 23926 SCSI_WATCH_TERMINATE_WAIT); 23927 if (scsi_watch_get_ref_count(token) == 0) { 23928 mutex_enter(SD_MUTEX(un)); 23929 un->un_swr_token = (opaque_t)NULL; 23930 } else { 23931 mutex_enter(SD_MUTEX(un)); 23932 } 23933 } 23934 23935 /* 23936 * Update the capacity kstat value, if no media previously 23937 * (capacity kstat is 0) and a media has been inserted 23938 * (un_f_blockcount_is_valid == TRUE) 23939 */ 23940 if (un->un_errstats) { 23941 struct sd_errstats *stp = NULL; 23942 23943 stp = (struct sd_errstats *)un->un_errstats->ks_data; 23944 if ((stp->sd_capacity.value.ui64 == 0) && 23945 (un->un_f_blockcount_is_valid == TRUE)) { 23946 stp->sd_capacity.value.ui64 = 23947 (uint64_t)((uint64_t)un->un_blockcount * 23948 un->un_sys_blocksize); 23949 } 23950 } 23951 mutex_exit(SD_MUTEX(un)); 23952 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 23953 return (rval); 23954 } 23955 23956 23957 /* 23958 * Function: sd_delayed_cv_broadcast 23959 * 23960 * Description: Delayed cv_broadcast to allow for target to recover from media 23961 * insertion. 23962 * 23963 * Arguments: arg - driver soft state (unit) structure 23964 */ 23965 23966 static void 23967 sd_delayed_cv_broadcast(void *arg) 23968 { 23969 struct sd_lun *un = arg; 23970 23971 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 23972 23973 mutex_enter(SD_MUTEX(un)); 23974 un->un_dcvb_timeid = NULL; 23975 cv_broadcast(&un->un_state_cv); 23976 mutex_exit(SD_MUTEX(un)); 23977 } 23978 23979 23980 /* 23981 * Function: sd_media_watch_cb 23982 * 23983 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 23984 * routine processes the TUR sense data and updates the driver 23985 * state if a transition has occurred. The user thread 23986 * (sd_check_media) is then signalled. 
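 *
 *		For example (summarizing the checks below): a Unit Attention
 *		with ASC 0x28 (not ready to ready transition) maps to
 *		DKIO_INSERTED, while a Not Ready with ASC 0x3a (medium not
 *		present) maps to DKIO_EJECTED.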
23987	 *
23988	 * Arguments: arg - the device 'dev_t' is used for context to discriminate
23989	 *	among multiple watches that share this callback function
23990	 *	resultp - scsi watch facility result packet containing scsi
23991	 *	packet, status byte and sense data
23992	 *
23993	 * Return Code: 0 for success, -1 for failure
23994	 */
23995
23996	static int
23997	sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
23998	{
23999		struct sd_lun *un;
24000		struct scsi_status *statusp = resultp->statusp;
24001		uint8_t *sensep = (uint8_t *)resultp->sensep;
24002		enum dkio_state state = DKIO_NONE;
24003		dev_t dev = (dev_t)arg;
24004		uchar_t actual_sense_length;
24005		uint8_t skey, asc, ascq;
24006
24007		if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24008			return (-1);
24009		}
24010		actual_sense_length = resultp->actual_sense_length;
24011
24012		mutex_enter(SD_MUTEX(un));
24013		SD_TRACE(SD_LOG_COMMON, un,
24014		    "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
24015		    *((char *)statusp), (void *)sensep, actual_sense_length);
24016
24017		if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
24018			un->un_mediastate = DKIO_DEV_GONE;
24019			cv_broadcast(&un->un_state_cv);
24020			mutex_exit(SD_MUTEX(un));
24021
24022			return (0);
24023		}
24024
24025		if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) {
24026			if (sd_gesn_media_data_valid(resultp->mmc_data)) {
24027				if ((resultp->mmc_data[5] &
24028				    SD_GESN_MEDIA_EVENT_STATUS_PRESENT) != 0) {
24029					state = DKIO_INSERTED;
24030				} else {
24031					state = DKIO_EJECTED;
24032				}
24033				if ((resultp->mmc_data[4] & SD_GESN_MEDIA_EVENT_CODE) ==
24034				    SD_GESN_MEDIA_EVENT_EJECTREQUEST) {
24035					sd_log_eject_request_event(un, KM_NOSLEEP);
24036				}
24037			}
24038		} else if (sensep != NULL) {
24039			/*
24040			 * If there was a check condition then sensep points to valid
24041			 * sense data. If status was not a check condition but a
24042			 * reservation or busy status then the new state is DKIO_NONE.
24043			 */
24044			skey = scsi_sense_key(sensep);
24045			asc = scsi_sense_asc(sensep);
24046			ascq = scsi_sense_ascq(sensep);
24047
24048			SD_INFO(SD_LOG_COMMON, un,
24049			    "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
24050			    skey, asc, ascq);
24051			/* This routine only uses up to 13 bytes of sense data. */
24052			if (actual_sense_length >= 13) {
24053				if (skey == KEY_UNIT_ATTENTION) {
24054					if (asc == 0x28) {
24055						state = DKIO_INSERTED;
24056					}
24057				} else if (skey == KEY_NOT_READY) {
24058					/*
24059					 * Sense data of 02/06/00 means that the
24060					 * drive could not read the media (No
24061					 * reference position found). In this case
24062					 * to prevent a hang on the DKIOCSTATE IOCTL
24063					 * we set the media state to DKIO_INSERTED.
24064					 */
24065					if (asc == 0x06 && ascq == 0x00)
24066						state = DKIO_INSERTED;
24067
24068					/*
24069					 * Sense data of 02/04/02 means that the
24070					 * host should send a start command.
24071					 * Explicitly leave the media state as is
24072					 * (inserted), since the media is present
24073					 * and the host has merely stopped the
24074					 * device for PM reasons. The next real
24075					 * read/write to the media will bring the
24076					 * device back to the proper state for
24077					 * media access.
24078					 */
24079					if (asc == 0x3a) {
24080						state = DKIO_EJECTED;
24081					} else {
24082						/*
24083						 * If the drive is busy with an
24084						 * operation or long write, keep the
24085						 * media in an inserted state.
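						 * (Per SPC, ASCQ values 0x02,
						 * 0x07 and 0x08 under ASC 0x04
						 * mean "initializing command
						 * required", "operation in
						 * progress" and "long write in
						 * progress", respectively.)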
24086					 */
24087
24088					if ((asc == 0x04) &&
24089					    ((ascq == 0x02) ||
24090					    (ascq == 0x07) ||
24091					    (ascq == 0x08))) {
24092						state = DKIO_INSERTED;
24093					}
24094				}
24095			} else if (skey == KEY_NO_SENSE) {
24096				if ((asc == 0x00) && (ascq == 0x00)) {
24097					/*
24098					 * Sense Data 00/00/00 does not provide
24099					 * any information about the state of
24100					 * the media. Ignore it.
24101					 */
24102					mutex_exit(SD_MUTEX(un));
24103					return (0);
24104				}
24105			}
24106		}
24107	} else if ((*((char *)statusp) == STATUS_GOOD) &&
24108	    (resultp->pkt->pkt_reason == CMD_CMPLT)) {
24109		state = DKIO_INSERTED;
24110	}
24111
24112	SD_TRACE(SD_LOG_COMMON, un,
24113	    "sd_media_watch_cb: state=%x, specified=%x\n",
24114	    state, un->un_specified_mediastate);
24115
24116	/*
24117	 * now signal the waiting thread if this is *not* the specified state;
24118	 * delay the signal if the state is DKIO_INSERTED to allow the target
24119	 * to recover
24120	 */
24121	if (state != un->un_specified_mediastate) {
24122		un->un_mediastate = state;
24123		if (state == DKIO_INSERTED) {
24124			/*
24125			 * delay the signal to give the drive a chance
24126			 * to do what it apparently needs to do
24127			 */
24128			SD_TRACE(SD_LOG_COMMON, un,
24129			    "sd_media_watch_cb: delayed cv_broadcast\n");
24130			if (un->un_dcvb_timeid == NULL) {
24131				un->un_dcvb_timeid =
24132				    timeout(sd_delayed_cv_broadcast, un,
24133				    drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
24134			}
24135		} else {
24136			SD_TRACE(SD_LOG_COMMON, un,
24137			    "sd_media_watch_cb: immediate cv_broadcast\n");
24138			cv_broadcast(&un->un_state_cv);
24139		}
24140	}
24141	mutex_exit(SD_MUTEX(un));
24142	return (0);
24143	}
24144
24145
24146	/*
24147	 * Function: sd_dkio_get_temp
24148	 *
24149	 * Description: This routine is the driver entry point for handling ioctl
24150	 *		requests to get the disk temperature.
24151	 *
24152	 * Arguments:	dev - the device number
24153	 *		arg - pointer to user provided dk_temperature structure.
24154	 *		flag - this argument is a pass through to ddi_copyxxx()
24155	 *		    directly from the mode argument of ioctl().
24156	 *
24157	 * Return Code: 0
24158	 *		EFAULT
24159	 *		ENXIO
24160	 *		EAGAIN
24161	 */
24162
24163	static int
24164	sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag)
24165	{
24166		struct sd_lun		*un = NULL;
24167		struct dk_temperature	*dktemp = NULL;
24168		uchar_t			*temperature_page;
24169		int			rval = 0;
24170		int			path_flag = SD_PATH_STANDARD;
24171		sd_ssc_t		*ssc;
24172
24173		if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24174			return (ENXIO);
24175		}
24176
24177		ssc = sd_ssc_init(un);
24178		dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP);
24179
24180		/* copyin the disk temp argument to get the user flags */
24181		if (ddi_copyin((void *)arg, dktemp,
24182		    sizeof (struct dk_temperature), flag) != 0) {
24183			rval = EFAULT;
24184			goto done;
24185		}
24186
24187		/* Initialize the temperature to invalid. */
24188		dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
24189		dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
24190
24191		/*
24192		 * Note: Investigate removing the "bypass pm" semantic.
24193		 * Can we just bypass PM always?
24194		 */
24195		if (dktemp->dkt_flags & DKT_BYPASS_PM) {
24196			path_flag = SD_PATH_DIRECT;
24197			ASSERT(!mutex_owned(&un->un_pm_mutex));
24198			mutex_enter(&un->un_pm_mutex);
24199			if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
24200				/*
24201				 * If DKT_BYPASS_PM is set and the drive happens to be
24202				 * in low power mode, we cannot wake it up; we need to
24203				 * return EAGAIN.
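				 * (Illustrative usage note only: a monitoring
				 * tool could set DKT_BYPASS_PM to sample the
				 * temperature without spinning up an idle
				 * disk, and would then treat EAGAIN as "drive
				 * is currently powered down".)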
24204			 */
24205				mutex_exit(&un->un_pm_mutex);
24206				rval = EAGAIN;
24207				goto done;
24208			} else {
24209				/*
24210				 * Indicate to PM the device is busy. This is required
24211				 * to avoid a race - i.e. the ioctl is issuing a
24212				 * command and the pm framework brings down the device
24213				 * to low power mode (possible power cut-off on some
24214				 * platforms).
24215				 */
24216				mutex_exit(&un->un_pm_mutex);
24217				if (sd_pm_entry(un) != DDI_SUCCESS) {
24218					rval = EAGAIN;
24219					goto done;
24220				}
24221			}
24222		}
24223
24224		temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);
24225
24226		rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page,
24227		    TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag);
24228		if (rval != 0)
24229			goto done2;
24230
24231		/*
24232		 * For the current temperature verify that the parameter length is 0x02
24233		 * and the parameter code is 0x00
24234		 */
24235		if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) &&
24236		    (temperature_page[5] == 0x00)) {
24237			if (temperature_page[9] == 0xFF) {
24238				dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
24239			} else {
24240				dktemp->dkt_cur_temp = (short)(temperature_page[9]);
24241			}
24242		}
24243
24244		/*
24245		 * For the reference temperature verify that the parameter
24246		 * length is 0x02 and the parameter code is 0x01
24247		 */
24248		if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) &&
24249		    (temperature_page[11] == 0x01)) {
24250			if (temperature_page[15] == 0xFF) {
24251				dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
24252			} else {
24253				dktemp->dkt_ref_temp = (short)(temperature_page[15]);
24254			}
24255		}
24256
24257		/* Do the copyout regardless of the temperature command's status. */
24258		if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature),
24259		    flag) != 0) {
24260			rval = EFAULT;
24261			goto done1;
24262		}
24263
24264	done2:
24265		if (rval != 0) {
24266			if (rval == EIO)
24267				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24268			else
24269				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24270		}
24271	done1:
24272		if (path_flag == SD_PATH_DIRECT) {
24273			sd_pm_exit(un);
24274		}
24275
24276		kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE);
24277	done:
24278		sd_ssc_fini(ssc);
24279		if (dktemp != NULL) {
24280			kmem_free(dktemp, sizeof (struct dk_temperature));
24281		}
24282
24283		return (rval);
24284	}
24285
24286
24287	/*
24288	 * Function: sd_log_page_supported
24289	 *
24290	 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of
24291	 *		supported log pages.
24292	 *
24293	 * Arguments:	ssc - ssc contains pointer to driver soft state (unit)
24294	 *		    structure for this target.
24295	 *		log_page - the log page to check for.
24296	 *
24297	 * Return Code: -1 - on error (log sense is optional and may not be supported).
24298	 *		0  - log page not found.
24299	 *		1  - log page found.
24300	 */
24301
24302	static int
24303	sd_log_page_supported(sd_ssc_t *ssc, int log_page)
24304	{
24305		uchar_t *log_page_data;
24306		int	i;
24307		int	match = 0;
24308		int	log_size;
24309		int	status = 0;
24310		struct sd_lun *un;
24311
24312		ASSERT(ssc != NULL);
24313		un = ssc->ssc_un;
24314		ASSERT(un != NULL);
24315
24316		log_page_data = kmem_zalloc(0xFF, KM_SLEEP);
24317
24318		status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0,
24319		    SD_PATH_DIRECT);
24320
24321		if (status != 0) {
24322			if (status == EIO) {
24323				/*
24324				 * Some disks do not support log sense; we
24325				 * should ignore this kind of error (sense key is
24326				 * 0x5 - illegal request).
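				 * Concretely, the check below inspects the
				 * saved request sense data: a sense key of
				 * KEY_ILLEGAL_REQUEST downgrades the
				 * assessment to SD_FMT_IGNORE_COMPROMISE
				 * instead of the harsher SD_FMT_STATUS_CHECK.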
24327 */ 24328 uint8_t *sensep; 24329 int senlen; 24330 24331 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 24332 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 24333 ssc->ssc_uscsi_cmd->uscsi_rqresid); 24334 24335 if (senlen > 0 && 24336 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 24337 sd_ssc_assessment(ssc, 24338 SD_FMT_IGNORE_COMPROMISE); 24339 } else { 24340 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 24341 } 24342 } else { 24343 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24344 } 24345 24346 SD_ERROR(SD_LOG_COMMON, un, 24347 "sd_log_page_supported: failed log page retrieval\n"); 24348 kmem_free(log_page_data, 0xFF); 24349 return (-1); 24350 } 24351 24352 log_size = log_page_data[3]; 24353 24354 /* 24355 * The list of supported log pages start from the fourth byte. Check 24356 * until we run out of log pages or a match is found. 24357 */ 24358 for (i = 4; (i < (log_size + 4)) && !match; i++) { 24359 if (log_page_data[i] == log_page) { 24360 match++; 24361 } 24362 } 24363 kmem_free(log_page_data, 0xFF); 24364 return (match); 24365 } 24366 24367 24368 /* 24369 * Function: sd_mhdioc_failfast 24370 * 24371 * Description: This routine is the driver entry point for handling ioctl 24372 * requests to enable/disable the multihost failfast option. 24373 * (MHIOCENFAILFAST) 24374 * 24375 * Arguments: dev - the device number 24376 * arg - user specified probing interval. 24377 * flag - this argument is a pass through to ddi_copyxxx() 24378 * directly from the mode argument of ioctl(). 24379 * 24380 * Return Code: 0 24381 * EFAULT 24382 * ENXIO 24383 */ 24384 24385 static int 24386 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 24387 { 24388 struct sd_lun *un = NULL; 24389 int mh_time; 24390 int rval = 0; 24391 24392 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24393 return (ENXIO); 24394 } 24395 24396 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 24397 return (EFAULT); 24398 24399 if (mh_time) { 24400 mutex_enter(SD_MUTEX(un)); 24401 un->un_resvd_status |= SD_FAILFAST; 24402 mutex_exit(SD_MUTEX(un)); 24403 /* 24404 * If mh_time is INT_MAX, then this ioctl is being used for 24405 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 24406 */ 24407 if (mh_time != INT_MAX) { 24408 rval = sd_check_mhd(dev, mh_time); 24409 } 24410 } else { 24411 (void) sd_check_mhd(dev, 0); 24412 mutex_enter(SD_MUTEX(un)); 24413 un->un_resvd_status &= ~SD_FAILFAST; 24414 mutex_exit(SD_MUTEX(un)); 24415 } 24416 return (rval); 24417 } 24418 24419 24420 /* 24421 * Function: sd_mhdioc_takeown 24422 * 24423 * Description: This routine is the driver entry point for handling ioctl 24424 * requests to forcefully acquire exclusive access rights to the 24425 * multihost disk (MHIOCTKOWN). 24426 * 24427 * Arguments: dev - the device number 24428 * arg - user provided structure specifying the delay 24429 * parameters in milliseconds 24430 * flag - this argument is a pass through to ddi_copyxxx() 24431 * directly from the mode argument of ioctl(). 
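 *
 *		A hypothetical userland caller might take ownership roughly
 *		as follows (the values are illustrative only; the delays are
 *		in milliseconds):
 *
 *			struct mhioctkown t = { 0 };
 *			t.min_ownership_delay = 6000;
 *			t.max_ownership_delay = 30000;
 *			if (ioctl(fd, MHIOCTKOWN, &t) != 0)
 *				...	take-ownership attempt failed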
24432 * 24433 * Return Code: 0 24434 * EFAULT 24435 * ENXIO 24436 */ 24437 24438 static int 24439 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 24440 { 24441 struct sd_lun *un = NULL; 24442 struct mhioctkown *tkown = NULL; 24443 int rval = 0; 24444 24445 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24446 return (ENXIO); 24447 } 24448 24449 if (arg != NULL) { 24450 tkown = (struct mhioctkown *) 24451 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 24452 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 24453 if (rval != 0) { 24454 rval = EFAULT; 24455 goto error; 24456 } 24457 } 24458 24459 rval = sd_take_ownership(dev, tkown); 24460 mutex_enter(SD_MUTEX(un)); 24461 if (rval == 0) { 24462 un->un_resvd_status |= SD_RESERVE; 24463 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 24464 sd_reinstate_resv_delay = 24465 tkown->reinstate_resv_delay * 1000; 24466 } else { 24467 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 24468 } 24469 /* 24470 * Give the scsi_watch routine interval set by 24471 * the MHIOCENFAILFAST ioctl precedence here. 24472 */ 24473 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 24474 mutex_exit(SD_MUTEX(un)); 24475 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 24476 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24477 "sd_mhdioc_takeown : %d\n", 24478 sd_reinstate_resv_delay); 24479 } else { 24480 mutex_exit(SD_MUTEX(un)); 24481 } 24482 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 24483 sd_mhd_reset_notify_cb, (caddr_t)un); 24484 } else { 24485 un->un_resvd_status &= ~SD_RESERVE; 24486 mutex_exit(SD_MUTEX(un)); 24487 } 24488 24489 error: 24490 if (tkown != NULL) { 24491 kmem_free(tkown, sizeof (struct mhioctkown)); 24492 } 24493 return (rval); 24494 } 24495 24496 24497 /* 24498 * Function: sd_mhdioc_release 24499 * 24500 * Description: This routine is the driver entry point for handling ioctl 24501 * requests to release exclusive access rights to the multihost 24502 * disk (MHIOCRELEASE). 24503 * 24504 * Arguments: dev - the device number 24505 * 24506 * Return Code: 0 24507 * ENXIO 24508 */ 24509 24510 static int 24511 sd_mhdioc_release(dev_t dev) 24512 { 24513 struct sd_lun *un = NULL; 24514 timeout_id_t resvd_timeid_save; 24515 int resvd_status_save; 24516 int rval = 0; 24517 24518 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24519 return (ENXIO); 24520 } 24521 24522 mutex_enter(SD_MUTEX(un)); 24523 resvd_status_save = un->un_resvd_status; 24524 un->un_resvd_status &= 24525 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 24526 if (un->un_resvd_timeid) { 24527 resvd_timeid_save = un->un_resvd_timeid; 24528 un->un_resvd_timeid = NULL; 24529 mutex_exit(SD_MUTEX(un)); 24530 (void) untimeout(resvd_timeid_save); 24531 } else { 24532 mutex_exit(SD_MUTEX(un)); 24533 } 24534 24535 /* 24536 * destroy any pending timeout thread that may be attempting to 24537 * reinstate reservation on this device. 
24538 */ 24539 sd_rmv_resv_reclaim_req(dev); 24540 24541 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 24542 mutex_enter(SD_MUTEX(un)); 24543 if ((un->un_mhd_token) && 24544 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 24545 mutex_exit(SD_MUTEX(un)); 24546 (void) sd_check_mhd(dev, 0); 24547 } else { 24548 mutex_exit(SD_MUTEX(un)); 24549 } 24550 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 24551 sd_mhd_reset_notify_cb, (caddr_t)un); 24552 } else { 24553 /* 24554 * sd_mhd_watch_cb will restart the resvd recover timeout thread 24555 */ 24556 mutex_enter(SD_MUTEX(un)); 24557 un->un_resvd_status = resvd_status_save; 24558 mutex_exit(SD_MUTEX(un)); 24559 } 24560 return (rval); 24561 } 24562 24563 24564 /* 24565 * Function: sd_mhdioc_register_devid 24566 * 24567 * Description: This routine is the driver entry point for handling ioctl 24568 * requests to register the device id (MHIOCREREGISTERDEVID). 24569 * 24570 * Note: The implementation for this ioctl has been updated to 24571 * be consistent with the original PSARC case (1999/357) 24572 * (4375899, 4241671, 4220005) 24573 * 24574 * Arguments: dev - the device number 24575 * 24576 * Return Code: 0 24577 * ENXIO 24578 */ 24579 24580 static int 24581 sd_mhdioc_register_devid(dev_t dev) 24582 { 24583 struct sd_lun *un = NULL; 24584 int rval = 0; 24585 sd_ssc_t *ssc; 24586 24587 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24588 return (ENXIO); 24589 } 24590 24591 ASSERT(!mutex_owned(SD_MUTEX(un))); 24592 24593 mutex_enter(SD_MUTEX(un)); 24594 24595 /* If a devid already exists, de-register it */ 24596 if (un->un_devid != NULL) { 24597 ddi_devid_unregister(SD_DEVINFO(un)); 24598 /* 24599 * After unregister devid, needs to free devid memory 24600 */ 24601 ddi_devid_free(un->un_devid); 24602 un->un_devid = NULL; 24603 } 24604 24605 /* Check for reservation conflict */ 24606 mutex_exit(SD_MUTEX(un)); 24607 ssc = sd_ssc_init(un); 24608 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 24609 mutex_enter(SD_MUTEX(un)); 24610 24611 switch (rval) { 24612 case 0: 24613 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 24614 break; 24615 case EACCES: 24616 break; 24617 default: 24618 rval = EIO; 24619 } 24620 24621 mutex_exit(SD_MUTEX(un)); 24622 if (rval != 0) { 24623 if (rval == EIO) 24624 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 24625 else 24626 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24627 } 24628 sd_ssc_fini(ssc); 24629 return (rval); 24630 } 24631 24632 24633 /* 24634 * Function: sd_mhdioc_inkeys 24635 * 24636 * Description: This routine is the driver entry point for handling ioctl 24637 * requests to issue the SCSI-3 Persistent In Read Keys command 24638 * to the device (MHIOCGRP_INKEYS). 24639 * 24640 * Arguments: dev - the device number 24641 * arg - user provided in_keys structure 24642 * flag - this argument is a pass through to ddi_copyxxx() 24643 * directly from the mode argument of ioctl(). 
24644	 *
24645	 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
24646	 *		ENXIO
24647	 *		EFAULT
24648	 */
24649
24650	static int
24651	sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
24652	{
24653		struct sd_lun	*un;
24654		mhioc_inkeys_t	inkeys;
24655		int		rval = 0;
24656
24657		if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24658			return (ENXIO);
24659		}
24660
24661	#ifdef _MULTI_DATAMODEL
24662		switch (ddi_model_convert_from(flag & FMODELS)) {
24663		case DDI_MODEL_ILP32: {
24664			struct mhioc_inkeys32	inkeys32;
24665
24666			if (ddi_copyin(arg, &inkeys32,
24667			    sizeof (struct mhioc_inkeys32), flag) != 0) {
24668				return (EFAULT);
24669			}
24670			inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
24671			if ((rval = sd_persistent_reservation_in_read_keys(un,
24672			    &inkeys, flag)) != 0) {
24673				return (rval);
24674			}
24675			inkeys32.generation = inkeys.generation;
24676			if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
24677			    flag) != 0) {
24678				return (EFAULT);
24679			}
24680			break;
24681		}
24682		case DDI_MODEL_NONE:
24683			if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
24684			    flag) != 0) {
24685				return (EFAULT);
24686			}
24687			if ((rval = sd_persistent_reservation_in_read_keys(un,
24688			    &inkeys, flag)) != 0) {
24689				return (rval);
24690			}
24691			if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
24692			    flag) != 0) {
24693				return (EFAULT);
24694			}
24695			break;
24696		}
24697
24698	#else /* ! _MULTI_DATAMODEL */
24699
24700		if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
24701			return (EFAULT);
24702		}
24703		rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
24704		if (rval != 0) {
24705			return (rval);
24706		}
24707		if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
24708			return (EFAULT);
24709		}
24710
24711	#endif /* _MULTI_DATAMODEL */
24712
24713		return (rval);
24714	}
24715
24716
24717	/*
24718	 * Function: sd_mhdioc_inresv
24719	 *
24720	 * Description: This routine is the driver entry point for handling ioctl
24721	 *		requests to issue the SCSI-3 Persistent In Read Reservations
24722	 *		command to the device (MHIOCGRP_INRESV).
24723	 *
24724	 * Arguments:	dev	- the device number
24725	 *		arg	- user provided in_resv structure
24726	 *		flag	- this argument is a pass through to ddi_copyxxx()
24727	 *		    directly from the mode argument of ioctl().
24728 * 24729 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 24730 * ENXIO 24731 * EFAULT 24732 */ 24733 24734 static int 24735 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 24736 { 24737 struct sd_lun *un; 24738 mhioc_inresvs_t inresvs; 24739 int rval = 0; 24740 24741 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24742 return (ENXIO); 24743 } 24744 24745 #ifdef _MULTI_DATAMODEL 24746 24747 switch (ddi_model_convert_from(flag & FMODELS)) { 24748 case DDI_MODEL_ILP32: { 24749 struct mhioc_inresvs32 inresvs32; 24750 24751 if (ddi_copyin(arg, &inresvs32, 24752 sizeof (struct mhioc_inresvs32), flag) != 0) { 24753 return (EFAULT); 24754 } 24755 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 24756 if ((rval = sd_persistent_reservation_in_read_resv(un, 24757 &inresvs, flag)) != 0) { 24758 return (rval); 24759 } 24760 inresvs32.generation = inresvs.generation; 24761 if (ddi_copyout(&inresvs32, arg, 24762 sizeof (struct mhioc_inresvs32), flag) != 0) { 24763 return (EFAULT); 24764 } 24765 break; 24766 } 24767 case DDI_MODEL_NONE: 24768 if (ddi_copyin(arg, &inresvs, 24769 sizeof (mhioc_inresvs_t), flag) != 0) { 24770 return (EFAULT); 24771 } 24772 if ((rval = sd_persistent_reservation_in_read_resv(un, 24773 &inresvs, flag)) != 0) { 24774 return (rval); 24775 } 24776 if (ddi_copyout(&inresvs, arg, 24777 sizeof (mhioc_inresvs_t), flag) != 0) { 24778 return (EFAULT); 24779 } 24780 break; 24781 } 24782 24783 #else /* ! _MULTI_DATAMODEL */ 24784 24785 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 24786 return (EFAULT); 24787 } 24788 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 24789 if (rval != 0) { 24790 return (rval); 24791 } 24792 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 24793 return (EFAULT); 24794 } 24795 24796 #endif /* ! _MULTI_DATAMODEL */ 24797 24798 return (rval); 24799 } 24800 24801 24802 /* 24803 * The following routines support the clustering functionality described below 24804 * and implement lost reservation reclaim functionality. 24805 * 24806 * Clustering 24807 * ---------- 24808 * The clustering code uses two different, independent forms of SCSI 24809 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 24810 * Persistent Group Reservations. For any particular disk, it will use either 24811 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 24812 * 24813 * SCSI-2 24814 * The cluster software takes ownership of a multi-hosted disk by issuing the 24815 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 24816 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 24817 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 24818 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 24819 * driver. The meaning of failfast is that if the driver (on this host) ever 24820 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 24821 * it should immediately panic the host. The motivation for this ioctl is that 24822 * if this host does encounter reservation conflict, the underlying cause is 24823 * that some other host of the cluster has decided that this host is no longer 24824 * in the cluster and has seized control of the disks for itself. Since this 24825 * host is no longer in the cluster, it ought to panic itself. 
The
24826	 * MHIOCENFAILFAST ioctl does two things:
24827	 *	(a) it sets a flag that will cause any returned RESERVATION_CONFLICT
24828	 *	error to panic the host
24829	 *	(b) it sets up a periodic timer to test whether this host still has
24830	 *	"access" (in that no other host has reserved the device):  if the
24831	 *	periodic timer gets RESERVATION_CONFLICT, the host is panicked.  The
24832	 *	purpose of that periodic timer is to handle scenarios where the host is
24833	 *	otherwise temporarily quiescent, temporarily doing no real i/o.
24834	 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host,
24835	 * by issuing a SCSI Bus Device Reset.  It will then issue a SCSI Reserve for
24836	 * the device itself.
24837	 *
24838	 * SCSI-3 PGR
24839	 * A direct semantic implementation of the SCSI-3 Persistent Reservation
24840	 * facility is supported through the shared multihost disk ioctls
24841	 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
24842	 * MHIOCGRP_PREEMPTANDABORT, MHIOCGRP_CLEAR)
24843	 *
24844	 * Reservation Reclaim:
24845	 * --------------------
24846	 * To support the lost reservation reclaim operations this driver creates a
24847	 * single thread to handle reinstating reservations on all devices that have
24848	 * lost reservations. sd_resv_reclaim_requests are logged for all devices
24849	 * that have lost reservations when the scsi watch facility calls back
24850	 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the
24851	 * requests to regain the lost reservations.
24852	 */
24853
24854	/*
24855	 * Function: sd_check_mhd()
24856	 *
24857	 * Description: This function sets up and submits a scsi watch request or
24858	 *		terminates an existing watch request. This routine is used in
24859	 *		support of reservation reclaim.
24860	 *
24861	 * Arguments:	dev - the device 'dev_t' is used for context to discriminate
24862	 *			among multiple watches that share the callback function
24863	 *		interval - the number of milliseconds specifying the watch
24864	 *			interval for issuing TEST UNIT READY commands. If
24865	 *			set to 0 the watch should be terminated. If the
24866	 *			interval is set to 0 and if the device is required
24867	 *			to hold reservation while disabling failfast, the
24868	 *			watch is restarted with an interval of
24869	 *			reinstate_resv_delay.
24870	 *
24871	 * Return Code: 0 - Successful submit/terminate of scsi watch request
24872	 *		ENXIO - Indicates an invalid device was specified
24873	 *		EAGAIN - Unable to submit the scsi watch request
24874	 */
24875
24876	static int
24877	sd_check_mhd(dev_t dev, int interval)
24878	{
24879		struct sd_lun	*un;
24880		opaque_t	token;
24881
24882		if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24883			return (ENXIO);
24884		}
24885
24886		/* is this a watch termination request? */
24887		if (interval == 0) {
24888			mutex_enter(SD_MUTEX(un));
24889			/* if there is an existing watch task then terminate it */
24890			if (un->un_mhd_token) {
24891				token = un->un_mhd_token;
24892				un->un_mhd_token = NULL;
24893				mutex_exit(SD_MUTEX(un));
24894				(void) scsi_watch_request_terminate(token,
24895				    SCSI_WATCH_TERMINATE_ALL_WAIT);
24896				mutex_enter(SD_MUTEX(un));
24897			} else {
24898				mutex_exit(SD_MUTEX(un));
24899				/*
24900				 * Note: If we return here we don't check for the
24901				 * failfast case. This is the original legacy
24902				 * implementation, but perhaps we should be checking
24903				 * the failfast case.
24904 */ 24905 return (0); 24906 } 24907 /* 24908 * If the device is required to hold reservation while 24909 * disabling failfast, we need to restart the scsi_watch 24910 * routine with an interval of reinstate_resv_delay. 24911 */ 24912 if (un->un_resvd_status & SD_RESERVE) { 24913 interval = sd_reinstate_resv_delay/1000; 24914 } else { 24915 /* no failfast so bail */ 24916 mutex_exit(SD_MUTEX(un)); 24917 return (0); 24918 } 24919 mutex_exit(SD_MUTEX(un)); 24920 } 24921 24922 /* 24923 * adjust minimum time interval to 1 second, 24924 * and convert from msecs to usecs 24925 */ 24926 if (interval > 0 && interval < 1000) { 24927 interval = 1000; 24928 } 24929 interval *= 1000; 24930 24931 /* 24932 * submit the request to the scsi_watch service 24933 */ 24934 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 24935 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 24936 if (token == NULL) { 24937 return (EAGAIN); 24938 } 24939 24940 /* 24941 * save token for termination later on 24942 */ 24943 mutex_enter(SD_MUTEX(un)); 24944 un->un_mhd_token = token; 24945 mutex_exit(SD_MUTEX(un)); 24946 return (0); 24947 } 24948 24949 24950 /* 24951 * Function: sd_mhd_watch_cb() 24952 * 24953 * Description: This function is the call back function used by the scsi watch 24954 * facility. The scsi watch facility sends the "Test Unit Ready" 24955 * and processes the status. If applicable (i.e. a "Unit Attention" 24956 * status and automatic "Request Sense" not used) the scsi watch 24957 * facility will send a "Request Sense" and retrieve the sense data 24958 * to be passed to this callback function. In either case the 24959 * automatic "Request Sense" or the facility submitting one, this 24960 * callback is passed the status and sense data. 24961 * 24962 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24963 * among multiple watches that share this callback function 24964 * resultp - scsi watch facility result packet containing scsi 24965 * packet, status byte and sense data 24966 * 24967 * Return Code: 0 - continue the watch task 24968 * non-zero - terminate the watch task 24969 */ 24970 24971 static int 24972 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 24973 { 24974 struct sd_lun *un; 24975 struct scsi_status *statusp; 24976 uint8_t *sensep; 24977 struct scsi_pkt *pkt; 24978 uchar_t actual_sense_length; 24979 dev_t dev = (dev_t)arg; 24980 24981 ASSERT(resultp != NULL); 24982 statusp = resultp->statusp; 24983 sensep = (uint8_t *)resultp->sensep; 24984 pkt = resultp->pkt; 24985 actual_sense_length = resultp->actual_sense_length; 24986 24987 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24988 return (ENXIO); 24989 } 24990 24991 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24992 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 24993 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 24994 24995 /* Begin processing of the status and/or sense data */ 24996 if (pkt->pkt_reason != CMD_CMPLT) { 24997 /* Handle the incomplete packet */ 24998 sd_mhd_watch_incomplete(un, pkt); 24999 return (0); 25000 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 25001 if (*((unsigned char *)statusp) 25002 == STATUS_RESERVATION_CONFLICT) { 25003 /* 25004 * Handle a reservation conflict by panicking if 25005 * configured for failfast or by logging the conflict 25006 * and updating the reservation status 25007 */ 25008 mutex_enter(SD_MUTEX(un)); 25009 if ((un->un_resvd_status & SD_FAILFAST) && 25010 (sd_failfast_enable)) { 25011 
sd_panic_for_res_conflict(un); 25012 /*NOTREACHED*/ 25013 } 25014 SD_INFO(SD_LOG_IOCTL_MHD, un, 25015 "sd_mhd_watch_cb: Reservation Conflict\n"); 25016 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 25017 mutex_exit(SD_MUTEX(un)); 25018 } 25019 } 25020 25021 if (sensep != NULL) { 25022 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 25023 mutex_enter(SD_MUTEX(un)); 25024 if ((scsi_sense_asc(sensep) == 25025 SD_SCSI_RESET_SENSE_CODE) && 25026 (un->un_resvd_status & SD_RESERVE)) { 25027 /* 25028 * The additional sense code indicates a power 25029 * on or bus device reset has occurred; update 25030 * the reservation status. 25031 */ 25032 un->un_resvd_status |= 25033 (SD_LOST_RESERVE | SD_WANT_RESERVE); 25034 SD_INFO(SD_LOG_IOCTL_MHD, un, 25035 "sd_mhd_watch_cb: Lost Reservation\n"); 25036 } 25037 } else { 25038 return (0); 25039 } 25040 } else { 25041 mutex_enter(SD_MUTEX(un)); 25042 } 25043 25044 if ((un->un_resvd_status & SD_RESERVE) && 25045 (un->un_resvd_status & SD_LOST_RESERVE)) { 25046 if (un->un_resvd_status & SD_WANT_RESERVE) { 25047 /* 25048 * A reset occurred in between the last probe and this 25049 * one so if a timeout is pending cancel it. 25050 */ 25051 if (un->un_resvd_timeid) { 25052 timeout_id_t temp_id = un->un_resvd_timeid; 25053 un->un_resvd_timeid = NULL; 25054 mutex_exit(SD_MUTEX(un)); 25055 (void) untimeout(temp_id); 25056 mutex_enter(SD_MUTEX(un)); 25057 } 25058 un->un_resvd_status &= ~SD_WANT_RESERVE; 25059 } 25060 if (un->un_resvd_timeid == 0) { 25061 /* Schedule a timeout to handle the lost reservation */ 25062 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 25063 (void *)dev, 25064 drv_usectohz(sd_reinstate_resv_delay)); 25065 } 25066 } 25067 mutex_exit(SD_MUTEX(un)); 25068 return (0); 25069 } 25070 25071 25072 /* 25073 * Function: sd_mhd_watch_incomplete() 25074 * 25075 * Description: This function is used to find out why a scsi pkt sent by the 25076 * scsi watch facility was not completed. Under some scenarios this 25077 * routine will return. Otherwise it will send a bus reset to see 25078 * if the drive is still online. 25079 * 25080 * Arguments: un - driver soft state (unit) structure 25081 * pkt - incomplete scsi pkt 25082 */ 25083 25084 static void 25085 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 25086 { 25087 int be_chatty; 25088 int perr; 25089 25090 ASSERT(pkt != NULL); 25091 ASSERT(un != NULL); 25092 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 25093 perr = (pkt->pkt_statistics & STAT_PERR); 25094 25095 mutex_enter(SD_MUTEX(un)); 25096 if (un->un_state == SD_STATE_DUMPING) { 25097 mutex_exit(SD_MUTEX(un)); 25098 return; 25099 } 25100 25101 switch (pkt->pkt_reason) { 25102 case CMD_UNX_BUS_FREE: 25103 /* 25104 * If we had a parity error that caused the target to drop BSY*, 25105 * don't be chatty about it. 25106 */ 25107 if (perr && be_chatty) { 25108 be_chatty = 0; 25109 } 25110 break; 25111 case CMD_TAG_REJECT: 25112 /* 25113 * The SCSI-2 spec states that a tag reject will be sent by the 25114 * target if tagged queuing is not supported. A tag reject may 25115 * also be sent during certain initialization periods or to 25116 * control internal resources. For the latter case the target 25117 * may also return Queue Full. 25118 * 25119 * If this driver receives a tag reject from a target that is 25120 * going through an init period or controlling internal 25121 * resources tagged queuing will be disabled. 
This is less
25122	 * than optimal behavior, but the driver is unable to determine
25123	 * the target state and assumes tagged queueing is not supported.
25124	 */
25125		pkt->pkt_flags = 0;
25126		un->un_tagflags = 0;
25127
25128		if (un->un_f_opt_queueing == TRUE) {
25129			un->un_throttle = min(un->un_throttle, 3);
25130		} else {
25131			un->un_throttle = 1;
25132		}
25133		mutex_exit(SD_MUTEX(un));
25134		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
25135		mutex_enter(SD_MUTEX(un));
25136		break;
25137	case CMD_INCOMPLETE:
25138		/*
25139		 * The transport stopped with an abnormal state, fallthrough and
25140		 * reset the target and/or bus unless selection did not complete
25141		 * (indicated by STATE_GOT_BUS) in which case we don't want to
25142		 * go through a target/bus reset
25143		 */
25144		if (pkt->pkt_state == STATE_GOT_BUS) {
25145			break;
25146		}
25147		/*FALLTHROUGH*/
25148
25149	case CMD_TIMEOUT:
25150	default:
25151		/*
25152		 * The lun may still be running the command, so a lun reset
25153		 * should be attempted. If the lun reset fails or cannot be
25154		 * issued, then try a target reset. Lastly try a bus reset.
25155		 */
25156		if ((pkt->pkt_statistics &
25157		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
25158			int reset_retval = 0;
25159			mutex_exit(SD_MUTEX(un));
25160			if (un->un_f_allow_bus_device_reset == TRUE) {
25161				if (un->un_f_lun_reset_enabled == TRUE) {
25162					reset_retval =
25163					    scsi_reset(SD_ADDRESS(un),
25164					    RESET_LUN);
25165				}
25166				if (reset_retval == 0) {
25167					reset_retval =
25168					    scsi_reset(SD_ADDRESS(un),
25169					    RESET_TARGET);
25170				}
25171			}
25172			if (reset_retval == 0) {
25173				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
25174			}
25175			mutex_enter(SD_MUTEX(un));
25176		}
25177		break;
25178	}
25179
25180	/* A device/bus reset has occurred; update the reservation status. */
25181	if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
25182	    (STAT_BUS_RESET | STAT_DEV_RESET))) {
25183		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25184			un->un_resvd_status |=
25185			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
25186			SD_INFO(SD_LOG_IOCTL_MHD, un,
25187			    "sd_mhd_watch_incomplete: Lost Reservation\n");
25188		}
25189	}
25190
25191	/*
25192	 * The disk has been turned off; update the device state.
25193	 *
25194	 * Note: Should we be offlining the disk here?
25195	 */
25196	if (pkt->pkt_state == STATE_GOT_BUS) {
25197		SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
25198		    "Disk not responding to selection\n");
25199		if (un->un_state != SD_STATE_OFFLINE) {
25200			New_state(un, SD_STATE_OFFLINE);
25201		}
25202	} else if (be_chatty) {
25203		/*
25204		 * suppress messages if they are all the same pkt reason;
25205		 * with TQ, many (up to 256) are returned with the same
25206		 * pkt_reason
25207		 */
25208		if (pkt->pkt_reason != un->un_last_pkt_reason) {
25209			SD_ERROR(SD_LOG_IOCTL_MHD, un,
25210			    "sd_mhd_watch_incomplete: "
25211			    "SCSI transport failed: reason '%s'\n",
25212			    scsi_rname(pkt->pkt_reason));
25213		}
25214	}
25215	un->un_last_pkt_reason = pkt->pkt_reason;
25216	mutex_exit(SD_MUTEX(un));
25217	}
25218
25219
25220	/*
25221	 * Function: sd_sname()
25222	 *
25223	 * Description: This is a simple little routine to return a string containing
25224	 *		a printable description of the command status byte for use in
25225	 *		logging.
25226	 *
25227	 * Arguments:	status - the status byte
25228	 *
25229	 * Return Code: char * - string containing status description.
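 *
 *		(For example, a STATUS_RESERVATION_CONFLICT status byte is
 *		rendered as "reservation_conflict" by the switch below.)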
25230 */ 25231 25232 static char * 25233 sd_sname(uchar_t status) 25234 { 25235 switch (status & STATUS_MASK) { 25236 case STATUS_GOOD: 25237 return ("good status"); 25238 case STATUS_CHECK: 25239 return ("check condition"); 25240 case STATUS_MET: 25241 return ("condition met"); 25242 case STATUS_BUSY: 25243 return ("busy"); 25244 case STATUS_INTERMEDIATE: 25245 return ("intermediate"); 25246 case STATUS_INTERMEDIATE_MET: 25247 return ("intermediate - condition met"); 25248 case STATUS_RESERVATION_CONFLICT: 25249 return ("reservation_conflict"); 25250 case STATUS_TERMINATED: 25251 return ("command terminated"); 25252 case STATUS_QFULL: 25253 return ("queue full"); 25254 default: 25255 return ("<unknown status>"); 25256 } 25257 } 25258 25259 25260 /* 25261 * Function: sd_mhd_resvd_recover() 25262 * 25263 * Description: This function adds a reservation entry to the 25264 * sd_resv_reclaim_request list and signals the reservation 25265 * reclaim thread that there is work pending. If the reservation 25266 * reclaim thread has not been previously created this function 25267 * will kick it off. 25268 * 25269 * Arguments: arg - the device 'dev_t' is used for context to discriminate 25270 * among multiple watches that share this callback function 25271 * 25272 * Context: This routine is called by timeout() and is run in interrupt 25273 * context. It must not sleep or call other functions which may 25274 * sleep. 25275 */ 25276 25277 static void 25278 sd_mhd_resvd_recover(void *arg) 25279 { 25280 dev_t dev = (dev_t)arg; 25281 struct sd_lun *un; 25282 struct sd_thr_request *sd_treq = NULL; 25283 struct sd_thr_request *sd_cur = NULL; 25284 struct sd_thr_request *sd_prev = NULL; 25285 int already_there = 0; 25286 25287 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25288 return; 25289 } 25290 25291 mutex_enter(SD_MUTEX(un)); 25292 un->un_resvd_timeid = NULL; 25293 if (un->un_resvd_status & SD_WANT_RESERVE) { 25294 /* 25295 * There was a reset so don't issue the reserve, allow the 25296 * sd_mhd_watch_cb callback function to notice this and 25297 * reschedule the timeout for reservation. 25298 */ 25299 mutex_exit(SD_MUTEX(un)); 25300 return; 25301 } 25302 mutex_exit(SD_MUTEX(un)); 25303 25304 /* 25305 * Add this device to the sd_resv_reclaim_request list and the 25306 * sd_resv_reclaim_thread should take care of the rest. 25307 * 25308 * Note: We can't sleep in this context so if the memory allocation 25309 * fails allow the sd_mhd_watch_cb callback function to notice this and 25310 * reschedule the timeout for reservation. 
(4378460)
25311	 */
25312		sd_treq = (struct sd_thr_request *)
25313		    kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP);
25314		if (sd_treq == NULL) {
25315			return;
25316		}
25317
25318		sd_treq->sd_thr_req_next = NULL;
25319		sd_treq->dev = dev;
25320		mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25321		if (sd_tr.srq_thr_req_head == NULL) {
25322			sd_tr.srq_thr_req_head = sd_treq;
25323		} else {
25324			sd_cur = sd_prev = sd_tr.srq_thr_req_head;
25325			for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) {
25326				if (sd_cur->dev == dev) {
25327					/*
25328					 * already in Queue so don't log
25329					 * another request for the device
25330					 */
25331					already_there = 1;
25332					break;
25333				}
25334				sd_prev = sd_cur;
25335			}
25336			if (!already_there) {
25337				SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: "
25338				    "logging request for %lx\n", dev);
25339				sd_prev->sd_thr_req_next = sd_treq;
25340			} else {
25341				kmem_free(sd_treq, sizeof (struct sd_thr_request));
25342			}
25343		}
25344
25345		/*
25346		 * Create a kernel thread to do the reservation reclaim and free up this
25347		 * thread. We cannot block this thread while we go away to do the
25348		 * reservation reclaim.
25349		 */
25350		if (sd_tr.srq_resv_reclaim_thread == NULL)
25351			sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
25352			    sd_resv_reclaim_thread, NULL,
25353			    0, &p0, TS_RUN, v.v_maxsyspri - 2);
25354
25355		/* Tell the reservation reclaim thread that it has work to do */
25356		cv_signal(&sd_tr.srq_resv_reclaim_cv);
25357		mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25358	}
25359
25360	/*
25361	 * Function: sd_resv_reclaim_thread()
25362	 *
25363	 * Description: This function implements the reservation reclaim operations.
25364	 *
25365	 * Arguments: none; requests are taken from the global sd_tr request
25366	 *	queue.
25367	 */
25368
25369	static void
25370	sd_resv_reclaim_thread()
25371	{
25372		struct sd_lun		*un;
25373		struct sd_thr_request	*sd_mhreq;
25374
25375		/* Wait for work */
25376		mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25377		if (sd_tr.srq_thr_req_head == NULL) {
25378			cv_wait(&sd_tr.srq_resv_reclaim_cv,
25379			    &sd_tr.srq_resv_reclaim_mutex);
25380		}
25381
25382		/* Loop while we have work */
25383		while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
25384			un = ddi_get_soft_state(sd_state,
25385			    SDUNIT(sd_tr.srq_thr_cur_req->dev));
25386			if (un == NULL) {
25387				/*
25388				 * softstate structure is NULL so just
25389				 * dequeue the request and continue
25390				 */
25391				sd_tr.srq_thr_req_head =
25392				    sd_tr.srq_thr_cur_req->sd_thr_req_next;
25393				kmem_free(sd_tr.srq_thr_cur_req,
25394				    sizeof (struct sd_thr_request));
25395				continue;
25396			}
25397
25398			/* dequeue the request */
25399			sd_mhreq = sd_tr.srq_thr_cur_req;
25400			sd_tr.srq_thr_req_head =
25401			    sd_tr.srq_thr_cur_req->sd_thr_req_next;
25402			mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25403
25404			/*
25405			 * Reclaim reservation only if SD_RESERVE is still set. There
25406			 * may have been a call to MHIOCRELEASE before we got here.
25407			 */
25408			mutex_enter(SD_MUTEX(un));
25409			if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25410				/*
25411				 * Note: The SD_LOST_RESERVE flag is cleared before
25412				 * reclaiming the reservation.
If this is done after the 25413 * call to sd_reserve_release a reservation loss in the 25414 * window between pkt completion of reserve cmd and 25415 * mutex_enter below may not be recognized 25416 */ 25417 un->un_resvd_status &= ~SD_LOST_RESERVE; 25418 mutex_exit(SD_MUTEX(un)); 25419 25420 if (sd_reserve_release(sd_mhreq->dev, 25421 SD_RESERVE) == 0) { 25422 mutex_enter(SD_MUTEX(un)); 25423 un->un_resvd_status |= SD_RESERVE; 25424 mutex_exit(SD_MUTEX(un)); 25425 SD_INFO(SD_LOG_IOCTL_MHD, un, 25426 "sd_resv_reclaim_thread: " 25427 "Reservation Recovered\n"); 25428 } else { 25429 mutex_enter(SD_MUTEX(un)); 25430 un->un_resvd_status |= SD_LOST_RESERVE; 25431 mutex_exit(SD_MUTEX(un)); 25432 SD_INFO(SD_LOG_IOCTL_MHD, un, 25433 "sd_resv_reclaim_thread: Failed " 25434 "Reservation Recovery\n"); 25435 } 25436 } else { 25437 mutex_exit(SD_MUTEX(un)); 25438 } 25439 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25440 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 25441 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25442 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 25443 /* 25444 * wakeup the destroy thread if anyone is waiting on 25445 * us to complete. 25446 */ 25447 cv_signal(&sd_tr.srq_inprocess_cv); 25448 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25449 "sd_resv_reclaim_thread: cv_signalling current request \n"); 25450 } 25451 25452 /* 25453 * cleanup the sd_tr structure now that this thread will not exist 25454 */ 25455 ASSERT(sd_tr.srq_thr_req_head == NULL); 25456 ASSERT(sd_tr.srq_thr_cur_req == NULL); 25457 sd_tr.srq_resv_reclaim_thread = NULL; 25458 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25459 thread_exit(); 25460 } 25461 25462 25463 /* 25464 * Function: sd_rmv_resv_reclaim_req() 25465 * 25466 * Description: This function removes any pending reservation reclaim requests 25467 * for the specified device. 25468 * 25469 * Arguments: dev - the device 'dev_t' 25470 */ 25471 25472 static void 25473 sd_rmv_resv_reclaim_req(dev_t dev) 25474 { 25475 struct sd_thr_request *sd_mhreq; 25476 struct sd_thr_request *sd_prev; 25477 25478 /* Remove a reservation reclaim request from the list */ 25479 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25480 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 25481 /* 25482 * We are attempting to reinstate reservation for 25483 * this device. We wait for sd_reserve_release() 25484 * to return before we return. 25485 */ 25486 cv_wait(&sd_tr.srq_inprocess_cv, 25487 &sd_tr.srq_resv_reclaim_mutex); 25488 } else { 25489 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 25490 if (sd_mhreq && sd_mhreq->dev == dev) { 25491 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 25492 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25493 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25494 return; 25495 } 25496 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 25497 if (sd_mhreq && sd_mhreq->dev == dev) { 25498 break; 25499 } 25500 sd_prev = sd_mhreq; 25501 } 25502 if (sd_mhreq != NULL) { 25503 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 25504 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25505 } 25506 } 25507 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25508 } 25509 25510 25511 /* 25512 * Function: sd_mhd_reset_notify_cb() 25513 * 25514 * Description: This is a call back function for scsi_reset_notify. This 25515 * function updates the softstate reserved status and logs the 25516 * reset. The driver scsi watch facility callback function 25517 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 25518 * will reclaim the reservation. 
25519	 *
25520	 * Arguments: arg - driver soft state (unit) structure
25521	 */
25522
25523	static void
25524	sd_mhd_reset_notify_cb(caddr_t arg)
25525	{
25526		struct sd_lun *un = (struct sd_lun *)arg;
25527
25528		mutex_enter(SD_MUTEX(un));
25529		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25530			un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
25531			SD_INFO(SD_LOG_IOCTL_MHD, un,
25532			    "sd_mhd_reset_notify_cb: Lost Reservation\n");
25533		}
25534		mutex_exit(SD_MUTEX(un));
25535	}
25536
25537
25538	/*
25539	 * Function: sd_take_ownership()
25540	 *
25541	 * Description: This routine implements an algorithm to achieve a stable
25542	 *		reservation on disks which don't implement priority reserve,
25543	 *		and makes sure that other hosts' re-reservation attempts fail.
25544	 *		The algorithm consists of a loop that keeps issuing the RESERVE
25545	 *		for some period of time (min_ownership_delay, default 6 seconds).
25546	 *		During that loop, it looks to see if there has been a bus device
25547	 *		reset or bus reset (both of which cause an existing reservation
25548	 *		to be lost). If the reservation is lost, issue RESERVE until a
25549	 *		period of min_ownership_delay with no resets has gone by, or
25550	 *		until max_ownership_delay has expired. This loop ensures that
25551	 *		the host really did manage to reserve the device, in spite of
25552	 *		resets. The looping for min_ownership_delay (default six
25553	 *		seconds) is important to early generation clustering products,
25554	 *		Solstice HA 1.x and Sun Cluster 2.x. Those products use an
25555	 *		MHIOCENFAILFAST periodic timer of two seconds. By having
25556	 *		MHIOCTKOWN issue Reserves in a loop for six seconds, and having
25557	 *		MHIOCENFAILFAST poll every two seconds, the idea is that by the
25558	 *		time the MHIOCTKOWN ioctl returns, the other host (if any) will
25559	 *		have already noticed, via the MHIOCENFAILFAST polling, that it
25560	 *		no longer "owns" the disk and will have panicked itself. Thus,
25561	 *		the host issuing the MHIOCTKOWN is assured (with timing
25562	 *		dependencies) that by the time it actually starts to use the
25563	 *		disk for real work, the old owner is no longer accessing it.
25564	 *
25565	 *		min_ownership_delay is the minimum amount of time for which the
25566	 *		disk must be reserved continuously devoid of resets before the
25567	 *		MHIOCTKOWN ioctl will return success.
25568	 *
25569	 *		max_ownership_delay indicates the amount of time by which the
25570	 *		take ownership should succeed or time out with an error.
25571	 *
25572	 * Arguments:	dev - the device 'dev_t'
25573	 *		p - pointer to a struct containing timing info.
25574	 *
25575	 * Return Code: 0 for success or error code
25576	 */
25577
25578	static int
25579	sd_take_ownership(dev_t dev, struct mhioctkown *p)
25580	{
25581		struct sd_lun	*un;
25582		int		rval;
25583		int		err;
25584		int		reservation_count = 0;
25585		int		min_ownership_delay =  6000000; /* in usec */
25586		int		max_ownership_delay = 30000000; /* in usec */
25587		clock_t		start_time;	/* starting time of this algorithm */
25588		clock_t		end_time;	/* time limit for giving up */
25589		clock_t		ownership_time;	/* time limit for stable ownership */
25590		clock_t		current_time;
25591		clock_t		previous_current_time;
25592
25593		if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25594			return (ENXIO);
25595		}
25596
25597		/*
25598		 * Attempt a device reservation. A priority reservation is requested.
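	 * (Timing sketch with the defaults below: the loop delays 500 msec
	 * per iteration, so the 6 sec min_ownership_delay amounts to roughly
	 * 12 conflict-free iterations, of which at least 4 must confirm the
	 * reservation before ownership is declared stable.)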
25599 */ 25600 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 25601 != SD_SUCCESS) { 25602 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25603 "sd_take_ownership: return(1)=%d\n", rval); 25604 return (rval); 25605 } 25606 25607 /* Update the softstate reserved status to indicate the reservation */ 25608 mutex_enter(SD_MUTEX(un)); 25609 un->un_resvd_status |= SD_RESERVE; 25610 un->un_resvd_status &= 25611 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 25612 mutex_exit(SD_MUTEX(un)); 25613 25614 if (p != NULL) { 25615 if (p->min_ownership_delay != 0) { 25616 min_ownership_delay = p->min_ownership_delay * 1000; 25617 } 25618 if (p->max_ownership_delay != 0) { 25619 max_ownership_delay = p->max_ownership_delay * 1000; 25620 } 25621 } 25622 SD_INFO(SD_LOG_IOCTL_MHD, un, 25623 "sd_take_ownership: min, max delays: %d, %d\n", 25624 min_ownership_delay, max_ownership_delay); 25625 25626 start_time = ddi_get_lbolt(); 25627 current_time = start_time; 25628 ownership_time = current_time + drv_usectohz(min_ownership_delay); 25629 end_time = start_time + drv_usectohz(max_ownership_delay); 25630 25631 while (current_time - end_time < 0) { 25632 delay(drv_usectohz(500000)); 25633 25634 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 25635 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 25636 mutex_enter(SD_MUTEX(un)); 25637 rval = (un->un_resvd_status & 25638 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 25639 mutex_exit(SD_MUTEX(un)); 25640 break; 25641 } 25642 } 25643 previous_current_time = current_time; 25644 current_time = ddi_get_lbolt(); 25645 mutex_enter(SD_MUTEX(un)); 25646 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 25647 ownership_time = ddi_get_lbolt() + 25648 drv_usectohz(min_ownership_delay); 25649 reservation_count = 0; 25650 } else { 25651 reservation_count++; 25652 } 25653 un->un_resvd_status |= SD_RESERVE; 25654 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 25655 mutex_exit(SD_MUTEX(un)); 25656 25657 SD_INFO(SD_LOG_IOCTL_MHD, un, 25658 "sd_take_ownership: ticks for loop iteration=%ld, " 25659 "reservation=%s\n", (current_time - previous_current_time), 25660 reservation_count ? 
"ok" : "reclaimed"); 25661 25662 if (current_time - ownership_time >= 0 && 25663 reservation_count >= 4) { 25664 rval = 0; /* Achieved a stable ownership */ 25665 break; 25666 } 25667 if (current_time - end_time >= 0) { 25668 rval = EACCES; /* No ownership in max possible time */ 25669 break; 25670 } 25671 } 25672 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25673 "sd_take_ownership: return(2)=%d\n", rval); 25674 return (rval); 25675 } 25676 25677 25678 /* 25679 * Function: sd_reserve_release() 25680 * 25681 * Description: This function builds and sends scsi RESERVE, RELEASE, and 25682 * PRIORITY RESERVE commands based on a user specified command type 25683 * 25684 * Arguments: dev - the device 'dev_t' 25685 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 25686 * SD_RESERVE, SD_RELEASE 25687 * 25688 * Return Code: 0 or Error Code 25689 */ 25690 25691 static int 25692 sd_reserve_release(dev_t dev, int cmd) 25693 { 25694 struct uscsi_cmd *com = NULL; 25695 struct sd_lun *un = NULL; 25696 char cdb[CDB_GROUP0]; 25697 int rval; 25698 25699 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 25700 (cmd == SD_PRIORITY_RESERVE)); 25701 25702 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25703 return (ENXIO); 25704 } 25705 25706 /* instantiate and initialize the command and cdb */ 25707 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25708 bzero(cdb, CDB_GROUP0); 25709 com->uscsi_flags = USCSI_SILENT; 25710 com->uscsi_timeout = un->un_reserve_release_time; 25711 com->uscsi_cdblen = CDB_GROUP0; 25712 com->uscsi_cdb = cdb; 25713 if (cmd == SD_RELEASE) { 25714 cdb[0] = SCMD_RELEASE; 25715 } else { 25716 cdb[0] = SCMD_RESERVE; 25717 } 25718 25719 /* Send the command. */ 25720 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25721 SD_PATH_STANDARD); 25722 25723 /* 25724 * "break" a reservation that is held by another host, by issuing a 25725 * reset if priority reserve is desired, and we could not get the 25726 * device. 25727 */ 25728 if ((cmd == SD_PRIORITY_RESERVE) && 25729 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25730 /* 25731 * First try to reset the LUN. If we cannot, then try a target 25732 * reset, followed by a bus reset if the target reset fails. 25733 */ 25734 int reset_retval = 0; 25735 if (un->un_f_lun_reset_enabled == TRUE) { 25736 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 25737 } 25738 if (reset_retval == 0) { 25739 /* The LUN reset either failed or was not issued */ 25740 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25741 } 25742 if ((reset_retval == 0) && 25743 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 25744 rval = EIO; 25745 kmem_free(com, sizeof (*com)); 25746 return (rval); 25747 } 25748 25749 bzero(com, sizeof (struct uscsi_cmd)); 25750 com->uscsi_flags = USCSI_SILENT; 25751 com->uscsi_cdb = cdb; 25752 com->uscsi_cdblen = CDB_GROUP0; 25753 com->uscsi_timeout = 5; 25754 25755 /* 25756 * Reissue the last reserve command, this time without request 25757 * sense. Assume that it is just a regular reserve command. 25758 */ 25759 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25760 SD_PATH_STANDARD); 25761 } 25762 25763 /* Return an error if still getting a reservation conflict. 
*/
25764 	if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
25765 		rval = EACCES;
25766 	}
25767 
25768 	kmem_free(com, sizeof (*com));
25769 	return (rval);
25770 }
25771 
25772 
25773 #define	SD_NDUMP_RETRIES	12
25774 /*
25775  * System Crash Dump routine
25776  */
25777 
25778 static int
25779 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
25780 {
25781 	int		instance;
25782 	int		partition;
25783 	int		i;
25784 	int		err;
25785 	struct sd_lun	*un;
25786 	struct scsi_pkt *wr_pktp;
25787 	struct buf	*wr_bp;
25788 	struct buf	wr_buf;
25789 	daddr_t		tgt_byte_offset; /* rmw - byte offset for target */
25790 	daddr_t		tgt_blkno;	/* rmw - blkno for target */
25791 	size_t		tgt_byte_count; /* rmw - # of bytes to xfer */
25792 	size_t		tgt_nblk; /* rmw - # of tgt blks to xfer */
25793 	size_t		io_start_offset;
25794 	int		doing_rmw = FALSE;
25795 	int		rval;
25796 	ssize_t		dma_resid;
25797 	daddr_t		oblkno;
25798 	diskaddr_t	nblks = 0;
25799 	diskaddr_t	start_block;
25800 
25801 	instance = SDUNIT(dev);
25802 	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
25803 	    !SD_IS_VALID_LABEL(un) || ISCD(un)) {
25804 		return (ENXIO);
25805 	}
25806 
25807 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))
25808 
25809 	SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");
25810 
25811 	partition = SDPART(dev);
25812 	SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);
25813 
25814 	if (!(NOT_DEVBSIZE(un))) {
25815 		int secmask = 0;
25816 		int blknomask = 0;
25817 
25818 		blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
25819 		secmask = un->un_tgt_blocksize - 1;
25820 
25821 		if (blkno & blknomask) {
25822 			SD_TRACE(SD_LOG_DUMP, un,
25823 			    "sddump: dump start block not modulo %d\n",
25824 			    un->un_tgt_blocksize);
25825 			return (EINVAL);
25826 		}
25827 
25828 		if ((nblk * DEV_BSIZE) & secmask) {
25829 			SD_TRACE(SD_LOG_DUMP, un,
25830 			    "sddump: dump length not modulo %d\n",
25831 			    un->un_tgt_blocksize);
25832 			return (EINVAL);
25833 		}
25834 
25835 	}
25836 
25837 	/* Validate the blocks to dump against the partition size. */
25838 
25839 	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
25840 	    &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);
25841 
25842 	if (NOT_DEVBSIZE(un)) {
25843 		if ((blkno + nblk) > nblks) {
25844 			SD_TRACE(SD_LOG_DUMP, un,
25845 			    "sddump: dump range larger than partition: "
25846 			    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
25847 			    blkno, nblk, nblks);
25848 			return (EINVAL);
25849 		}
25850 	} else {
25851 		if (((blkno / (un->un_tgt_blocksize / DEV_BSIZE)) +
25852 		    (nblk / (un->un_tgt_blocksize / DEV_BSIZE))) > nblks) {
25853 			SD_TRACE(SD_LOG_DUMP, un,
25854 			    "sddump: dump range larger than partition: "
25855 			    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
25856 			    blkno, nblk, nblks);
25857 			return (EINVAL);
25858 		}
25859 	}
25860 
25861 	mutex_enter(&un->un_pm_mutex);
25862 	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
25863 		struct scsi_pkt *start_pktp;
25864 
25865 		mutex_exit(&un->un_pm_mutex);
25866 
25867 		/*
25868 		 * Use the pm framework to power on the HBA first.
25869 		 */
25870 		(void) pm_raise_power(SD_DEVINFO(un), 0,
25871 		    SD_PM_STATE_ACTIVE(un));
25872 
25873 		/*
25874 		 * Dump no longer uses sdpower to power on a device; it is
25875 		 * done in-line here so it can run in polled mode.
25876 		 */
25877 
25878 		SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");
25879 
25880 		start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
25881 		    CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);
25882 
25883 		if (start_pktp == NULL) {
25884 			/* We were not given a SCSI packet, fail.
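			 * scsi_init_pkt() was asked for the packet with
			 * NULL_FUNC (no resource-wait callback) because
			 * sddump() runs in a polled, no-sleep context, so
			 * the only option on a NULL return is to fail the
			 * dump.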
			 */
25885 			return (EIO);
25886 		}
25887 		bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
25888 		start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
25889 		start_pktp->pkt_cdbp[4] = SD_TARGET_START;
25890 		start_pktp->pkt_flags = FLAG_NOINTR;
25891 
25892 		mutex_enter(SD_MUTEX(un));
25893 		SD_FILL_SCSI1_LUN(un, start_pktp);
25894 		mutex_exit(SD_MUTEX(un));
25895 		/*
25896 		 * Scsi_poll returns 0 (success) if the command completes and
25897 		 * the status block is STATUS_GOOD.
25898 		 */
25899 		if (sd_scsi_poll(un, start_pktp) != 0) {
25900 			scsi_destroy_pkt(start_pktp);
25901 			return (EIO);
25902 		}
25903 		scsi_destroy_pkt(start_pktp);
25904 		(void) sd_pm_state_change(un, SD_PM_STATE_ACTIVE(un),
25905 		    SD_PM_STATE_CHANGE);
25906 	} else {
25907 		mutex_exit(&un->un_pm_mutex);
25908 	}
25909 
25910 	mutex_enter(SD_MUTEX(un));
25911 	un->un_throttle = 0;
25912 
25913 	/*
25914 	 * The first time through, reset the specific target device.
25915 	 * However, when cpr calls sddump we know that sd is in
25916 	 * a good state so no bus reset is required.
25917 	 * Clear sense data via a Request Sense cmd.
25918 	 * In sddump we don't care about allow_bus_device_reset anymore.
25919 	 */
25920 
25921 	if ((un->un_state != SD_STATE_SUSPENDED) &&
25922 	    (un->un_state != SD_STATE_DUMPING)) {
25923 
25924 		New_state(un, SD_STATE_DUMPING);
25925 
25926 		if (un->un_f_is_fibre == FALSE) {
25927 			mutex_exit(SD_MUTEX(un));
25928 			/*
25929 			 * Attempt a bus reset for parallel scsi.
25930 			 *
25931 			 * Note: A bus reset is required because on some host
25932 			 * systems (i.e. E420R) a bus device reset is
25933 			 * insufficient to reset the state of the target.
25934 			 *
25935 			 * Note: Don't issue the reset for fibre-channel,
25936 			 * because this tends to hang the bus (loop) for
25937 			 * too long while everyone is logging out and in
25938 			 * and the deadman timer for dumping will fire
25939 			 * before the dump is complete.
25940 			 */
25941 			if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
25942 				mutex_enter(SD_MUTEX(un));
25943 				Restore_state(un);
25944 				mutex_exit(SD_MUTEX(un));
25945 				return (EIO);
25946 			}
25947 
25948 			/* Delay to give the device some recovery time. */
25949 			drv_usecwait(10000);
25950 
25951 			if (sd_send_polled_RQS(un) == SD_FAILURE) {
25952 				SD_INFO(SD_LOG_DUMP, un,
25953 				    "sddump: sd_send_polled_RQS failed\n");
25954 			}
25955 			mutex_enter(SD_MUTEX(un));
25956 		}
25957 	}
25958 
25959 	/*
25960 	 * Convert the partition-relative block number to a
25961 	 * disk physical block number.
25962 	 */
25963 	if (NOT_DEVBSIZE(un)) {
25964 		blkno += start_block;
25965 	} else {
25966 		blkno = blkno / (un->un_tgt_blocksize / DEV_BSIZE);
25967 		blkno += start_block;
25968 	}
25969 
25970 	SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);
25971 
25972 
25973 	/*
25974 	 * Check if the device has a non-512 block size.
25975 	 */
25976 	wr_bp = NULL;
25977 	if (NOT_DEVBSIZE(un)) {
25978 		tgt_byte_offset = blkno * un->un_sys_blocksize;
25979 		tgt_byte_count = nblk * un->un_sys_blocksize;
25980 		if ((tgt_byte_offset % un->un_tgt_blocksize) ||
25981 		    (tgt_byte_count % un->un_tgt_blocksize)) {
25982 			doing_rmw = TRUE;
25983 			/*
25984 			 * Calculate the block number and number of blocks
25985 			 * in terms of the media block size.
25986 			 */
25987 			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
25988 			tgt_nblk =
25989 			    ((tgt_byte_offset + tgt_byte_count +
25990 			    (un->un_tgt_blocksize - 1)) /
25991 			    un->un_tgt_blocksize) - tgt_blkno;
25992 
25993 			/*
25994 			 * Invoke the routine which is going to do the read
25995 			 * part of the read-modify-write.
25996 			 * Note that this routine returns a pointer to
25997 			 * a valid bp in wr_bp.
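			 *
			 * A worked illustration of the arithmetic above
			 * (block sizes assumed purely for the example): with
			 * un_sys_blocksize = 512 and un_tgt_blocksize = 2048,
			 * dumping one system block at disk blkno 7 gives
			 *     tgt_byte_offset = 7 * 512 = 3584
			 *     tgt_byte_count  = 1 * 512 = 512
			 *     tgt_blkno = 3584 / 2048 = 1
			 *     tgt_nblk  = ((3584 + 512 + 2047) / 2048) - 1 = 1
			 * so one 2048-byte media block is read, the 512 dump
			 * bytes are copied in at io_start_offset =
			 * 3584 - (1 * 2048) = 1536, and the whole media block
			 * is written back.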
25998 */ 25999 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 26000 &wr_bp); 26001 if (err) { 26002 mutex_exit(SD_MUTEX(un)); 26003 return (err); 26004 } 26005 /* 26006 * Offset is being calculated as - 26007 * (original block # * system block size) - 26008 * (new block # * target block size) 26009 */ 26010 io_start_offset = 26011 ((uint64_t)(blkno * un->un_sys_blocksize)) - 26012 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 26013 26014 ASSERT(io_start_offset < un->un_tgt_blocksize); 26015 /* 26016 * Do the modify portion of read modify write. 26017 */ 26018 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 26019 (size_t)nblk * un->un_sys_blocksize); 26020 } else { 26021 doing_rmw = FALSE; 26022 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 26023 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 26024 } 26025 26026 /* Convert blkno and nblk to target blocks */ 26027 blkno = tgt_blkno; 26028 nblk = tgt_nblk; 26029 } else { 26030 wr_bp = &wr_buf; 26031 bzero(wr_bp, sizeof (struct buf)); 26032 wr_bp->b_flags = B_BUSY; 26033 wr_bp->b_un.b_addr = addr; 26034 wr_bp->b_bcount = nblk << DEV_BSHIFT; 26035 wr_bp->b_resid = 0; 26036 } 26037 26038 mutex_exit(SD_MUTEX(un)); 26039 26040 /* 26041 * Obtain a SCSI packet for the write command. 26042 * It should be safe to call the allocator here without 26043 * worrying about being locked for DVMA mapping because 26044 * the address we're passed is already a DVMA mapping 26045 * 26046 * We are also not going to worry about semaphore ownership 26047 * in the dump buffer. Dumping is single threaded at present. 26048 */ 26049 26050 wr_pktp = NULL; 26051 26052 dma_resid = wr_bp->b_bcount; 26053 oblkno = blkno; 26054 26055 if (!(NOT_DEVBSIZE(un))) { 26056 nblk = nblk / (un->un_tgt_blocksize / DEV_BSIZE); 26057 } 26058 26059 while (dma_resid != 0) { 26060 26061 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26062 wr_bp->b_flags &= ~B_ERROR; 26063 26064 if (un->un_partial_dma_supported == 1) { 26065 blkno = oblkno + 26066 ((wr_bp->b_bcount - dma_resid) / 26067 un->un_tgt_blocksize); 26068 nblk = dma_resid / un->un_tgt_blocksize; 26069 26070 if (wr_pktp) { 26071 /* 26072 * Partial DMA transfers after initial transfer 26073 */ 26074 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 26075 blkno, nblk); 26076 } else { 26077 /* Initial transfer */ 26078 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 26079 un->un_pkt_flags, NULL_FUNC, NULL, 26080 blkno, nblk); 26081 } 26082 } else { 26083 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 26084 0, NULL_FUNC, NULL, blkno, nblk); 26085 } 26086 26087 if (rval == 0) { 26088 /* We were given a SCSI packet, continue. 
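			 * If partial DMA is in effect, this packet may map
			 * only a prefix of wr_bp; the resid bookkeeping below
			 * trims nblk to what was actually mapped and the next
			 * pass of the outer loop resumes at the first
			 * untransferred block. For example (sizes assumed):
			 * with b_bcount = 64 KB and 512-byte target blocks, a
			 * first mapping that leaves pkt_resid = 32 KB trims
			 * nblk from 128 to 64, and the next pass restarts at
			 * blkno = oblkno + 64 with nblk = 64.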
*/ 26089 break; 26090 } 26091 26092 if (i == 0) { 26093 if (wr_bp->b_flags & B_ERROR) { 26094 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26095 "no resources for dumping; " 26096 "error code: 0x%x, retrying", 26097 geterror(wr_bp)); 26098 } else { 26099 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26100 "no resources for dumping; retrying"); 26101 } 26102 } else if (i != (SD_NDUMP_RETRIES - 1)) { 26103 if (wr_bp->b_flags & B_ERROR) { 26104 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26105 "no resources for dumping; error code: " 26106 "0x%x, retrying\n", geterror(wr_bp)); 26107 } 26108 } else { 26109 if (wr_bp->b_flags & B_ERROR) { 26110 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26111 "no resources for dumping; " 26112 "error code: 0x%x, retries failed, " 26113 "giving up.\n", geterror(wr_bp)); 26114 } else { 26115 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26116 "no resources for dumping; " 26117 "retries failed, giving up.\n"); 26118 } 26119 mutex_enter(SD_MUTEX(un)); 26120 Restore_state(un); 26121 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 26122 mutex_exit(SD_MUTEX(un)); 26123 scsi_free_consistent_buf(wr_bp); 26124 } else { 26125 mutex_exit(SD_MUTEX(un)); 26126 } 26127 return (EIO); 26128 } 26129 drv_usecwait(10000); 26130 } 26131 26132 if (un->un_partial_dma_supported == 1) { 26133 /* 26134 * save the resid from PARTIAL_DMA 26135 */ 26136 dma_resid = wr_pktp->pkt_resid; 26137 if (dma_resid != 0) 26138 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 26139 wr_pktp->pkt_resid = 0; 26140 } else { 26141 dma_resid = 0; 26142 } 26143 26144 /* SunBug 1222170 */ 26145 wr_pktp->pkt_flags = FLAG_NOINTR; 26146 26147 err = EIO; 26148 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26149 26150 /* 26151 * Scsi_poll returns 0 (success) if the command completes and 26152 * the status block is STATUS_GOOD. We should only check 26153 * errors if this condition is not true. Even then we should 26154 * send our own request sense packet only if we have a check 26155 * condition and auto request sense has not been performed by 26156 * the hba. 26157 */ 26158 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 26159 26160 if ((sd_scsi_poll(un, wr_pktp) == 0) && 26161 (wr_pktp->pkt_resid == 0)) { 26162 err = SD_SUCCESS; 26163 break; 26164 } 26165 26166 /* 26167 * Check CMD_DEV_GONE 1st, give up if device is gone. 
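		 * The recovery ladder for the remaining failures is:
		 * CHECK -> polled REQUEST SENSE (unless ARQ already supplied
		 * the sense data); BUSY -> LUN reset if enabled, else target
		 * reset; any other status -> sd_reset_target(); and once half
		 * the retries are spent (i == SD_NDUMP_RETRIES / 2, i.e. 6 of
		 * 12) a full bus reset is attempted as well.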
26168 */ 26169 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 26170 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26171 "Error while dumping state...Device is gone\n"); 26172 break; 26173 } 26174 26175 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 26176 SD_INFO(SD_LOG_DUMP, un, 26177 "sddump: write failed with CHECK, try # %d\n", i); 26178 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 26179 (void) sd_send_polled_RQS(un); 26180 } 26181 26182 continue; 26183 } 26184 26185 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 26186 int reset_retval = 0; 26187 26188 SD_INFO(SD_LOG_DUMP, un, 26189 "sddump: write failed with BUSY, try # %d\n", i); 26190 26191 if (un->un_f_lun_reset_enabled == TRUE) { 26192 reset_retval = scsi_reset(SD_ADDRESS(un), 26193 RESET_LUN); 26194 } 26195 if (reset_retval == 0) { 26196 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 26197 } 26198 (void) sd_send_polled_RQS(un); 26199 26200 } else { 26201 SD_INFO(SD_LOG_DUMP, un, 26202 "sddump: write failed with 0x%x, try # %d\n", 26203 SD_GET_PKT_STATUS(wr_pktp), i); 26204 mutex_enter(SD_MUTEX(un)); 26205 sd_reset_target(un, wr_pktp); 26206 mutex_exit(SD_MUTEX(un)); 26207 } 26208 26209 /* 26210 * If we are not getting anywhere with lun/target resets, 26211 * let's reset the bus. 26212 */ 26213 if (i == SD_NDUMP_RETRIES/2) { 26214 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 26215 (void) sd_send_polled_RQS(un); 26216 } 26217 } 26218 } 26219 26220 scsi_destroy_pkt(wr_pktp); 26221 mutex_enter(SD_MUTEX(un)); 26222 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 26223 mutex_exit(SD_MUTEX(un)); 26224 scsi_free_consistent_buf(wr_bp); 26225 } else { 26226 mutex_exit(SD_MUTEX(un)); 26227 } 26228 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 26229 return (err); 26230 } 26231 26232 /* 26233 * Function: sd_scsi_poll() 26234 * 26235 * Description: This is a wrapper for the scsi_poll call. 26236 * 26237 * Arguments: sd_lun - The unit structure 26238 * scsi_pkt - The scsi packet being sent to the device. 26239 * 26240 * Return Code: 0 - Command completed successfully with good status 26241 * -1 - Command failed. This could indicate a check condition 26242 * or other status value requiring recovery action. 26243 * 26244 * NOTE: This code is only called off sddump(). 26245 */ 26246 26247 static int 26248 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 26249 { 26250 int status; 26251 26252 ASSERT(un != NULL); 26253 ASSERT(!mutex_owned(SD_MUTEX(un))); 26254 ASSERT(pktp != NULL); 26255 26256 status = SD_SUCCESS; 26257 26258 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 26259 pktp->pkt_flags |= un->un_tagflags; 26260 pktp->pkt_flags &= ~FLAG_NODISCON; 26261 } 26262 26263 status = sd_ddi_scsi_poll(pktp); 26264 /* 26265 * Scsi_poll returns 0 (success) if the command completes and the 26266 * status block is STATUS_GOOD. We should only check errors if this 26267 * condition is not true. Even then we should send our own request 26268 * sense packet only if we have a check condition and auto 26269 * request sense has not been performed by the hba. 26270 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 26271 */ 26272 if ((status != SD_SUCCESS) && 26273 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 26274 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 26275 (pktp->pkt_reason != CMD_DEV_GONE)) 26276 (void) sd_send_polled_RQS(un); 26277 26278 return (status); 26279 } 26280 26281 /* 26282 * Function: sd_send_polled_RQS() 26283 * 26284 * Description: This sends the request sense command to a device. 
26285 * 26286 * Arguments: sd_lun - The unit structure 26287 * 26288 * Return Code: 0 - Command completed successfully with good status 26289 * -1 - Command failed. 26290 * 26291 */ 26292 26293 static int 26294 sd_send_polled_RQS(struct sd_lun *un) 26295 { 26296 int ret_val; 26297 struct scsi_pkt *rqs_pktp; 26298 struct buf *rqs_bp; 26299 26300 ASSERT(un != NULL); 26301 ASSERT(!mutex_owned(SD_MUTEX(un))); 26302 26303 ret_val = SD_SUCCESS; 26304 26305 rqs_pktp = un->un_rqs_pktp; 26306 rqs_bp = un->un_rqs_bp; 26307 26308 mutex_enter(SD_MUTEX(un)); 26309 26310 if (un->un_sense_isbusy) { 26311 ret_val = SD_FAILURE; 26312 mutex_exit(SD_MUTEX(un)); 26313 return (ret_val); 26314 } 26315 26316 /* 26317 * If the request sense buffer (and packet) is not in use, 26318 * let's set the un_sense_isbusy and send our packet 26319 */ 26320 un->un_sense_isbusy = 1; 26321 rqs_pktp->pkt_resid = 0; 26322 rqs_pktp->pkt_reason = 0; 26323 rqs_pktp->pkt_flags |= FLAG_NOINTR; 26324 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 26325 26326 mutex_exit(SD_MUTEX(un)); 26327 26328 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 26329 " 0x%p\n", rqs_bp->b_un.b_addr); 26330 26331 /* 26332 * Can't send this to sd_scsi_poll, we wrap ourselves around the 26333 * axle - it has a call into us! 26334 */ 26335 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 26336 SD_INFO(SD_LOG_COMMON, un, 26337 "sd_send_polled_RQS: RQS failed\n"); 26338 } 26339 26340 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 26341 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 26342 26343 mutex_enter(SD_MUTEX(un)); 26344 un->un_sense_isbusy = 0; 26345 mutex_exit(SD_MUTEX(un)); 26346 26347 return (ret_val); 26348 } 26349 26350 /* 26351 * Defines needed for localized version of the scsi_poll routine. 26352 */ 26353 #define CSEC 10000 /* usecs */ 26354 #define SEC_TO_CSEC (1000000/CSEC) 26355 26356 /* 26357 * Function: sd_ddi_scsi_poll() 26358 * 26359 * Description: Localized version of the scsi_poll routine. The purpose is to 26360 * send a scsi_pkt to a device as a polled command. This version 26361 * is to ensure more robust handling of transport errors. 26362 * Specifically this routine cures not ready, coming ready 26363 * transition for power up and reset of sonoma's. This can take 26364 * up to 45 seconds for power-on and 20 seconds for reset of a 26365 * sonoma lun. 26366 * 26367 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 26368 * 26369 * Return Code: 0 - Command completed successfully with good status 26370 * -1 - Command failed. 26371 * 26372 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can 26373 * be fixed (removing this code), we need to determine how to handle the 26374 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 26375 * 26376 * NOTE: This code is only called off sddump(). 26377 */ 26378 static int 26379 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 26380 { 26381 int rval = -1; 26382 int savef; 26383 long savet; 26384 void (*savec)(); 26385 int timeout; 26386 int busy_count; 26387 int poll_delay; 26388 int rc; 26389 uint8_t *sensep; 26390 struct scsi_arq_status *arqstat; 26391 extern int do_polled_io; 26392 26393 ASSERT(pkt->pkt_scbp); 26394 26395 /* 26396 * save old flags.. 
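	 * (pkt_flags, pkt_comp and pkt_time are all restored just before
	 * returning, so the caller's packet is not perturbed by the
	 * polling done here.)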
26397 */ 26398 savef = pkt->pkt_flags; 26399 savec = pkt->pkt_comp; 26400 savet = pkt->pkt_time; 26401 26402 pkt->pkt_flags |= FLAG_NOINTR; 26403 26404 /* 26405 * XXX there is nothing in the SCSA spec that states that we should not 26406 * do a callback for polled cmds; however, removing this will break sd 26407 * and probably other target drivers 26408 */ 26409 pkt->pkt_comp = NULL; 26410 26411 /* 26412 * we don't like a polled command without timeout. 26413 * 60 seconds seems long enough. 26414 */ 26415 if (pkt->pkt_time == 0) 26416 pkt->pkt_time = SCSI_POLL_TIMEOUT; 26417 26418 /* 26419 * Send polled cmd. 26420 * 26421 * We do some error recovery for various errors. Tran_busy, 26422 * queue full, and non-dispatched commands are retried every 10 msec. 26423 * as they are typically transient failures. Busy status and Not 26424 * Ready are retried every second as this status takes a while to 26425 * change. 26426 */ 26427 timeout = pkt->pkt_time * SEC_TO_CSEC; 26428 26429 for (busy_count = 0; busy_count < timeout; busy_count++) { 26430 /* 26431 * Initialize pkt status variables. 26432 */ 26433 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 26434 26435 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 26436 if (rc != TRAN_BUSY) { 26437 /* Transport failed - give up. */ 26438 break; 26439 } else { 26440 /* Transport busy - try again. */ 26441 poll_delay = 1 * CSEC; /* 10 msec. */ 26442 } 26443 } else { 26444 /* 26445 * Transport accepted - check pkt status. 26446 */ 26447 rc = (*pkt->pkt_scbp) & STATUS_MASK; 26448 if ((pkt->pkt_reason == CMD_CMPLT) && 26449 (rc == STATUS_CHECK) && 26450 (pkt->pkt_state & STATE_ARQ_DONE)) { 26451 arqstat = 26452 (struct scsi_arq_status *)(pkt->pkt_scbp); 26453 sensep = (uint8_t *)&arqstat->sts_sensedata; 26454 } else { 26455 sensep = NULL; 26456 } 26457 26458 if ((pkt->pkt_reason == CMD_CMPLT) && 26459 (rc == STATUS_GOOD)) { 26460 /* No error - we're done */ 26461 rval = 0; 26462 break; 26463 26464 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 26465 /* Lost connection - give up */ 26466 break; 26467 26468 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 26469 (pkt->pkt_state == 0)) { 26470 /* Pkt not dispatched - try again. */ 26471 poll_delay = 1 * CSEC; /* 10 msec. */ 26472 26473 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26474 (rc == STATUS_QFULL)) { 26475 /* Queue full - try again. */ 26476 poll_delay = 1 * CSEC; /* 10 msec. */ 26477 26478 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26479 (rc == STATUS_BUSY)) { 26480 /* Busy - try again. */ 26481 poll_delay = 100 * CSEC; /* 1 sec. */ 26482 busy_count += (SEC_TO_CSEC - 1); 26483 26484 } else if ((sensep != NULL) && 26485 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 26486 /* 26487 * Unit Attention - try again. 26488 * Pretend it took 1 sec. 26489 * NOTE: 'continue' avoids poll_delay 26490 */ 26491 busy_count += (SEC_TO_CSEC - 1); 26492 continue; 26493 26494 } else if ((sensep != NULL) && 26495 (scsi_sense_key(sensep) == KEY_NOT_READY) && 26496 (scsi_sense_asc(sensep) == 0x04) && 26497 (scsi_sense_ascq(sensep) == 0x01)) { 26498 /* 26499 * Not ready -> ready - try again. 26500 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 26501 * ...same as STATUS_BUSY 26502 */ 26503 poll_delay = 100 * CSEC; /* 1 sec. */ 26504 busy_count += (SEC_TO_CSEC - 1); 26505 26506 } else { 26507 /* BAD status - give up. 
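				 * For a sense of the budget (assuming the
				 * 60 second SCSI_POLL_TIMEOUT default is in
				 * effect): timeout works out to
				 * 60 * SEC_TO_CSEC = 6000 credits; a BUSY or
				 * not-ready retry costs 100 credits (one
				 * second of delay) while a transport-busy,
				 * queue-full or undispatched retry costs one
				 * credit (10 msec), so the loop rides out
				 * roughly a minute of either kind of
				 * transient.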
*/ 26508 break; 26509 } 26510 } 26511 26512 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 26513 !do_polled_io) { 26514 delay(drv_usectohz(poll_delay)); 26515 } else { 26516 /* we busy wait during cpr_dump or interrupt threads */ 26517 drv_usecwait(poll_delay); 26518 } 26519 } 26520 26521 pkt->pkt_flags = savef; 26522 pkt->pkt_comp = savec; 26523 pkt->pkt_time = savet; 26524 26525 /* return on error */ 26526 if (rval) 26527 return (rval); 26528 26529 /* 26530 * This is not a performance critical code path. 26531 * 26532 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 26533 * issues associated with looking at DMA memory prior to 26534 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 26535 */ 26536 scsi_sync_pkt(pkt); 26537 return (0); 26538 } 26539 26540 26541 26542 /* 26543 * Function: sd_persistent_reservation_in_read_keys 26544 * 26545 * Description: This routine is the driver entry point for handling CD-ROM 26546 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 26547 * by sending the SCSI-3 PRIN commands to the device. 26548 * Processes the read keys command response by copying the 26549 * reservation key information into the user provided buffer. 26550 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 26551 * 26552 * Arguments: un - Pointer to soft state struct for the target. 26553 * usrp - user provided pointer to multihost Persistent In Read 26554 * Keys structure (mhioc_inkeys_t) 26555 * flag - this argument is a pass through to ddi_copyxxx() 26556 * directly from the mode argument of ioctl(). 26557 * 26558 * Return Code: 0 - Success 26559 * EACCES 26560 * ENOTSUP 26561 * errno return code from sd_send_scsi_cmd() 26562 * 26563 * Context: Can sleep. Does not return until command is completed. 26564 */ 26565 26566 static int 26567 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 26568 mhioc_inkeys_t *usrp, int flag) 26569 { 26570 #ifdef _MULTI_DATAMODEL 26571 struct mhioc_key_list32 li32; 26572 #endif 26573 sd_prin_readkeys_t *in; 26574 mhioc_inkeys_t *ptr; 26575 mhioc_key_list_t li; 26576 uchar_t *data_bufp = NULL; 26577 int data_len = 0; 26578 int rval = 0; 26579 size_t copysz = 0; 26580 sd_ssc_t *ssc; 26581 26582 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 26583 return (EINVAL); 26584 } 26585 bzero(&li, sizeof (mhioc_key_list_t)); 26586 26587 ssc = sd_ssc_init(un); 26588 26589 /* 26590 * Get the listsize from user 26591 */ 26592 #ifdef _MULTI_DATAMODEL 26593 switch (ddi_model_convert_from(flag & FMODELS)) { 26594 case DDI_MODEL_ILP32: 26595 copysz = sizeof (struct mhioc_key_list32); 26596 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 26597 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26598 "sd_persistent_reservation_in_read_keys: " 26599 "failed ddi_copyin: mhioc_key_list32_t\n"); 26600 rval = EFAULT; 26601 goto done; 26602 } 26603 li.listsize = li32.listsize; 26604 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 26605 break; 26606 26607 case DDI_MODEL_NONE: 26608 copysz = sizeof (mhioc_key_list_t); 26609 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26610 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26611 "sd_persistent_reservation_in_read_keys: " 26612 "failed ddi_copyin: mhioc_key_list_t\n"); 26613 rval = EFAULT; 26614 goto done; 26615 } 26616 break; 26617 } 26618 26619 #else /* ! 
_MULTI_DATAMODEL */
26620 	copysz = sizeof (mhioc_key_list_t);
26621 	if (ddi_copyin(ptr->li, &li, copysz, flag)) {
26622 		SD_ERROR(SD_LOG_IOCTL_MHD, un,
26623 		    "sd_persistent_reservation_in_read_keys: "
26624 		    "failed ddi_copyin: mhioc_key_list_t\n");
26625 		rval = EFAULT;
26626 		goto done;
26627 	}
26628 #endif
26629 
26630 	data_len = li.listsize * MHIOC_RESV_KEY_SIZE;
26631 	data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t));
26632 	data_bufp = kmem_zalloc(data_len, KM_SLEEP);
26633 
26634 	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS,
26635 	    data_len, data_bufp);
26636 	if (rval != 0) {
26637 		if (rval == EIO)
26638 			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
26639 		else
26640 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
26641 		goto done;
26642 	}
26643 	in = (sd_prin_readkeys_t *)data_bufp;
26644 	ptr->generation = BE_32(in->generation);
26645 	li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE;
26646 
26647 	/*
26648 	 * Return the min(listsize, listlen) keys
26649 	 */
26650 #ifdef _MULTI_DATAMODEL
26651 
26652 	switch (ddi_model_convert_from(flag & FMODELS)) {
26653 	case DDI_MODEL_ILP32:
26654 		li32.listlen = li.listlen;
26655 		if (ddi_copyout(&li32, ptr->li, copysz, flag)) {
26656 			SD_ERROR(SD_LOG_IOCTL_MHD, un,
26657 			    "sd_persistent_reservation_in_read_keys: "
26658 			    "failed ddi_copyout: mhioc_key_list32_t\n");
26659 			rval = EFAULT;
26660 			goto done;
26661 		}
26662 		break;
26663 
26664 	case DDI_MODEL_NONE:
26665 		if (ddi_copyout(&li, ptr->li, copysz, flag)) {
26666 			SD_ERROR(SD_LOG_IOCTL_MHD, un,
26667 			    "sd_persistent_reservation_in_read_keys: "
26668 			    "failed ddi_copyout: mhioc_key_list_t\n");
26669 			rval = EFAULT;
26670 			goto done;
26671 		}
26672 		break;
26673 	}
26674 
26675 #else /* ! _MULTI_DATAMODEL */
26676 
26677 	if (ddi_copyout(&li, ptr->li, copysz, flag)) {
26678 		SD_ERROR(SD_LOG_IOCTL_MHD, un,
26679 		    "sd_persistent_reservation_in_read_keys: "
26680 		    "failed ddi_copyout: mhioc_key_list_t\n");
26681 		rval = EFAULT;
26682 		goto done;
26683 	}
26684 
26685 #endif /* _MULTI_DATAMODEL */
26686 
26687 	copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE,
26688 	    li.listsize * MHIOC_RESV_KEY_SIZE);
26689 	if (ddi_copyout(&in->keylist, li.list, copysz, flag)) {
26690 		SD_ERROR(SD_LOG_IOCTL_MHD, un,
26691 		    "sd_persistent_reservation_in_read_keys: "
26692 		    "failed ddi_copyout: keylist\n");
26693 		rval = EFAULT;
26694 	}
26695 done:
26696 	sd_ssc_fini(ssc);
26697 	kmem_free(data_bufp, data_len);
26698 	return (rval);
26699 }
26700 
26701 
26702 /*
26703  * Function: sd_persistent_reservation_in_read_resv
26704  *
26705  * Description: This routine is the driver entry point for handling CD-ROM
26706  *		multi-host persistent reservation requests (MHIOCGRP_INRESV)
26707  *		by sending the SCSI-3 PRIN commands to the device.
26708  *		Process the read persistent reservations command response by
26709  *		copying the reservation information into the user provided
26710  *		buffer. Support for the 32/64 bit _MULTI_DATAMODEL is implemented.
26711  *
26712  * Arguments: un - Pointer to soft state struct for the target.
26713  *		usrp - user provided pointer to multihost Persistent In Read
26714  *		Reservations structure (mhioc_inresvs_t)
26715  *		flag - this argument is a pass through to ddi_copyxxx()
26716  *		directly from the mode argument of ioctl().
26717  *
26718  * Return Code: 0 - Success
26719  *		EACCES
26720  *		ENOTSUP
26721  *		errno return code from sd_send_scsi_cmd()
26722  *
26723  * Context: Can sleep. Does not return until command is completed.
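 *
 * As a sizing illustration (the 16-byte SCSI-3 reservation descriptor
 * length for SCSI3_RESV_DESC_LEN is assumed here): a caller passing
 * listsize = 4 results in a PRIN READ RESERVATIONS data buffer of
 * 4 * 16 = 64 bytes plus the sd_prin_readresv_t header, exactly as
 * data_len is computed below.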
26724  */
26725 
26726 static int
26727 sd_persistent_reservation_in_read_resv(struct sd_lun *un,
26728     mhioc_inresvs_t *usrp, int flag)
26729 {
26730 #ifdef _MULTI_DATAMODEL
26731 	struct mhioc_resv_desc_list32 resvlist32;
26732 #endif
26733 	sd_prin_readresv_t *in;
26734 	mhioc_inresvs_t *ptr;
26735 	sd_readresv_desc_t *readresv_ptr;
26736 	mhioc_resv_desc_list_t resvlist;
26737 	mhioc_resv_desc_t resvdesc;
26738 	uchar_t *data_bufp = NULL;
26739 	int data_len;
26740 	int rval = 0;
26741 	int i;
26742 	size_t copysz = 0;
26743 	mhioc_resv_desc_t *bufp;
26744 	sd_ssc_t *ssc;
26745 
26746 	if ((ptr = usrp) == NULL) {
26747 		return (EINVAL);
26748 	}
26749 
26750 	ssc = sd_ssc_init(un);
26751 
26752 	/*
26753 	 * Get the listsize from user
26754 	 */
26755 #ifdef _MULTI_DATAMODEL
26756 	switch (ddi_model_convert_from(flag & FMODELS)) {
26757 	case DDI_MODEL_ILP32:
26758 		copysz = sizeof (struct mhioc_resv_desc_list32);
26759 		if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) {
26760 			SD_ERROR(SD_LOG_IOCTL_MHD, un,
26761 			    "sd_persistent_reservation_in_read_resv: "
26762 			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26763 			rval = EFAULT;
26764 			goto done;
26765 		}
26766 		resvlist.listsize = resvlist32.listsize;
26767 		resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list;
26768 		break;
26769 
26770 	case DDI_MODEL_NONE:
26771 		copysz = sizeof (mhioc_resv_desc_list_t);
26772 		if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
26773 			SD_ERROR(SD_LOG_IOCTL_MHD, un,
26774 			    "sd_persistent_reservation_in_read_resv: "
26775 			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26776 			rval = EFAULT;
26777 			goto done;
26778 		}
26779 		break;
26780 	}
26781 #else /* ! _MULTI_DATAMODEL */
26782 	copysz = sizeof (mhioc_resv_desc_list_t);
26783 	if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
26784 		SD_ERROR(SD_LOG_IOCTL_MHD, un,
26785 		    "sd_persistent_reservation_in_read_resv: "
26786 		    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26787 		rval = EFAULT;
26788 		goto done;
26789 	}
26790 #endif /* ! _MULTI_DATAMODEL */
26791 
26792 	data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN;
26793 	data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t));
26794 	data_bufp = kmem_zalloc(data_len, KM_SLEEP);
26795 
26796 	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV,
26797 	    data_len, data_bufp);
26798 	if (rval != 0) {
26799 		if (rval == EIO)
26800 			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
26801 		else
26802 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
26803 		goto done;
26804 	}
26805 	in = (sd_prin_readresv_t *)data_bufp;
26806 	ptr->generation = BE_32(in->generation);
26807 	resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN;
26808 
26809 	/*
26810 	 * Return the min(listsize, listlen) keys
26811 	 */
26812 #ifdef _MULTI_DATAMODEL
26813 
26814 	switch (ddi_model_convert_from(flag & FMODELS)) {
26815 	case DDI_MODEL_ILP32:
26816 		resvlist32.listlen = resvlist.listlen;
26817 		if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) {
26818 			SD_ERROR(SD_LOG_IOCTL_MHD, un,
26819 			    "sd_persistent_reservation_in_read_resv: "
26820 			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26821 			rval = EFAULT;
26822 			goto done;
26823 		}
26824 		break;
26825 
26826 	case DDI_MODEL_NONE:
26827 		if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
26828 			SD_ERROR(SD_LOG_IOCTL_MHD, un,
26829 			    "sd_persistent_reservation_in_read_resv: "
26830 			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26831 			rval = EFAULT;
26832 			goto done;
26833 		}
26834 		break;
26835 	}
26836 
26837 #else /* !
_MULTI_DATAMODEL */
26838 
26839 	if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
26840 		SD_ERROR(SD_LOG_IOCTL_MHD, un,
26841 		    "sd_persistent_reservation_in_read_resv: "
26842 		    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26843 		rval = EFAULT;
26844 		goto done;
26845 	}
26846 
26847 #endif /* ! _MULTI_DATAMODEL */
26848 
26849 	readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc;
26850 	bufp = resvlist.list;
26851 	copysz = sizeof (mhioc_resv_desc_t);
26852 	for (i = 0; i < min(resvlist.listlen, resvlist.listsize);
26853 	    i++, readresv_ptr++, bufp++) {
26854 
26855 		bcopy(&readresv_ptr->resvkey, &resvdesc.key,
26856 		    MHIOC_RESV_KEY_SIZE);
26857 		resvdesc.type = readresv_ptr->type;
26858 		resvdesc.scope = readresv_ptr->scope;
26859 		resvdesc.scope_specific_addr =
26860 		    BE_32(readresv_ptr->scope_specific_addr);
26861 
26862 		if (ddi_copyout(&resvdesc, bufp, copysz, flag)) {
26863 			SD_ERROR(SD_LOG_IOCTL_MHD, un,
26864 			    "sd_persistent_reservation_in_read_resv: "
26865 			    "failed ddi_copyout: resvlist\n");
26866 			rval = EFAULT;
26867 			goto done;
26868 		}
26869 	}
26870 done:
26871 	sd_ssc_fini(ssc);
26872 	/* free data_bufp only if it was actually allocated */
26873 	if (data_bufp) {
26874 		kmem_free(data_bufp, data_len);
26875 	}
26876 	return (rval);
26877 }
26878 
26879 
26880 /*
26881  * Function: sr_change_blkmode()
26882  *
26883  * Description: This routine is the driver entry point for handling CD-ROM
26884  *		block mode ioctl requests. Support for returning and changing
26885  *		the current block size in use by the device is implemented. The
26886  *		LBA size is changed via a MODE SELECT Block Descriptor.
26887  *
26888  *		This routine issues a mode sense with an allocation length of
26889  *		12 bytes for the mode page header and a single block descriptor.
26890  *
26891  * Arguments: dev - the device 'dev_t'
26892  *		cmd - the request type; one of CDROMGBLKMODE (get) or
26893  *		CDROMSBLKMODE (set)
26894  *		data - current block size or requested block size
26895  *		flag - this argument is a pass through to ddi_copyxxx() directly
26896  *		from the mode argument of ioctl().
26897  *
26898  * Return Code: the code returned by sd_send_scsi_cmd()
26899  *		EINVAL if invalid arguments are provided
26900  *		EFAULT if ddi_copyxxx() fails
26901  *		ENXIO if fail ddi_get_soft_state
26902  *		EIO if invalid mode sense block descriptor length
26903  *
26904  */
26905 
26906 static int
26907 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag)
26908 {
26909 	struct sd_lun *un = NULL;
26910 	struct mode_header *sense_mhp, *select_mhp;
26911 	struct block_descriptor *sense_desc, *select_desc;
26912 	int current_bsize;
26913 	int rval = EINVAL;
26914 	uchar_t *sense = NULL;
26915 	uchar_t *select = NULL;
26916 	sd_ssc_t *ssc;
26917 
26918 	ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE));
26919 
26920 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
26921 		return (ENXIO);
26922 	}
26923 
26924 	/*
26925 	 * The block length is changed via the Mode Select block descriptor;
26926 	 * the "Read/Write Error Recovery" mode page (0x1) contents are not
26927 	 * actually required as part of this routine. Therefore the mode sense
26928 	 * allocation length is specified to be the length of a mode page
26929 	 * header and a block descriptor.
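	 * (With the usual 4-byte mode header and 8-byte block descriptor
	 * that comes to the 12 bytes mentioned above; the sizes are stated
	 * here for illustration.) The 24-bit packing used on the select
	 * path below splits a requested size of, say, 2048 (0x000800) as
	 *     blksize_hi  = (0x000800 & 0x00ff0000) >> 16 = 0x00
	 *     blksize_mid = (0x000800 & 0x0000ff00) >> 8  = 0x08
	 *     blksize_lo  =  0x000800 & 0x000000ff        = 0x00
	 * and the sense path reassembles (hi << 16) | (mid << 8) | lo
	 * back into 2048.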
26930 	 */
26931 	sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
26932 
26933 	ssc = sd_ssc_init(un);
26934 	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
26935 	    BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD);
26936 	sd_ssc_fini(ssc);
26937 	if (rval != 0) {
26938 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26939 		    "sr_change_blkmode: Mode Sense Failed\n");
26940 		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26941 		return (rval);
26942 	}
26943 
26944 	/* Check the block descriptor len to handle only 1 block descriptor */
26945 	sense_mhp = (struct mode_header *)sense;
26946 	if ((sense_mhp->bdesc_length == 0) ||
26947 	    (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
26948 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26949 		    "sr_change_blkmode: Mode Sense returned invalid block"
26950 		    " descriptor length\n");
26951 		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26952 		return (EIO);
26953 	}
26954 	sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
26955 	current_bsize = ((sense_desc->blksize_hi << 16) |
26956 	    (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);
26957 
26958 	/* Process command */
26959 	switch (cmd) {
26960 	case CDROMGBLKMODE:
26961 		/* Return the block size obtained during the mode sense */
26962 		if (ddi_copyout(&current_bsize, (void *)data,
26963 		    sizeof (int), flag) != 0)
26964 			rval = EFAULT;
26965 		break;
26966 	case CDROMSBLKMODE:
26967 		/* Validate the requested block size */
26968 		switch (data) {
26969 		case CDROM_BLK_512:
26970 		case CDROM_BLK_1024:
26971 		case CDROM_BLK_2048:
26972 		case CDROM_BLK_2056:
26973 		case CDROM_BLK_2336:
26974 		case CDROM_BLK_2340:
26975 		case CDROM_BLK_2352:
26976 		case CDROM_BLK_2368:
26977 		case CDROM_BLK_2448:
26978 		case CDROM_BLK_2646:
26979 		case CDROM_BLK_2647:
26980 			break;
26981 		default:
26982 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26983 			    "sr_change_blkmode: "
26984 			    "Block Size '%ld' Not Supported\n", data);
26985 			kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26986 			return (EINVAL);
26987 		}
26988 
26989 		/*
26990 		 * The current block size matches the requested block size so
26991 		 * there is no need to send the mode select to change the size
26992 		 */
26993 		if (current_bsize == data) {
26994 			break;
26995 		}
26996 
26997 		/* Build the select data for the requested block size */
26998 		select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
26999 		select_mhp = (struct mode_header *)select;
27000 		select_desc =
27001 		    (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
27002 		/*
27003 		 * The LBA size is changed via the block descriptor, so the
27004 		 * descriptor is built according to the user data
27005 		 */
27006 		select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
27007 		select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16);
27008 		select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
27009 		select_desc->blksize_lo = (char)((data) & 0x000000ff);
27010 
27011 		/* Send the mode select for the requested block size */
27012 		ssc = sd_ssc_init(un);
27013 		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
27014 		    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
27015 		    SD_PATH_STANDARD);
27016 		sd_ssc_fini(ssc);
27017 		if (rval != 0) {
27018 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27019 			    "sr_change_blkmode: Mode Select Failed\n");
27020 			/*
27021 			 * The mode select failed for the requested block size,
27022 			 * so reset the data for the original block size and
27023 			 * send it to the target. The error is indicated by the
27024 			 * return value for the failed mode select.
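			 * (That restoring mode select is deliberately issued
			 * (void): its own status is ignored so the caller
			 * still sees the original failure.)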
27025 */ 27026 select_desc->blksize_hi = sense_desc->blksize_hi; 27027 select_desc->blksize_mid = sense_desc->blksize_mid; 27028 select_desc->blksize_lo = sense_desc->blksize_lo; 27029 ssc = sd_ssc_init(un); 27030 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 27031 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 27032 SD_PATH_STANDARD); 27033 sd_ssc_fini(ssc); 27034 } else { 27035 ASSERT(!mutex_owned(SD_MUTEX(un))); 27036 mutex_enter(SD_MUTEX(un)); 27037 sd_update_block_info(un, (uint32_t)data, 0); 27038 mutex_exit(SD_MUTEX(un)); 27039 } 27040 break; 27041 default: 27042 /* should not reach here, but check anyway */ 27043 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27044 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 27045 rval = EINVAL; 27046 break; 27047 } 27048 27049 if (select) { 27050 kmem_free(select, BUFLEN_CHG_BLK_MODE); 27051 } 27052 if (sense) { 27053 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 27054 } 27055 return (rval); 27056 } 27057 27058 27059 /* 27060 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 27061 * implement driver support for getting and setting the CD speed. The command 27062 * set used will be based on the device type. If the device has not been 27063 * identified as MMC the Toshiba vendor specific mode page will be used. If 27064 * the device is MMC but does not support the Real Time Streaming feature 27065 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 27066 * be used to read the speed. 27067 */ 27068 27069 /* 27070 * Function: sr_change_speed() 27071 * 27072 * Description: This routine is the driver entry point for handling CD-ROM 27073 * drive speed ioctl requests for devices supporting the Toshiba 27074 * vendor specific drive speed mode page. Support for returning 27075 * and changing the current drive speed in use by the device is 27076 * implemented. 27077 * 27078 * Arguments: dev - the device 'dev_t' 27079 * cmd - the request type; one of CDROMGDRVSPEED (get) or 27080 * CDROMSDRVSPEED (set) 27081 * data - current drive speed or requested drive speed 27082 * flag - this argument is a pass through to ddi_copyxxx() directly 27083 * from the mode argument of ioctl(). 27084 * 27085 * Return Code: the code returned by sd_send_scsi_cmd() 27086 * EINVAL if invalid arguments are provided 27087 * EFAULT if ddi_copyxxx() fails 27088 * ENXIO if fail ddi_get_soft_state 27089 * EIO if invalid mode sense block descriptor length 27090 */ 27091 27092 static int 27093 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 27094 { 27095 struct sd_lun *un = NULL; 27096 struct mode_header *sense_mhp, *select_mhp; 27097 struct mode_speed *sense_page, *select_page; 27098 int current_speed; 27099 int rval = EINVAL; 27100 int bd_len; 27101 uchar_t *sense = NULL; 27102 uchar_t *select = NULL; 27103 sd_ssc_t *ssc; 27104 27105 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 27106 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27107 return (ENXIO); 27108 } 27109 27110 /* 27111 * Note: The drive speed is being modified here according to a Toshiba 27112 * vendor specific mode page (0x31). 
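	 * The page carries a single speed code byte. Note the quirk handled
	 * below: a drive reporting the raw code 0x2 is presented to the
	 * caller as CDROM_TWELVE_SPEED on the get path, and a requested
	 * CDROM_TWELVE_SPEED is translated back to 0x2 on the set path.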
27113 	 */
27114 	sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
27115 
27116 	ssc = sd_ssc_init(un);
27117 	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
27118 	    BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
27119 	    SD_PATH_STANDARD);
27120 	sd_ssc_fini(ssc);
27121 	if (rval != 0) {
27122 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27123 		    "sr_change_speed: Mode Sense Failed\n");
27124 		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27125 		return (rval);
27126 	}
27127 	sense_mhp = (struct mode_header *)sense;
27128 
27129 	/* Check the block descriptor len to handle only 1 block descriptor */
27130 	bd_len = sense_mhp->bdesc_length;
27131 	if (bd_len > MODE_BLK_DESC_LENGTH) {
27132 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27133 		    "sr_change_speed: Mode Sense returned invalid block "
27134 		    "descriptor length\n");
27135 		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27136 		return (EIO);
27137 	}
27138 
27139 	sense_page = (struct mode_speed *)
27140 	    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
27141 	current_speed = sense_page->speed;
27142 
27143 	/* Process command */
27144 	switch (cmd) {
27145 	case CDROMGDRVSPEED:
27146 		/* Return the drive speed obtained during the mode sense */
27147 		if (current_speed == 0x2) {
27148 			current_speed = CDROM_TWELVE_SPEED;
27149 		}
27150 		if (ddi_copyout(&current_speed, (void *)data,
27151 		    sizeof (int), flag) != 0) {
27152 			rval = EFAULT;
27153 		}
27154 		break;
27155 	case CDROMSDRVSPEED:
27156 		/* Validate the requested drive speed */
27157 		switch ((uchar_t)data) {
27158 		case CDROM_TWELVE_SPEED:
27159 			data = 0x2;
27160 			/*FALLTHROUGH*/
27161 		case CDROM_NORMAL_SPEED:
27162 		case CDROM_DOUBLE_SPEED:
27163 		case CDROM_QUAD_SPEED:
27164 		case CDROM_MAXIMUM_SPEED:
27165 			break;
27166 		default:
27167 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27168 			    "sr_change_speed: "
27169 			    "Drive Speed '%d' Not Supported\n", (uchar_t)data);
27170 			kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27171 			return (EINVAL);
27172 		}
27173 
27174 		/*
27175 		 * The current drive speed matches the requested drive speed so
27176 		 * there is no need to send the mode select to change the speed
27177 		 */
27178 		if (current_speed == data) {
27179 			break;
27180 		}
27181 
27182 		/* Build the select data for the requested drive speed */
27183 		select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
27184 		select_mhp = (struct mode_header *)select;
27185 		select_mhp->bdesc_length = 0;
27186 		select_page =
27187 		    (struct mode_speed *)(select + MODE_HEADER_LENGTH);
27190 		select_page->mode_page.code = CDROM_MODE_SPEED;
27191 		select_page->mode_page.length = 2;
27192 		select_page->speed = (uchar_t)data;
27193 
27194 		/* Send the mode select for the requested drive speed */
27195 		ssc = sd_ssc_init(un);
27196 		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
27197 		    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
27198 		    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
27199 		sd_ssc_fini(ssc);
27200 		if (rval != 0) {
27201 			/*
27202 			 * The mode select failed for the requested drive speed,
27203 			 * so reset the data for the original drive speed and
27204 			 * send it to the target. The error is indicated by the
27205 			 * return value for the failed mode select.
27206 			 */
27207 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27208 			    "sr_change_speed: Mode Select Failed\n");
27209 			select_page->speed = sense_page->speed;
27210 			ssc = sd_ssc_init(un);
27211 			(void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
27212 			    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
27213 			    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
27214 			sd_ssc_fini(ssc);
27215 		}
27216 		break;
27217 	default:
27218 		/* should not reach here, but check anyway */
27219 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27220 		    "sr_change_speed: Command '%x' Not Supported\n", cmd);
27221 		rval = EINVAL;
27222 		break;
27223 	}
27224 
27225 	if (select) {
27226 		kmem_free(select, BUFLEN_MODE_CDROM_SPEED);
27227 	}
27228 	if (sense) {
27229 		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27230 	}
27231 
27232 	return (rval);
27233 }
27234 
27235 
27236 /*
27237  * Function: sr_atapi_change_speed()
27238  *
27239  * Description: This routine is the driver entry point for handling CD-ROM
27240  *		drive speed ioctl requests for MMC devices that do not support
27241  *		the Real Time Streaming feature (0x107).
27242  *
27243  *		Note: This routine will use the SET SPEED command which may not
27244  *		be supported by all devices.
27245  *
27246  * Arguments: dev - the device 'dev_t'
27247  *		cmd - the request type; one of CDROMGDRVSPEED (get) or
27248  *		CDROMSDRVSPEED (set)
27249  *		data - current drive speed or requested drive speed
27250  *		flag - this argument is a pass through to ddi_copyxxx() directly
27251  *		from the mode argument of ioctl().
27252  *
27253  * Return Code: the code returned by sd_send_scsi_cmd()
27254  *		EINVAL if invalid arguments are provided
27255  *		EFAULT if ddi_copyxxx() fails
27256  *		ENXIO if fail ddi_get_soft_state
27257  *		EIO if invalid mode sense block descriptor length
27258  */
27259 
27260 static int
27261 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
27262 {
27263 	struct sd_lun *un;
27264 	struct uscsi_cmd *com = NULL;
27265 	struct mode_header_grp2 *sense_mhp;
27266 	uchar_t *sense_page;
27267 	uchar_t *sense = NULL;
27268 	char cdb[CDB_GROUP5];
27269 	int bd_len;
27270 	int current_speed = 0;
27271 	int max_speed = 0;
27272 	int rval;
27273 	sd_ssc_t *ssc;
27274 
27275 	ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));
27276 
27277 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27278 		return (ENXIO);
27279 	}
27280 
27281 	sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
27282 
27283 	ssc = sd_ssc_init(un);
27284 	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
27285 	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
27286 	    SD_PATH_STANDARD);
27287 	sd_ssc_fini(ssc);
27288 	if (rval != 0) {
27289 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27290 		    "sr_atapi_change_speed: Mode Sense Failed\n");
27291 		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27292 		return (rval);
27293 	}
27294 
27295 	/* Check the block descriptor len to handle only 1 block descriptor */
27296 	sense_mhp = (struct mode_header_grp2 *)sense;
27297 	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
27298 	if (bd_len > MODE_BLK_DESC_LENGTH) {
27299 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27300 		    "sr_atapi_change_speed: Mode Sense returned invalid "
27301 		    "block descriptor length\n");
27302 		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27303 		return (EIO);
27304 	}
27305 
27306 	/* Calculate the current and maximum drive speeds */
27307 	sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
27308 	current_speed = (sense_page[14] << 8) | sense_page[15];
27309 	max_speed = (sense_page[8] << 8) | sense_page[9];
27310 
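	/*
	 * Illustrative conversion, assuming SD_SPEED_1X is the nominal
	 * 176 KB/sec 1x CD-ROM rate: a capabilities page reporting
	 * 706 KB/sec in bytes 14-15 is returned to the caller as
	 * 706 / 176 = 4 (quad speed) on the get path, while a caller
	 * requesting CDROM_QUAD_SPEED on the set path is converted to
	 * 4 * 176 = 704 KB/sec before the SET SPEED CDB is built.
	 */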
27311 	/* Process the command */
27312 	switch (cmd) {
27313 	case CDROMGDRVSPEED:
27314 		current_speed /= SD_SPEED_1X;
27315 		if (ddi_copyout(&current_speed, (void *)data,
27316 		    sizeof (int), flag) != 0)
27317 			rval = EFAULT;
27318 		break;
27319 	case CDROMSDRVSPEED:
27320 		/* Convert the speed code to KB/sec */
27321 		switch ((uchar_t)data) {
27322 		case CDROM_NORMAL_SPEED:
27323 			current_speed = SD_SPEED_1X;
27324 			break;
27325 		case CDROM_DOUBLE_SPEED:
27326 			current_speed = 2 * SD_SPEED_1X;
27327 			break;
27328 		case CDROM_QUAD_SPEED:
27329 			current_speed = 4 * SD_SPEED_1X;
27330 			break;
27331 		case CDROM_TWELVE_SPEED:
27332 			current_speed = 12 * SD_SPEED_1X;
27333 			break;
27334 		case CDROM_MAXIMUM_SPEED:
27335 			current_speed = 0xffff;
27336 			break;
27337 		default:
27338 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27339 			    "sr_atapi_change_speed: invalid drive speed %d\n",
27340 			    (uchar_t)data);
27341 			kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27342 			return (EINVAL);
27343 		}
27344 
27345 		/* Check the request against the drive's max speed. */
27346 		if (current_speed != 0xffff) {
27347 			if (current_speed > max_speed) {
27348 				kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27349 				return (EINVAL);
27350 			}
27351 		}
27352 
27353 		/*
27354 		 * Build and send the SET SPEED command
27355 		 *
27356 		 * Note: The SET SPEED (0xBB) command used in this routine is
27357 		 * obsolete per the SCSI MMC spec but still supported in the
27358 		 * MT FUJI vendor spec. Most equipment adheres to MT FUJI,
27359 		 * therefore the command is still implemented in this routine.
27360 		 */
27361 		bzero(cdb, sizeof (cdb));
27362 		cdb[0] = (char)SCMD_SET_CDROM_SPEED;
27363 		cdb[2] = (uchar_t)(current_speed >> 8);
27364 		cdb[3] = (uchar_t)current_speed;
27365 		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27366 		com->uscsi_cdb = (caddr_t)cdb;
27367 		com->uscsi_cdblen = CDB_GROUP5;
27368 		com->uscsi_bufaddr = NULL;
27369 		com->uscsi_buflen = 0;
27370 		com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27371 		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD);
27372 		break;
27373 	default:
27374 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27375 		    "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
27376 		rval = EINVAL;
27377 	}
27378 
27379 	if (sense) {
27380 		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27381 	}
27382 	if (com) {
27383 		kmem_free(com, sizeof (*com));
27384 	}
27385 	return (rval);
27386 }
27387 
27388 
27389 /*
27390  * Function: sr_pause_resume()
27391  *
27392  * Description: This routine is the driver entry point for handling CD-ROM
27393  *		pause/resume ioctl requests. This only affects the audio play
27394  *		operation.
27395  *
27396  * Arguments: dev - the device 'dev_t'
27397  *		cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
27398  *		for setting the resume bit of the cdb.
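 *		Only CDB byte 8 differs between the two requests: it carries
 *		the resume bit, built below as 1 for CDROMRESUME and 0 for
 *		CDROMPAUSE.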
27399  *
27400  * Return Code: the code returned by sd_send_scsi_cmd()
27401  *		EINVAL if invalid mode specified
27402  *
27403  */
27404 
27405 static int
27406 sr_pause_resume(dev_t dev, int cmd)
27407 {
27408 	struct sd_lun *un;
27409 	struct uscsi_cmd *com;
27410 	char cdb[CDB_GROUP1];
27411 	int rval;
27412 
27413 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27414 		return (ENXIO);
27415 	}
27416 
27417 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27418 	bzero(cdb, CDB_GROUP1);
27419 	cdb[0] = SCMD_PAUSE_RESUME;
27420 	switch (cmd) {
27421 	case CDROMRESUME:
27422 		cdb[8] = 1;
27423 		break;
27424 	case CDROMPAUSE:
27425 		cdb[8] = 0;
27426 		break;
27427 	default:
27428 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:"
27429 		    " Command '%x' Not Supported\n", cmd);
27430 		rval = EINVAL;
27431 		goto done;
27432 	}
27433 
27434 	com->uscsi_cdb = cdb;
27435 	com->uscsi_cdblen = CDB_GROUP1;
27436 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27437 
27438 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27439 	    SD_PATH_STANDARD);
27440 
27441 done:
27442 	kmem_free(com, sizeof (*com));
27443 	return (rval);
27444 }
27445 
27446 
27447 /*
27448  * Function: sr_play_msf()
27449  *
27450  * Description: This routine is the driver entry point for handling CD-ROM
27451  *		ioctl requests to output the audio signals at the specified
27452  *		starting address and continue the audio play until the specified
27453  *		ending address (CDROMPLAYMSF). The address is in Minute Second
27454  *		Frame (MSF) format.
27455  *
27456  * Arguments: dev - the device 'dev_t'
27457  *		data - pointer to user provided audio msf structure,
27458  *		specifying start/end addresses.
27459  *		flag - this argument is a pass through to ddi_copyxxx()
27460  *		directly from the mode argument of ioctl().
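 *		For drives flagged with un_f_cfg_playmsf_bcd, the MSF fields
 *		are BCD-encoded before being placed in the CDB; e.g. a start
 *		time of 12 minutes goes out as BYTE_TO_BCD(12) == 0x12 rather
 *		than the binary value 0x0c.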
27461 * 27462 * Return Code: the code returned by sd_send_scsi_cmd() 27463 * EFAULT if ddi_copyxxx() fails 27464 * ENXIO if fail ddi_get_soft_state 27465 * EINVAL if data pointer is NULL 27466 */ 27467 27468 static int 27469 sr_play_msf(dev_t dev, caddr_t data, int flag) 27470 { 27471 struct sd_lun *un; 27472 struct uscsi_cmd *com; 27473 struct cdrom_msf msf_struct; 27474 struct cdrom_msf *msf = &msf_struct; 27475 char cdb[CDB_GROUP1]; 27476 int rval; 27477 27478 if (data == NULL) { 27479 return (EINVAL); 27480 } 27481 27482 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27483 return (ENXIO); 27484 } 27485 27486 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 27487 return (EFAULT); 27488 } 27489 27490 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27491 bzero(cdb, CDB_GROUP1); 27492 cdb[0] = SCMD_PLAYAUDIO_MSF; 27493 if (un->un_f_cfg_playmsf_bcd == TRUE) { 27494 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 27495 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 27496 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 27497 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 27498 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 27499 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 27500 } else { 27501 cdb[3] = msf->cdmsf_min0; 27502 cdb[4] = msf->cdmsf_sec0; 27503 cdb[5] = msf->cdmsf_frame0; 27504 cdb[6] = msf->cdmsf_min1; 27505 cdb[7] = msf->cdmsf_sec1; 27506 cdb[8] = msf->cdmsf_frame1; 27507 } 27508 com->uscsi_cdb = cdb; 27509 com->uscsi_cdblen = CDB_GROUP1; 27510 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27511 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27512 SD_PATH_STANDARD); 27513 kmem_free(com, sizeof (*com)); 27514 return (rval); 27515 } 27516 27517 27518 /* 27519 * Function: sr_play_trkind() 27520 * 27521 * Description: This routine is the driver entry point for handling CD-ROM 27522 * ioctl requests to output the audio signals at the specified 27523 * starting address and continue the audio play until the specified 27524 * ending address (CDROMPLAYTRKIND). The address is in Track Index 27525 * format. 27526 * 27527 * Arguments: dev - the device 'dev_t' 27528 * data - pointer to user provided audio track/index structure, 27529 * specifying start/end addresses. 27530 * flag - this argument is a pass through to ddi_copyxxx() 27531 * directly from the mode argument of ioctl(). 
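 *		The start and end positions map directly into the PLAY AUDIO
 *		TRACK/INDEX CDB built below: bytes 4 and 5 carry the starting
 *		track and index, bytes 7 and 8 the ending track and index.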
27532 * 27533 * Return Code: the code returned by sd_send_scsi_cmd() 27534 * EFAULT if ddi_copyxxx() fails 27535 * ENXIO if fail ddi_get_soft_state 27536 * EINVAL if data pointer is NULL 27537 */ 27538 27539 static int 27540 sr_play_trkind(dev_t dev, caddr_t data, int flag) 27541 { 27542 struct cdrom_ti ti_struct; 27543 struct cdrom_ti *ti = &ti_struct; 27544 struct uscsi_cmd *com = NULL; 27545 char cdb[CDB_GROUP1]; 27546 int rval; 27547 27548 if (data == NULL) { 27549 return (EINVAL); 27550 } 27551 27552 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 27553 return (EFAULT); 27554 } 27555 27556 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27557 bzero(cdb, CDB_GROUP1); 27558 cdb[0] = SCMD_PLAYAUDIO_TI; 27559 cdb[4] = ti->cdti_trk0; 27560 cdb[5] = ti->cdti_ind0; 27561 cdb[7] = ti->cdti_trk1; 27562 cdb[8] = ti->cdti_ind1; 27563 com->uscsi_cdb = cdb; 27564 com->uscsi_cdblen = CDB_GROUP1; 27565 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27566 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27567 SD_PATH_STANDARD); 27568 kmem_free(com, sizeof (*com)); 27569 return (rval); 27570 } 27571 27572 27573 /* 27574 * Function: sr_read_all_subcodes() 27575 * 27576 * Description: This routine is the driver entry point for handling CD-ROM 27577 * ioctl requests to return raw subcode data while the target is 27578 * playing audio (CDROMSUBCODE). 27579 * 27580 * Arguments: dev - the device 'dev_t' 27581 * data - pointer to user provided cdrom subcode structure, 27582 * specifying the transfer length and address. 27583 * flag - this argument is a pass through to ddi_copyxxx() 27584 * directly from the mode argument of ioctl(). 27585 * 27586 * Return Code: the code returned by sd_send_scsi_cmd() 27587 * EFAULT if ddi_copyxxx() fails 27588 * ENXIO if fail ddi_get_soft_state 27589 * EINVAL if data pointer is NULL 27590 */ 27591 27592 static int 27593 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 27594 { 27595 struct sd_lun *un = NULL; 27596 struct uscsi_cmd *com = NULL; 27597 struct cdrom_subcode *subcode = NULL; 27598 int rval; 27599 size_t buflen; 27600 char cdb[CDB_GROUP5]; 27601 27602 #ifdef _MULTI_DATAMODEL 27603 /* To support ILP32 applications in an LP64 world */ 27604 struct cdrom_subcode32 cdrom_subcode32; 27605 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 27606 #endif 27607 if (data == NULL) { 27608 return (EINVAL); 27609 } 27610 27611 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27612 return (ENXIO); 27613 } 27614 27615 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 27616 27617 #ifdef _MULTI_DATAMODEL 27618 switch (ddi_model_convert_from(flag & FMODELS)) { 27619 case DDI_MODEL_ILP32: 27620 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 27621 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27622 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27623 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27624 return (EFAULT); 27625 } 27626 /* Convert the ILP32 uscsi data from the application to LP64 */ 27627 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 27628 break; 27629 case DDI_MODEL_NONE: 27630 if (ddi_copyin(data, subcode, 27631 sizeof (struct cdrom_subcode), flag)) { 27632 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27633 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27634 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27635 return (EFAULT); 27636 } 27637 break; 27638 } 27639 #else /* ! 
_MULTI_DATAMODEL */
	if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: ddi_copyin Failed\n");
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((subcode->cdsc_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    subcode->cdsc_length, 0xFFFFFF);
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EINVAL);
	}

	buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);

	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (char)0xff;
		cdb[3] = (char)0xff;
		cdb[4] = (char)0xff;
		cdb[5] = (char)0xff;
		cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
		cdb[10] = 1;
	} else {
		/*
		 * Note: A vendor specific command (0xDF) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
		cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
		cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(subcode, sizeof (struct cdrom_subcode));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_subchannel()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return the Q sub-channel data of the CD
 *		current position block. (CDROMSUBCHNL) The data includes the
 *		track number, index number, absolute CD-ROM address (LBA or
 *		MSF format per the user), track relative CD-ROM address (LBA
 *		or MSF format per the user), control data and audio status.
 *
 * Arguments:	dev	- the device 'dev_t'
 *		data	- pointer to user provided cdrom sub-channel structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
27713 * 27714 * Return Code: the code returned by sd_send_scsi_cmd() 27715 * EFAULT if ddi_copyxxx() fails 27716 * ENXIO if fail ddi_get_soft_state 27717 * EINVAL if data pointer is NULL 27718 */ 27719 27720 static int 27721 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 27722 { 27723 struct sd_lun *un; 27724 struct uscsi_cmd *com; 27725 struct cdrom_subchnl subchanel; 27726 struct cdrom_subchnl *subchnl = &subchanel; 27727 char cdb[CDB_GROUP1]; 27728 caddr_t buffer; 27729 int rval; 27730 27731 if (data == NULL) { 27732 return (EINVAL); 27733 } 27734 27735 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27736 (un->un_state == SD_STATE_OFFLINE)) { 27737 return (ENXIO); 27738 } 27739 27740 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 27741 return (EFAULT); 27742 } 27743 27744 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 27745 bzero(cdb, CDB_GROUP1); 27746 cdb[0] = SCMD_READ_SUBCHANNEL; 27747 /* Set the MSF bit based on the user requested address format */ 27748 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 27749 /* 27750 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 27751 * returned 27752 */ 27753 cdb[2] = 0x40; 27754 /* 27755 * Set byte 3 to specify the return data format. A value of 0x01 27756 * indicates that the CD-ROM current position should be returned. 27757 */ 27758 cdb[3] = 0x01; 27759 cdb[8] = 0x10; 27760 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27761 com->uscsi_cdb = cdb; 27762 com->uscsi_cdblen = CDB_GROUP1; 27763 com->uscsi_bufaddr = buffer; 27764 com->uscsi_buflen = 16; 27765 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27766 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27767 SD_PATH_STANDARD); 27768 if (rval != 0) { 27769 kmem_free(buffer, 16); 27770 kmem_free(com, sizeof (*com)); 27771 return (rval); 27772 } 27773 27774 /* Process the returned Q sub-channel data */ 27775 subchnl->cdsc_audiostatus = buffer[1]; 27776 subchnl->cdsc_adr = (buffer[5] & 0xF0) >> 4; 27777 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 27778 subchnl->cdsc_trk = buffer[6]; 27779 subchnl->cdsc_ind = buffer[7]; 27780 if (subchnl->cdsc_format & CDROM_LBA) { 27781 subchnl->cdsc_absaddr.lba = 27782 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27783 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27784 subchnl->cdsc_reladdr.lba = 27785 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 27786 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 27787 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 27788 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 27789 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 27790 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 27791 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 27792 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 27793 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 27794 } else { 27795 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 27796 subchnl->cdsc_absaddr.msf.second = buffer[10]; 27797 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 27798 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 27799 subchnl->cdsc_reladdr.msf.second = buffer[14]; 27800 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 27801 } 27802 kmem_free(buffer, 16); 27803 kmem_free(com, sizeof (*com)); 27804 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 27805 != 0) { 27806 return (EFAULT); 27807 } 27808 return (rval); 27809 } 27810 27811 27812 /* 27813 * Function: sr_read_tocentry() 27814 
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read from the Table of Contents (TOC)
 *		(CDROMREADTOCENTRY). This routine provides the ADR and CTRL
 *		fields, the starting address (LBA or MSF format per the user)
 *		and the data mode if the user specified track is a data track.
 *
 *		Note: The READ HEADER (0x44) command used in this routine is
 *		obsolete per the SCSI MMC spec but still supported in the
 *		MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
 *		therefore the command is still implemented in this routine.
 *
 * Arguments:	dev	- the device 'dev_t'
 *		data	- pointer to user provided toc entry structure,
 *			  specifying the track # and the address format
 *			  (LBA or MSF).
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tocentry(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	*com;
	struct cdrom_tocentry	toc_entry;
	struct cdrom_tocentry	*entry = &toc_entry;
	caddr_t			buffer;
	int			rval;
	char			cdb[CDB_GROUP1];

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
		return (EFAULT);
	}

	/* Validate the requested track and address format */
	if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
		return (EINVAL);
	}

	if (entry->cdte_track == 0) {
		return (EINVAL);
	}

	buffer = kmem_zalloc((size_t)12, KM_SLEEP);
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);

	cdb[0] = SCMD_READ_TOC;
	/* Set the MSF bit based on the user requested address format */
	cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		cdb[6] = BYTE_TO_BCD(entry->cdte_track);
	} else {
		cdb[6] = entry->cdte_track;
	}

	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte track descriptor)
	 */
	cdb[8] = 12;
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x0C;
	com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, 12);
		kmem_free(com, sizeof (*com));
		return (rval);
	}
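	/*
	 * For reference, a sketch (not compiled into the driver) of how the
	 * 12 bytes returned by the READ TOC command above are consumed by
	 * the code that follows. Offsets are inferred from the buffer[]
	 * indexing below; consult the MMC spec for the authoritative layout.
	 *
	 *	buffer[0..3]	4 byte TOC response header
	 *	buffer[5]	ADR (upper nibble) and CTRL (lower nibble)
	 *	buffer[8..11]	track start address (LBA or MSF, per cdb[1])
	 *
	 * An equivalent view as a hypothetical C overlay:
	 *
	 *	struct toc_single_entry {
	 *		uchar_t	hdr[4];		// TOC response header
	 *		uchar_t	rsvd;
	 *		uchar_t	adr_ctrl;	// ADR/CTRL nibbles
	 *		uchar_t	rsvd2[2];
	 *		uchar_t	addr[4];	// track start address
	 *	};
	 */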

	/* Process the toc entry */
	entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
	entry->cdte_ctrl = (buffer[5] & 0x0F);
	if (entry->cdte_format & CDROM_LBA) {
		entry->cdte_addr.lba =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
	} else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
		entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
		entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
		entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	} else {
		entry->cdte_addr.msf.minute = buffer[9];
		entry->cdte_addr.msf.second = buffer[10];
		entry->cdte_addr.msf.frame = buffer[11];
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	}

	/*
	 * Build and send the READ HEADER command to determine the data mode of
	 * the user specified track.
	 */
	if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
	    (entry->cdte_track != CDROM_LEADOUT)) {
		bzero(cdb, CDB_GROUP1);
		cdb[0] = SCMD_READ_HEADER;
		cdb[2] = buffer[8];
		cdb[3] = buffer[9];
		cdb[4] = buffer[10];
		cdb[5] = buffer[11];
		cdb[8] = 0x08;
		com->uscsi_buflen = 0x08;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval == 0) {
			entry->cdte_datamode = buffer[0];
		} else {
			/*
			 * READ HEADER command failed; since this is
			 * obsoleted in one spec, it's better to return
			 * -1 for an invalid track so that we can still
			 * receive the rest of the TOC data.
			 */
			entry->cdte_datamode = (uchar_t)-1;
		}
	} else {
		entry->cdte_datamode = (uchar_t)-1;
	}

	kmem_free(buffer, 12);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
		return (EFAULT);

	return (rval);
}
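
/*
 * Illustrative only (not part of the driver): a user-space caller would
 * typically exercise the CDROMREADTOCENTRY path above along these lines.
 * The device path is an assumption for the example.
 *
 *	#include <sys/cdio.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/dev/rdsk/c0t2d0s2", O_RDONLY | O_NDELAY);
 *	struct cdrom_tocentry te;
 *	te.cdte_track = 1;		// first track
 *	te.cdte_format = CDROM_MSF;	// request MSF addressing
 *	if (ioctl(fd, CDROMREADTOCENTRY, &te) == 0) {
 *		// te.cdte_addr.msf.{minute,second,frame} now hold the
 *		// track start; te.cdte_datamode is 0xff (-1) for audio
 *		// (non-data) tracks.
 *	}
 */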

/*
 * Function: sr_read_tochdr()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read the Table of Contents (TOC) header
 *		(CDROMREADTOCHDR). The TOC header consists of the disk starting
 *		and ending track numbers.
 *
 * Arguments:	dev	- the device 'dev_t'
 *		data	- pointer to user provided toc header structure,
 *			  specifying the starting and ending track numbers.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tochdr(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_tochdr	toc_header;
	struct cdrom_tochdr	*hdr = &toc_header;
	char			cdb[CDB_GROUP1];
	int			rval;
	caddr_t			buffer;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc(4, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Specifying a track number of 0x00 in the READ TOC command indicates
	 * that the TOC header should be returned
	 */
	cdb[6] = 0x00;
	/*
	 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
	 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
	 */
	cdb[8] = 0x04;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x04;
	com->uscsi_timeout = 300;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
		hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
	} else {
		hdr->cdth_trk0 = buffer[2];
		hdr->cdth_trk1 = buffer[3];
	}
	kmem_free(buffer, 4);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
		return (EFAULT);
	}
	return (rval);
}


/*
 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
 * digital audio and extended architecture digital audio. These modes are
 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
 * MMC specs.
28080 * 28081 * In addition to support for the various data formats these routines also 28082 * include support for devices that implement only the direct access READ 28083 * commands (0x08, 0x28), devices that implement the READ_CD commands 28084 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 28085 * READ CDXA commands (0xD8, 0xDB) 28086 */ 28087 28088 /* 28089 * Function: sr_read_mode1() 28090 * 28091 * Description: This routine is the driver entry point for handling CD-ROM 28092 * ioctl read mode1 requests (CDROMREADMODE1). 28093 * 28094 * Arguments: dev - the device 'dev_t' 28095 * data - pointer to user provided cd read structure specifying 28096 * the lba buffer address and length. 28097 * flag - this argument is a pass through to ddi_copyxxx() 28098 * directly from the mode argument of ioctl(). 28099 * 28100 * Return Code: the code returned by sd_send_scsi_cmd() 28101 * EFAULT if ddi_copyxxx() fails 28102 * ENXIO if fail ddi_get_soft_state 28103 * EINVAL if data pointer is NULL 28104 */ 28105 28106 static int 28107 sr_read_mode1(dev_t dev, caddr_t data, int flag) 28108 { 28109 struct sd_lun *un; 28110 struct cdrom_read mode1_struct; 28111 struct cdrom_read *mode1 = &mode1_struct; 28112 int rval; 28113 sd_ssc_t *ssc; 28114 28115 #ifdef _MULTI_DATAMODEL 28116 /* To support ILP32 applications in an LP64 world */ 28117 struct cdrom_read32 cdrom_read32; 28118 struct cdrom_read32 *cdrd32 = &cdrom_read32; 28119 #endif /* _MULTI_DATAMODEL */ 28120 28121 if (data == NULL) { 28122 return (EINVAL); 28123 } 28124 28125 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28126 (un->un_state == SD_STATE_OFFLINE)) { 28127 return (ENXIO); 28128 } 28129 28130 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28131 "sd_read_mode1: entry: un:0x%p\n", un); 28132 28133 #ifdef _MULTI_DATAMODEL 28134 switch (ddi_model_convert_from(flag & FMODELS)) { 28135 case DDI_MODEL_ILP32: 28136 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 28137 return (EFAULT); 28138 } 28139 /* Convert the ILP32 uscsi data from the application to LP64 */ 28140 cdrom_read32tocdrom_read(cdrd32, mode1); 28141 break; 28142 case DDI_MODEL_NONE: 28143 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 28144 return (EFAULT); 28145 } 28146 } 28147 #else /* ! _MULTI_DATAMODEL */ 28148 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 28149 return (EFAULT); 28150 } 28151 #endif /* _MULTI_DATAMODEL */ 28152 28153 ssc = sd_ssc_init(un); 28154 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr, 28155 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 28156 sd_ssc_fini(ssc); 28157 28158 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28159 "sd_read_mode1: exit: un:0x%p\n", un); 28160 28161 return (rval); 28162 } 28163 28164 28165 /* 28166 * Function: sr_read_cd_mode2() 28167 * 28168 * Description: This routine is the driver entry point for handling CD-ROM 28169 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 28170 * support the READ CD (0xBE) command or the 1st generation 28171 * READ CD (0xD4) command. 28172 * 28173 * Arguments: dev - the device 'dev_t' 28174 * data - pointer to user provided cd read structure specifying 28175 * the lba buffer address and length. 28176 * flag - this argument is a pass through to ddi_copyxxx() 28177 * directly from the mode argument of ioctl(). 
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_read	mode2_struct;
	struct cdrom_read	*mode2 = &mode2_struct;
	uchar_t			cdb[CDB_GROUP5];
	int			nblocks;
	int			rval;
#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32	cdrom_read32;
	struct cdrom_read32	*cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode2);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	bzero(cdb, sizeof (cdb));
	if (un->un_f_cfg_read_cd_xd4 == TRUE) {
		/* Read command supported by 1st generation atapi drives */
		cdb[0] = SCMD_READ_CDD4;
	} else {
		/* Universal CD Access Command */
		cdb[0] = SCMD_READ_CD;
	}

	/*
	 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book
	 */
	cdb[1] = CDROM_SECTOR_TYPE_MODE2;

	/* set the start address */
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF);
	cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF);
	cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[6] = (uchar_t)(nblocks >> 16);
	cdb[7] = (uchar_t)(nblocks >> 8);
	cdb[8] = (uchar_t)nblocks;

	/* set the filter bits */
	cdb[9] = CDROM_READ_CD_USERDATA;

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_mode2()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl read mode2 requests (CDROMREADMODE2) for devices that
 *		do not support the READ CD (0xBE) command.
 *
 * Arguments:	dev	- the device 'dev_t'
 *		data	- pointer to user provided cd read structure specifying
 *			  the lba buffer address and length.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
28287 * 28288 * Return Code: the code returned by sd_send_scsi_cmd() 28289 * EFAULT if ddi_copyxxx() fails 28290 * ENXIO if fail ddi_get_soft_state 28291 * EINVAL if data pointer is NULL 28292 * EIO if fail to reset block size 28293 * EAGAIN if commands are in progress in the driver 28294 */ 28295 28296 static int 28297 sr_read_mode2(dev_t dev, caddr_t data, int flag) 28298 { 28299 struct sd_lun *un; 28300 struct cdrom_read mode2_struct; 28301 struct cdrom_read *mode2 = &mode2_struct; 28302 int rval; 28303 uint32_t restore_blksize; 28304 struct uscsi_cmd *com; 28305 uchar_t cdb[CDB_GROUP0]; 28306 int nblocks; 28307 28308 #ifdef _MULTI_DATAMODEL 28309 /* To support ILP32 applications in an LP64 world */ 28310 struct cdrom_read32 cdrom_read32; 28311 struct cdrom_read32 *cdrd32 = &cdrom_read32; 28312 #endif /* _MULTI_DATAMODEL */ 28313 28314 if (data == NULL) { 28315 return (EINVAL); 28316 } 28317 28318 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28319 (un->un_state == SD_STATE_OFFLINE)) { 28320 return (ENXIO); 28321 } 28322 28323 /* 28324 * Because this routine will update the device and driver block size 28325 * being used we want to make sure there are no commands in progress. 28326 * If commands are in progress the user will have to try again. 28327 * 28328 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 28329 * in sdioctl to protect commands from sdioctl through to the top of 28330 * sd_uscsi_strategy. See sdioctl for details. 28331 */ 28332 mutex_enter(SD_MUTEX(un)); 28333 if (un->un_ncmds_in_driver != 1) { 28334 mutex_exit(SD_MUTEX(un)); 28335 return (EAGAIN); 28336 } 28337 mutex_exit(SD_MUTEX(un)); 28338 28339 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28340 "sd_read_mode2: entry: un:0x%p\n", un); 28341 28342 #ifdef _MULTI_DATAMODEL 28343 switch (ddi_model_convert_from(flag & FMODELS)) { 28344 case DDI_MODEL_ILP32: 28345 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 28346 return (EFAULT); 28347 } 28348 /* Convert the ILP32 uscsi data from the application to LP64 */ 28349 cdrom_read32tocdrom_read(cdrd32, mode2); 28350 break; 28351 case DDI_MODEL_NONE: 28352 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 28353 return (EFAULT); 28354 } 28355 break; 28356 } 28357 #else /* ! 
_MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/* Store the current target block size for restoration later */
	restore_blksize = un->un_tgt_blocksize;

	/* Change the device and soft state target block size to 2336 */
	if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
		rval = EIO;
		goto done;
	}


	bzero(cdb, sizeof (cdb));

	/* set READ operation */
	cdb[0] = SCMD_READ;

	/* adjust lba for 2kbyte blocks from 512 byte blocks */
	mode2->cdread_lba >>= 2;

	/* set the start address */
	cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F);
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[4] = (uchar_t)nblocks & 0xFF;

	/* build command */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	/*
	 * Issue SCSI command with user space address for read buffer.
	 *
	 * This sends the command through main channel in the driver.
	 *
	 * Since this is accessed via an IOCTL call, we go through the
	 * standard path, so that if the device was powered down, then
	 * it would be 'awakened' to handle the command.
	 */
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);

	kmem_free(com, sizeof (*com));

	/* Restore the device and soft state target block size */
	if (sr_sector_mode(dev, restore_blksize) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "can't switch back to mode 1\n");
		/*
		 * If sd_send_scsi_READ succeeded we still need to report
		 * an error because we failed to reset the block size
		 */
		if (rval == 0) {
			rval = EIO;
		}
	}

done:
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode2: exit: un:0x%p\n", un);

	return (rval);
}
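
/*
 * Illustrative only (not part of the driver): because sr_read_mode2()
 * temporarily switches the target block size, it refuses to run while
 * other commands are outstanding. A user-space caller should therefore
 * be prepared to retry on EAGAIN; a hypothetical sketch:
 *
 *	#include <sys/cdio.h>
 *	#include <sys/ioctl.h>
 *	#include <errno.h>
 *
 *	struct cdrom_read cr;
 *	cr.cdread_lba = lba;		// 512-byte based LBA
 *	cr.cdread_bufaddr = buf;
 *	cr.cdread_buflen = 2336;	// one mode 2 sector
 *	while (ioctl(fd, CDROMREADMODE2, &cr) != 0) {
 *		if (errno != EAGAIN)
 *			break;		// real failure
 *	}
 */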

/*
 * Function: sr_sector_mode()
 *
 * Description: This utility function is used by sr_read_mode2 to set the
 *		target block size based on the user specified size. This is a
 *		legacy implementation based upon a vendor specific mode page.
 *
 * Arguments:	dev	- the device 'dev_t'
 *		blksize	- the block size being set, either 2336 or 512.
 *
 * Return Code: the code returned by sd_send_scsi_MODE_SENSE() or
 *		sd_send_scsi_MODE_SELECT()
 *		ENXIO if fail ddi_get_soft_state
 */

static int
sr_sector_mode(dev_t dev, uint32_t blksize)
{
	struct sd_lun	*un;
	uchar_t		*sense;
	uchar_t		*select;
	int		rval;
	sd_ssc_t	*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	sense = kmem_zalloc(20, KM_SLEEP);

	/* Note: This is a vendor specific mode page (0x81) */
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sr_sector_mode: Mode Sense failed\n");
		kmem_free(sense, 20);
		return (rval);
	}
	select = kmem_zalloc(20, KM_SLEEP);
	select[3] = 0x08;
	select[10] = ((blksize >> 8) & 0xff);
	select[11] = (blksize & 0xff);
	select[12] = 0x01;
	select[13] = 0x06;
	select[14] = sense[14];
	select[15] = sense[15];
	if (blksize == SD_MODE2_BLKSIZE) {
		select[14] |= 0x01;
	}

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20,
	    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sr_sector_mode: Mode Select failed\n");
	} else {
		/*
		 * Only update the softstate block size if we successfully
		 * changed the device block mode.
		 */
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, blksize, 0);
		mutex_exit(SD_MUTEX(un));
	}
	kmem_free(sense, 20);
	kmem_free(select, 20);
	return (rval);
}


/*
 * Function: sr_read_cdda()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If
 *		the target supports CDDA these requests are handled via a
 *		vendor specific command (0xD8). If the target does not support
 *		CDDA these requests are handled via the READ CD command (0xBE).
 *
 * Arguments:	dev	- the device 'dev_t'
 *		data	- pointer to user provided CD-DA structure specifying
 *			  the track starting address, transfer length, and
 *			  subcode options.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
28526 * 28527 * Return Code: the code returned by sd_send_scsi_cmd() 28528 * EFAULT if ddi_copyxxx() fails 28529 * ENXIO if fail ddi_get_soft_state 28530 * EINVAL if invalid arguments are provided 28531 * ENOTTY 28532 */ 28533 28534 static int 28535 sr_read_cdda(dev_t dev, caddr_t data, int flag) 28536 { 28537 struct sd_lun *un; 28538 struct uscsi_cmd *com; 28539 struct cdrom_cdda *cdda; 28540 int rval; 28541 size_t buflen; 28542 char cdb[CDB_GROUP5]; 28543 28544 #ifdef _MULTI_DATAMODEL 28545 /* To support ILP32 applications in an LP64 world */ 28546 struct cdrom_cdda32 cdrom_cdda32; 28547 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 28548 #endif /* _MULTI_DATAMODEL */ 28549 28550 if (data == NULL) { 28551 return (EINVAL); 28552 } 28553 28554 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28555 return (ENXIO); 28556 } 28557 28558 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 28559 28560 #ifdef _MULTI_DATAMODEL 28561 switch (ddi_model_convert_from(flag & FMODELS)) { 28562 case DDI_MODEL_ILP32: 28563 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 28564 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28565 "sr_read_cdda: ddi_copyin Failed\n"); 28566 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28567 return (EFAULT); 28568 } 28569 /* Convert the ILP32 uscsi data from the application to LP64 */ 28570 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 28571 break; 28572 case DDI_MODEL_NONE: 28573 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28574 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28575 "sr_read_cdda: ddi_copyin Failed\n"); 28576 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28577 return (EFAULT); 28578 } 28579 break; 28580 } 28581 #else /* ! _MULTI_DATAMODEL */ 28582 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28583 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28584 "sr_read_cdda: ddi_copyin Failed\n"); 28585 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28586 return (EFAULT); 28587 } 28588 #endif /* _MULTI_DATAMODEL */ 28589 28590 /* 28591 * Since MMC-2 expects max 3 bytes for length, check if the 28592 * length input is greater than 3 bytes 28593 */ 28594 if ((cdda->cdda_length & 0xFF000000) != 0) { 28595 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 28596 "cdrom transfer length too large: %d (limit %d)\n", 28597 cdda->cdda_length, 0xFFFFFF); 28598 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28599 return (EINVAL); 28600 } 28601 28602 switch (cdda->cdda_subcode) { 28603 case CDROM_DA_NO_SUBCODE: 28604 buflen = CDROM_BLK_2352 * cdda->cdda_length; 28605 break; 28606 case CDROM_DA_SUBQ: 28607 buflen = CDROM_BLK_2368 * cdda->cdda_length; 28608 break; 28609 case CDROM_DA_ALL_SUBCODE: 28610 buflen = CDROM_BLK_2448 * cdda->cdda_length; 28611 break; 28612 case CDROM_DA_SUBCODE_ONLY: 28613 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 28614 break; 28615 default: 28616 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28617 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 28618 cdda->cdda_subcode); 28619 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28620 return (EINVAL); 28621 } 28622 28623 /* Build and send the command */ 28624 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28625 bzero(cdb, CDB_GROUP5); 28626 28627 if (un->un_f_cfg_cdda == TRUE) { 28628 cdb[0] = (char)SCMD_READ_CD; 28629 cdb[1] = 0x04; 28630 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28631 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28632 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28633 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28634 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28635 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28636 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 28637 cdb[9] = 0x10; 28638 switch (cdda->cdda_subcode) { 28639 case CDROM_DA_NO_SUBCODE : 28640 cdb[10] = 0x0; 28641 break; 28642 case CDROM_DA_SUBQ : 28643 cdb[10] = 0x2; 28644 break; 28645 case CDROM_DA_ALL_SUBCODE : 28646 cdb[10] = 0x1; 28647 break; 28648 case CDROM_DA_SUBCODE_ONLY : 28649 /* FALLTHROUGH */ 28650 default : 28651 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28652 kmem_free(com, sizeof (*com)); 28653 return (ENOTTY); 28654 } 28655 } else { 28656 cdb[0] = (char)SCMD_READ_CDDA; 28657 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28658 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28659 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28660 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28661 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 28662 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28663 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28664 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 28665 cdb[10] = cdda->cdda_subcode; 28666 } 28667 28668 com->uscsi_cdb = cdb; 28669 com->uscsi_cdblen = CDB_GROUP5; 28670 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 28671 com->uscsi_buflen = buflen; 28672 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28673 28674 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28675 SD_PATH_STANDARD); 28676 28677 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28678 kmem_free(com, sizeof (*com)); 28679 return (rval); 28680 } 28681 28682 28683 /* 28684 * Function: sr_read_cdxa() 28685 * 28686 * Description: This routine is the driver entry point for handling CD-ROM 28687 * ioctl requests to return CD-XA (Extended Architecture) data. 28688 * (CDROMCDXA). 28689 * 28690 * Arguments: dev - the device 'dev_t' 28691 * data - pointer to user provided CD-XA structure specifying 28692 * the data starting address, transfer length, and format 28693 * flag - this argument is a pass through to ddi_copyxxx() 28694 * directly from the mode argument of ioctl(). 28695 * 28696 * Return Code: the code returned by sd_send_scsi_cmd() 28697 * EFAULT if ddi_copyxxx() fails 28698 * ENXIO if fail ddi_get_soft_state 28699 * EINVAL if data pointer is NULL 28700 */ 28701 28702 static int 28703 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 28704 { 28705 struct sd_lun *un; 28706 struct uscsi_cmd *com; 28707 struct cdrom_cdxa *cdxa; 28708 int rval; 28709 size_t buflen; 28710 char cdb[CDB_GROUP5]; 28711 uchar_t read_flags; 28712 28713 #ifdef _MULTI_DATAMODEL 28714 /* To support ILP32 applications in an LP64 world */ 28715 struct cdrom_cdxa32 cdrom_cdxa32; 28716 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 28717 #endif /* _MULTI_DATAMODEL */ 28718 28719 if (data == NULL) { 28720 return (EINVAL); 28721 } 28722 28723 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28724 return (ENXIO); 28725 } 28726 28727 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 28728 28729 #ifdef _MULTI_DATAMODEL 28730 switch (ddi_model_convert_from(flag & FMODELS)) { 28731 case DDI_MODEL_ILP32: 28732 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 28733 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28734 return (EFAULT); 28735 } 28736 /* 28737 * Convert the ILP32 uscsi data from the 28738 * application to LP64 for internal use. 
 */
		cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((cdxa->cdxa_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    cdxa->cdxa_length, 0xFFFFFF);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	switch (cdxa->cdxa_format) {
	case CDROM_XA_DATA:
		buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
		read_flags = 0x10;
		break;
	case CDROM_XA_SECTOR_DATA:
		buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
		read_flags = 0xf8;
		break;
	case CDROM_XA_DATA_W_ERROR:
		buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
		read_flags = 0xfc;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdxa: Format '0x%x' Not Supported\n",
		    cdxa->cdxa_format);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);
	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[9] = (char)read_flags;
	} else {
		/*
		 * Note: A vendor specific command (0xDB) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_CDXA;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24);
		cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[9] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[10] = cdxa->cdxa_format;
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(cdxa, sizeof (struct cdrom_cdxa));
	kmem_free(com, sizeof (*com));
	return (rval);
}
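
/*
 * For reference, a non-normative summary of how the CDROMCDXA formats
 * handled above map to per-sector transfer sizes and READ CD (0xBE)
 * flag bits (values taken from the switch in sr_read_cdxa(); consult
 * the MMC spec for authoritative definitions):
 *
 *	CDROM_XA_DATA		2048 bytes/sector, read_flags 0x10
 *	CDROM_XA_SECTOR_DATA	2352 bytes/sector, read_flags 0xf8
 *	CDROM_XA_DATA_W_ERROR	2646 bytes/sector, read_flags 0xfc
 *
 * So, illustratively, a caller requesting cdxa_length = 10 sectors of
 * CDROM_XA_SECTOR_DATA must supply a buffer of 10 * 2352 = 23520 bytes.
 */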

/*
 * Function: sr_eject()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT)
 *
 * Arguments:	dev	- the device 'dev_t'
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 */

static int
sr_eject(dev_t dev)
{
	struct sd_lun	*un;
	int		rval;
	sd_ssc_t	*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	/*
	 * To prevent race conditions with the eject
	 * command, keep track of an eject command as
	 * it progresses. If we are already handling
	 * an eject command in the driver for the given
	 * unit and another request to eject is received,
	 * immediately return EAGAIN so we don't lose
	 * the command if the current eject command fails.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_ejecting == TRUE) {
		mutex_exit(SD_MUTEX(un));
		return (EAGAIN);
	}
	un->un_f_ejecting = TRUE;
	mutex_exit(SD_MUTEX(un));

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);

	if (rval != 0) {
		mutex_enter(SD_MUTEX(un));
		un->un_f_ejecting = FALSE;
		mutex_exit(SD_MUTEX(un));
		return (rval);
	}

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
	    SD_TARGET_EJECT, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);

	if (rval == 0) {
		mutex_enter(SD_MUTEX(un));
		sr_ejected(un);
		un->un_mediastate = DKIO_EJECTED;
		un->un_f_ejecting = FALSE;
		cv_broadcast(&un->un_state_cv);
		mutex_exit(SD_MUTEX(un));
	} else {
		mutex_enter(SD_MUTEX(un));
		un->un_f_ejecting = FALSE;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}
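
/*
 * Illustrative only (not part of the driver): the eject path above is a
 * two-step sequence, first allow medium removal (unlock the door), then
 * issue START STOP UNIT with the eject bit. A hypothetical user-space
 * caller sees only the single ioctl:
 *
 *	#include <sys/dkio.h>
 *	#include <sys/ioctl.h>
 *	#include <errno.h>
 *
 *	if (ioctl(fd, DKIOCEJECT, 0) != 0 && errno == EAGAIN) {
 *		// another eject for this unit is already in flight;
 *		// the caller may simply retry later.
 *	}
 */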

/*
 * Function: sr_ejected()
 *
 * Description: This routine updates the soft state structure to invalidate the
 *		geometry information after the media has been ejected or a
 *		media eject has been detected.
 *
 * Arguments:	un - driver soft state (unit) structure
 */

static void
sr_ejected(struct sd_lun *un)
{
	struct sd_errstats *stp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	un->un_f_blockcount_is_valid = FALSE;
	un->un_f_tgt_blocksize_is_valid = FALSE;
	mutex_exit(SD_MUTEX(un));
	cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
	mutex_enter(SD_MUTEX(un));

	if (un->un_errstats != NULL) {
		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		stp->sd_capacity.value.ui64 = 0;
	}
}


/*
 * Function: sr_check_wp()
 *
 * Description: This routine checks the write protection of a removable
 *		media disk and hotpluggable devices via the write protect bit
 *		of the Mode Page Header device specific field. Some devices
 *		choke on an unsupported mode page. In order to work around
 *		this issue, this routine has been implemented to use the 0x3f
 *		mode page (request for all pages) for all device types.
 *
 * Arguments:	dev		- the device 'dev_t'
 *
 * Return Code: int indicating if the device is write protected (1) or not (0)
 *
 *     Context: Kernel thread.
 *
 */

static int
sr_check_wp(dev_t dev)
{
	struct sd_lun	*un;
	uchar_t		device_specific;
	uchar_t		*sense;
	int		hdrlen;
	int		rval = FALSE;
	int		status;
	sd_ssc_t	*ssc;

	/*
	 * Note: The return codes for this routine should be reworked to
	 * properly handle the case of a NULL softstate.
	 */
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (FALSE);
	}

	if (un->un_f_cfg_is_atapi == TRUE) {
		/*
		 * The mode page contents are not required; set the allocation
		 * length for the mode page header only
		 */
		hdrlen = MODE_HEADER_LENGTH_GRP2;
		sense = kmem_zalloc(hdrlen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen,
		    MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (status != 0)
			goto err_exit;
		device_specific =
		    ((struct mode_header_grp2 *)sense)->device_specific;
	} else {
		hdrlen = MODE_HEADER_LENGTH;
		sense = kmem_zalloc(hdrlen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen,
		    MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (status != 0)
			goto err_exit;
		device_specific =
		    ((struct mode_header *)sense)->device_specific;
	}


	/*
	 * Not all disks understand the write protect mode sense query; if
	 * it failed we jumped to err_exit above and return FALSE (not
	 * write protected). Otherwise report the write protect bit.
	 */
	if (device_specific & WRITE_PROTECT) {
		rval = TRUE;
	}

err_exit:
	kmem_free(sense, hdrlen);
	return (rval);
}

/*
 * Function: sr_volume_ctrl()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		audio output volume ioctl requests. (CDROMVOLCTRL)
 *
 * Arguments:	dev	- the device 'dev_t'
 *		data	- pointer to user audio volume control structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
29024 * 29025 * Return Code: the code returned by sd_send_scsi_cmd() 29026 * EFAULT if ddi_copyxxx() fails 29027 * ENXIO if fail ddi_get_soft_state 29028 * EINVAL if data pointer is NULL 29029 * 29030 */ 29031 29032 static int 29033 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 29034 { 29035 struct sd_lun *un; 29036 struct cdrom_volctrl volume; 29037 struct cdrom_volctrl *vol = &volume; 29038 uchar_t *sense_page; 29039 uchar_t *select_page; 29040 uchar_t *sense; 29041 uchar_t *select; 29042 int sense_buflen; 29043 int select_buflen; 29044 int rval; 29045 sd_ssc_t *ssc; 29046 29047 if (data == NULL) { 29048 return (EINVAL); 29049 } 29050 29051 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 29052 (un->un_state == SD_STATE_OFFLINE)) { 29053 return (ENXIO); 29054 } 29055 29056 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 29057 return (EFAULT); 29058 } 29059 29060 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 29061 struct mode_header_grp2 *sense_mhp; 29062 struct mode_header_grp2 *select_mhp; 29063 int bd_len; 29064 29065 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 29066 select_buflen = MODE_HEADER_LENGTH_GRP2 + 29067 MODEPAGE_AUDIO_CTRL_LEN; 29068 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 29069 select = kmem_zalloc(select_buflen, KM_SLEEP); 29070 ssc = sd_ssc_init(un); 29071 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 29072 sense_buflen, MODEPAGE_AUDIO_CTRL, 29073 SD_PATH_STANDARD); 29074 sd_ssc_fini(ssc); 29075 29076 if (rval != 0) { 29077 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 29078 "sr_volume_ctrl: Mode Sense Failed\n"); 29079 kmem_free(sense, sense_buflen); 29080 kmem_free(select, select_buflen); 29081 return (rval); 29082 } 29083 sense_mhp = (struct mode_header_grp2 *)sense; 29084 select_mhp = (struct mode_header_grp2 *)select; 29085 bd_len = (sense_mhp->bdesc_length_hi << 8) | 29086 sense_mhp->bdesc_length_lo; 29087 if (bd_len > MODE_BLK_DESC_LENGTH) { 29088 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29089 "sr_volume_ctrl: Mode Sense returned invalid " 29090 "block descriptor length\n"); 29091 kmem_free(sense, sense_buflen); 29092 kmem_free(select, select_buflen); 29093 return (EIO); 29094 } 29095 sense_page = (uchar_t *) 29096 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 29097 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 29098 select_mhp->length_msb = 0; 29099 select_mhp->length_lsb = 0; 29100 select_mhp->bdesc_length_hi = 0; 29101 select_mhp->bdesc_length_lo = 0; 29102 } else { 29103 struct mode_header *sense_mhp, *select_mhp; 29104 29105 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 29106 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 29107 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 29108 select = kmem_zalloc(select_buflen, KM_SLEEP); 29109 ssc = sd_ssc_init(un); 29110 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 29111 sense_buflen, MODEPAGE_AUDIO_CTRL, 29112 SD_PATH_STANDARD); 29113 sd_ssc_fini(ssc); 29114 29115 if (rval != 0) { 29116 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29117 "sr_volume_ctrl: Mode Sense Failed\n"); 29118 kmem_free(sense, sense_buflen); 29119 kmem_free(select, select_buflen); 29120 return (rval); 29121 } 29122 sense_mhp = (struct mode_header *)sense; 29123 select_mhp = (struct mode_header *)select; 29124 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 29125 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29126 "sr_volume_ctrl: Mode Sense returned invalid " 29127 "block descriptor length\n"); 29128 
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (EIO);
		}
		sense_page = (uchar_t *)
		    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
		select_page = (uchar_t *)(select + MODE_HEADER_LENGTH);
		select_mhp->length = 0;
		select_mhp->bdesc_length = 0;
	}
	/*
	 * Note: An audio control data structure could be created and overlaid
	 * on the following in place of the array indexing method implemented.
	 */

	/* Build the select data for the user volume data */
	select_page[0] = MODEPAGE_AUDIO_CTRL;
	select_page[1] = 0xE;
	/* Set the immediate bit */
	select_page[2] = 0x04;
	/* Zero out reserved fields */
	select_page[3] = 0x00;
	select_page[4] = 0x00;
	/* Return sense data for fields not to be modified */
	select_page[5] = sense_page[5];
	select_page[6] = sense_page[6];
	select_page[7] = sense_page[7];
	/* Set the user specified volume levels for channel 0 and 1 */
	select_page[8] = 0x01;
	select_page[9] = vol->channel0;
	select_page[10] = 0x02;
	select_page[11] = vol->channel1;
	/* Channels 2 and 3 are currently unsupported so return the sense data */
	select_page[12] = sense_page[12];
	select_page[13] = sense_page[13];
	select_page[14] = sense_page[14];
	select_page[15] = sense_page[15];

	ssc = sd_ssc_init(un);
	if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	} else {
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	}
	sd_ssc_fini(ssc);

	kmem_free(sense, sense_buflen);
	kmem_free(select, select_buflen);
	return (rval);
}
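
/*
 * Illustrative only (not part of the driver): a user-space caller would
 * set the audio volume via CDROMVOLCTRL roughly as follows; only
 * channels 0 and 1 are honored by sr_volume_ctrl() above.
 *
 *	#include <sys/cdio.h>
 *	#include <sys/ioctl.h>
 *
 *	struct cdrom_volctrl vc;
 *	vc.channel0 = 0xff;		// left channel, full volume
 *	vc.channel1 = 0xff;		// right channel, full volume
 *	vc.channel2 = vc.channel3 = 0;	// ignored by this driver
 *	(void) ioctl(fd, CDROMVOLCTRL, &vc);
 */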

/*
 * Function: sr_read_sony_session_offset()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests for session offset information. (CDROMREADOFFSET)
 *		The address of the first track in the last session of a
 *		multi-session CD-ROM is returned.
 *
 *		Note: This routine uses a vendor specific key value in the
 *		command control field without implementing any vendor check here
 *		or in the ioctl routine.
 *
 * Arguments:	dev	- the device 'dev_t'
 *		data	- pointer to an int to hold the requested address
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	caddr_t			buffer;
	char			cdb[CDB_GROUP1];
	int			session_offset = 0;
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte response data)
	 */
	cdb[8] = SONY_SESSION_OFFSET_LEN;
	/* Byte 9 is the control byte. A vendor specific value is used */
	cdb[9] = SONY_SESSION_OFFSET_KEY;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
		kmem_free(com, sizeof (*com));
		return (rval);
	}
	if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
		session_offset =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
		/*
		 * The offset is returned in terms of the current lbasize
		 * blocks. Convert it to 2K blocks before returning it to
		 * the user.
		 */
		if (un->un_tgt_blocksize == CDROM_BLK_512) {
			session_offset >>= 2;
		} else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
			session_offset >>= 1;
		}
	}

	if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
		rval = EFAULT;
	}

	kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
	kmem_free(com, sizeof (*com));
	return (rval);
}
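
/*
 * Worked example (for reference only): if the target is currently using
 * 512-byte blocks and the device reports a session offset of 0x2400
 * blocks, the conversion above yields 0x2400 >> 2 = 0x900 2K blocks,
 * i.e. the same byte offset (0x2400 * 512 == 0x900 * 2048). For a
 * 1024-byte block size the shift is by 1, and for a native 2K block
 * size no conversion is needed.
 */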

/*
 * Function: sd_wm_cache_constructor()
 *
 * Description: Cache Constructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments:	wm	- A pointer to the sd_w_map to be initialized.
 *		un	- sd_lun structure for the device.
 *		flag	- the km flags passed to constructor
 *
 * Return Code: 0 on success.
 *		-1 on failure.
 */

/*ARGSUSED*/
static int
sd_wm_cache_constructor(void *wm, void *un, int flags)
{
	bzero(wm, sizeof (struct sd_w_map));
	cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
	return (0);
}


/*
 * Function: sd_wm_cache_destructor()
 *
 * Description: Cache destructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments:	wm	- A pointer to the sd_w_map to be initialized.
 *		un	- sd_lun structure for the device.
 */
/*ARGSUSED*/
static void
sd_wm_cache_destructor(void *wm, void *un)
{
	cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
}


/*
 * Function: sd_range_lock()
 *
 * Description: Lock the range of blocks specified as parameter to ensure
 *		that a read-modify-write is atomic and no other i/o writes
 *		to the same location. The range is specified in terms
 *		of start and end blocks. Block numbers are the actual
 *		media block numbers and not system block numbers.
 *
 * Arguments:	un	- sd_lun structure for the device.
 *		startb	- The starting block number
 *		endb	- The end block number
 *		typ	- type of i/o - simple/read_modify_write
 *
 * Return Code: wm	- pointer to the wmap structure.
 *
 *     Context: This routine can sleep.
 */

static struct sd_w_map *
sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
{
	struct sd_w_map *wmp = NULL;
	struct sd_w_map *sl_wmp = NULL;
	struct sd_w_map *tmp_wmp;
	wm_state state = SD_WM_CHK_LIST;


	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	while (state != SD_WM_DONE) {

		switch (state) {
		case SD_WM_CHK_LIST:
			/*
			 * This is the starting state. Check the wmap list
			 * to see if the range is currently available.
			 */
			if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
				/*
				 * If this is a simple write and no rmw
				 * i/o is pending then try to lock the
				 * range as the range should be available.
				 */
				state = SD_WM_LOCK_RANGE;
			} else {
				tmp_wmp = sd_get_range(un, startb, endb);
				if (tmp_wmp != NULL) {
					if ((wmp != NULL) && ONLIST(un, wmp)) {
						/*
						 * Should not keep onlist wmps
						 * while waiting; this macro
						 * will also do wmp = NULL;
						 */
						FREE_ONLIST_WMAP(un, wmp);
					}
					/*
					 * sl_wmp is the wmap on which wait
					 * is done, since the tmp_wmp points
					 * to the inuse wmap, set sl_wmp to
					 * tmp_wmp and change the state to sleep
					 */
					sl_wmp = tmp_wmp;
					state = SD_WM_WAIT_MAP;
				} else {
					state = SD_WM_LOCK_RANGE;
				}

			}
			break;

		case SD_WM_LOCK_RANGE:
			ASSERT(un->un_wm_cache);
			/*
			 * The range needs to be locked, try to get a wmap.
			 * First attempt it with NO_SLEEP; we want to avoid a
			 * sleep if possible, as we will have to release the
			 * sd mutex if we have to sleep.
			 */
			if (wmp == NULL)
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_NOSLEEP);
			if (wmp == NULL) {
				mutex_exit(SD_MUTEX(un));
				_NOTE(DATA_READABLE_WITHOUT_LOCK
				    (sd_lun::un_wm_cache))
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_SLEEP);
				mutex_enter(SD_MUTEX(un));
				/*
				 * we released the mutex so recheck and go to
				 * check list state.
				 */
				state = SD_WM_CHK_LIST;
			} else {
				/*
				 * We exit out of state machine since we
				 * have the wmap. Do the housekeeping first.
				 * place the wmap on the wmap list if it is not
				 * on it already and then set the state to done.
				 */
				wmp->wm_start = startb;
				wmp->wm_end = endb;
				wmp->wm_flags = typ | SD_WM_BUSY;
				if (typ & SD_WTYPE_RMW) {
					un->un_rmw_count++;
				}
				/*
				 * If not already on the list then link
				 */
				if (!ONLIST(un, wmp)) {
					wmp->wm_next = un->un_wm;
					wmp->wm_prev = NULL;
					if (wmp->wm_next)
						wmp->wm_next->wm_prev = wmp;
					un->un_wm = wmp;
				}
				state = SD_WM_DONE;
			}
			break;

		case SD_WM_WAIT_MAP:
			ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
			/*
			 * Wait is done on sl_wmp, which is set in the
			 * check_list state.
			 */
			sl_wmp->wm_wanted_count++;
			cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
			sl_wmp->wm_wanted_count--;
			/*
			 * We can reuse the memory from the completed sl_wmp
			 * lock range for our new lock, but only if no one is
			 * waiting for it.
			 */
			ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
			if (sl_wmp->wm_wanted_count == 0) {
				if (wmp != NULL) {
					CHK_N_FREEWMP(un, wmp);
				}
				wmp = sl_wmp;
			}
			sl_wmp = NULL;
			/*
			 * After waking up, need to recheck for availability of
			 * range.
			 */
			state = SD_WM_CHK_LIST;
			break;

		default:
			panic("sd_range_lock: "
			    "Unknown state %d in sd_range_lock", state);
			/*NOTREACHED*/
		} /* switch(state) */

	} /* while(state != SD_WM_DONE) */

	mutex_exit(SD_MUTEX(un));

	ASSERT(wmp != NULL);

	return (wmp);
}
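
/*
 * For reference, a compact summary of the sd_range_lock() state machine
 * above (derived from the code; not a normative specification):
 *
 *	SD_WM_CHK_LIST --(no conflict)--> SD_WM_LOCK_RANGE
 *	SD_WM_CHK_LIST --(overlap found)--> SD_WM_WAIT_MAP
 *	SD_WM_LOCK_RANGE --(wmap acquired)--> SD_WM_DONE
 *	SD_WM_LOCK_RANGE --(had to sleep for memory)--> SD_WM_CHK_LIST
 *	SD_WM_WAIT_MAP --(woken by cv_broadcast)--> SD_WM_CHK_LIST
 *
 * Every path that releases SD_MUTEX(un) (to sleep in kmem_cache_alloc()
 * or in cv_wait()) loops back to SD_WM_CHK_LIST, so the range is always
 * revalidated before SD_WM_DONE is reached.
 */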
29451 */ 29452 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY)); 29453 if (sl_wmp->wm_wanted_count == 0) { 29454 if (wmp != NULL) { 29455 CHK_N_FREEWMP(un, wmp); 29456 } 29457 wmp = sl_wmp; 29458 } 29459 sl_wmp = NULL; 29460 /* 29461 * After waking up, need to recheck for availability of 29462 * range. 29463 */ 29464 state = SD_WM_CHK_LIST; 29465 break; 29466 29467 default: 29468 panic("sd_range_lock: " 29469 "Unknown state %d in sd_range_lock", state); 29470 /*NOTREACHED*/ 29471 } /* switch(state) */ 29472 29473 } /* while(state != SD_WM_DONE) */ 29474 29475 mutex_exit(SD_MUTEX(un)); 29476 29477 ASSERT(wmp != NULL); 29478 29479 return (wmp); 29480 } 29481 29482 29483 /* 29484 * Function: sd_get_range() 29485 * 29486 * Description: Find if there any overlapping I/O to this one 29487 * Returns the write-map of 1st such I/O, NULL otherwise. 29488 * 29489 * Arguments: un - sd_lun structure for the device. 29490 * startb - The starting block number 29491 * endb - The end block number 29492 * 29493 * Return Code: wm - pointer to the wmap structure. 29494 */ 29495 29496 static struct sd_w_map * 29497 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 29498 { 29499 struct sd_w_map *wmp; 29500 29501 ASSERT(un != NULL); 29502 29503 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 29504 if (!(wmp->wm_flags & SD_WM_BUSY)) { 29505 continue; 29506 } 29507 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 29508 break; 29509 } 29510 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 29511 break; 29512 } 29513 } 29514 29515 return (wmp); 29516 } 29517 29518 29519 /* 29520 * Function: sd_free_inlist_wmap() 29521 * 29522 * Description: Unlink and free a write map struct. 29523 * 29524 * Arguments: un - sd_lun structure for the device. 29525 * wmp - sd_w_map which needs to be unlinked. 29526 */ 29527 29528 static void 29529 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 29530 { 29531 ASSERT(un != NULL); 29532 29533 if (un->un_wm == wmp) { 29534 un->un_wm = wmp->wm_next; 29535 } else { 29536 wmp->wm_prev->wm_next = wmp->wm_next; 29537 } 29538 29539 if (wmp->wm_next) { 29540 wmp->wm_next->wm_prev = wmp->wm_prev; 29541 } 29542 29543 wmp->wm_next = wmp->wm_prev = NULL; 29544 29545 kmem_cache_free(un->un_wm_cache, wmp); 29546 } 29547 29548 29549 /* 29550 * Function: sd_range_unlock() 29551 * 29552 * Description: Unlock the range locked by wm. 29553 * Free write map if nobody else is waiting on it. 29554 * 29555 * Arguments: un - sd_lun structure for the device. 29556 * wmp - sd_w_map which needs to be unlinked. 29557 */ 29558 29559 static void 29560 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 29561 { 29562 ASSERT(un != NULL); 29563 ASSERT(wm != NULL); 29564 ASSERT(!mutex_owned(SD_MUTEX(un))); 29565 29566 mutex_enter(SD_MUTEX(un)); 29567 29568 if (wm->wm_flags & SD_WTYPE_RMW) { 29569 un->un_rmw_count--; 29570 } 29571 29572 if (wm->wm_wanted_count) { 29573 wm->wm_flags = 0; 29574 /* 29575 * Broadcast that the wmap is available now. 29576 */ 29577 cv_broadcast(&wm->wm_avail); 29578 } else { 29579 /* 29580 * If no one is waiting on the map, it should be free'ed. 29581 */ 29582 sd_free_inlist_wmap(un, wm); 29583 } 29584 29585 mutex_exit(SD_MUTEX(un)); 29586 } 29587 29588 29589 /* 29590 * Function: sd_read_modify_write_task 29591 * 29592 * Description: Called from a taskq thread to initiate the write phase of 29593 * a read-modify-write request. This is used for targets where 29594 * un->un_sys_blocksize != un->un_tgt_blocksize. 
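 *
 *		The read-completion side hands the buf off roughly as in the
 *		sketch below (illustrative; sd_wmr_tq is the driver's
 *		read-modify-write taskq, and the exact dispatch flags may
 *		differ):
 *
 *		(void) taskq_dispatch(sd_wmr_tq,
 *		    sd_read_modify_write_task, bp, KM_SLEEP);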
29595  *
29596  * Arguments: arg - a pointer to the buf(9S) struct for the write command.
29597  *
29598  * Context: Called under taskq thread context.
29599  */
29600 
29601 static void
29602 sd_read_modify_write_task(void *arg)
29603 {
29604 	struct sd_mapblocksize_info	*bsp;
29605 	struct buf	*bp;
29606 	struct sd_xbuf	*xp;
29607 	struct sd_lun	*un;
29608 
29609 	bp = arg;	/* The bp is given in arg */
29610 	ASSERT(bp != NULL);
29611 
29612 	/* Get the pointer to the layer-private data struct */
29613 	xp = SD_GET_XBUF(bp);
29614 	ASSERT(xp != NULL);
29615 	bsp = xp->xb_private;
29616 	ASSERT(bsp != NULL);
29617 
29618 	un = SD_GET_UN(bp);
29619 	ASSERT(un != NULL);
29620 	ASSERT(!mutex_owned(SD_MUTEX(un)));
29621 
29622 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29623 	    "sd_read_modify_write_task: entry: buf:0x%p\n", bp);
29624 
29625 	/*
29626 	 * This is the write phase of a read-modify-write request, called
29627 	 * under the context of a taskq thread in response to the read
29628 	 * portion of the rmw request completing under interrupt
29629 	 * context. The write request must be sent from here down the iostart
29630 	 * chain as if it were being sent from sd_mapblocksize_iostart(), so
29631 	 * we use the layer index saved in the layer-private data area.
29632 	 */
29633 	SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);
29634 
29635 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29636 	    "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
29637 }
29638 
29639 
29640 /*
29641  * Function: sddump_do_read_of_rmw()
29642  *
29643  * Description: This routine is called from sddump. If sddump is called
29644  *		with an I/O that is not aligned on a device blocksize
29645  *		boundary, then the write has to be converted to a
29646  *		read-modify-write. Do the read part here in order to keep
29647  *		sddump simple. Note that the sd_mutex is held across the
29648  *		call to this routine.
29649  *
29650  * Arguments:	un - sd_lun
29651  *		blkno - block number in terms of media block size.
29652  *		nblk - number of blocks.
29653  *		bpp - pointer to pointer to the buf structure. On return
29654  *		from this function, *bpp points to the valid buffer
29655  *		to which the write has to be done.
29656  *
29657  * Return Code: 0 for success or errno-type return code
29658  */
29659 
29660 static int
29661 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
29662     struct buf **bpp)
29663 {
29664 	int err;
29665 	int i;
29666 	int rval;
29667 	struct buf *bp;
29668 	struct scsi_pkt *pkt = NULL;
29669 	uint32_t target_blocksize;
29670 
29671 	ASSERT(un != NULL);
29672 	ASSERT(mutex_owned(SD_MUTEX(un)));
29673 
29674 	target_blocksize = un->un_tgt_blocksize;
29675 
29676 	mutex_exit(SD_MUTEX(un));
29677 
29678 	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
29679 	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
29680 	if (bp == NULL) {
29681 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29682 		    "no resources for dumping; giving up");
29683 		err = ENOMEM;
29684 		goto done;
29685 	}
29686 
29687 	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
29688 	    blkno, nblk);
29689 	if (rval != 0) {
29690 		scsi_free_consistent_buf(bp);
29691 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29692 		    "no resources for dumping; giving up");
29693 		err = ENOMEM;
29694 		goto done;
29695 	}
29696 
29697 	pkt->pkt_flags |= FLAG_NOINTR;
29698 
29699 	err = EIO;
29700 	for (i = 0; i < SD_NDUMP_RETRIES; i++) {
29701 
29702 		/*
29703 		 * scsi_poll() returns 0 (success) if the command completes and
29704 		 * the status block is STATUS_GOOD.
We should only check 29705 * errors if this condition is not true. Even then we should 29706 * send our own request sense packet only if we have a check 29707 * condition and auto request sense has not been performed by 29708 * the hba. 29709 */ 29710 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 29711 29712 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 29713 err = 0; 29714 break; 29715 } 29716 29717 /* 29718 * Check CMD_DEV_GONE 1st, give up if device is gone, 29719 * no need to read RQS data. 29720 */ 29721 if (pkt->pkt_reason == CMD_DEV_GONE) { 29722 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29723 "Error while dumping state with rmw..." 29724 "Device is gone\n"); 29725 break; 29726 } 29727 29728 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 29729 SD_INFO(SD_LOG_DUMP, un, 29730 "sddump: read failed with CHECK, try # %d\n", i); 29731 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 29732 (void) sd_send_polled_RQS(un); 29733 } 29734 29735 continue; 29736 } 29737 29738 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 29739 int reset_retval = 0; 29740 29741 SD_INFO(SD_LOG_DUMP, un, 29742 "sddump: read failed with BUSY, try # %d\n", i); 29743 29744 if (un->un_f_lun_reset_enabled == TRUE) { 29745 reset_retval = scsi_reset(SD_ADDRESS(un), 29746 RESET_LUN); 29747 } 29748 if (reset_retval == 0) { 29749 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 29750 } 29751 (void) sd_send_polled_RQS(un); 29752 29753 } else { 29754 SD_INFO(SD_LOG_DUMP, un, 29755 "sddump: read failed with 0x%x, try # %d\n", 29756 SD_GET_PKT_STATUS(pkt), i); 29757 mutex_enter(SD_MUTEX(un)); 29758 sd_reset_target(un, pkt); 29759 mutex_exit(SD_MUTEX(un)); 29760 } 29761 29762 /* 29763 * If we are not getting anywhere with lun/target resets, 29764 * let's reset the bus. 29765 */ 29766 if (i > SD_NDUMP_RETRIES/2) { 29767 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 29768 (void) sd_send_polled_RQS(un); 29769 } 29770 29771 } 29772 scsi_destroy_pkt(pkt); 29773 29774 if (err != 0) { 29775 scsi_free_consistent_buf(bp); 29776 *bpp = NULL; 29777 } else { 29778 *bpp = bp; 29779 } 29780 29781 done: 29782 mutex_enter(SD_MUTEX(un)); 29783 return (err); 29784 } 29785 29786 29787 /* 29788 * Function: sd_failfast_flushq 29789 * 29790 * Description: Take all bp's on the wait queue that have B_FAILFAST set 29791 * in b_flags and move them onto the failfast queue, then kick 29792 * off a thread to return all bp's on the failfast queue to 29793 * their owners with an error set. 29794 * 29795 * Arguments: un - pointer to the soft state struct for the instance. 29796 * 29797 * Context: may execute in interrupt context. 29798 */ 29799 29800 static void 29801 sd_failfast_flushq(struct sd_lun *un) 29802 { 29803 struct buf *bp; 29804 struct buf *next_waitq_bp; 29805 struct buf *prev_waitq_bp = NULL; 29806 29807 ASSERT(un != NULL); 29808 ASSERT(mutex_owned(SD_MUTEX(un))); 29809 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 29810 ASSERT(un->un_failfast_bp == NULL); 29811 29812 SD_TRACE(SD_LOG_IO_FAILFAST, un, 29813 "sd_failfast_flushq: entry: un:0x%p\n", un); 29814 29815 /* 29816 * Check if we should flush all bufs when entering failfast state, or 29817 * just those with B_FAILFAST set. 29818 */ 29819 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 29820 /* 29821 * Move *all* bp's on the wait queue to the failfast flush 29822 * queue, including those that do NOT have B_FAILFAST set. 
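		 * The two queues are simply spliced: the waitq head is linked
		 * to the failfast tail (or becomes the failfast head), and
		 * the waitq tail becomes the new failfast tail.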
29823 */ 29824 if (un->un_failfast_headp == NULL) { 29825 ASSERT(un->un_failfast_tailp == NULL); 29826 un->un_failfast_headp = un->un_waitq_headp; 29827 } else { 29828 ASSERT(un->un_failfast_tailp != NULL); 29829 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 29830 } 29831 29832 un->un_failfast_tailp = un->un_waitq_tailp; 29833 29834 /* update kstat for each bp moved out of the waitq */ 29835 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 29836 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 29837 } 29838 29839 /* empty the waitq */ 29840 un->un_waitq_headp = un->un_waitq_tailp = NULL; 29841 29842 } else { 29843 /* 29844 * Go thru the wait queue, pick off all entries with 29845 * B_FAILFAST set, and move these onto the failfast queue. 29846 */ 29847 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 29848 /* 29849 * Save the pointer to the next bp on the wait queue, 29850 * so we get to it on the next iteration of this loop. 29851 */ 29852 next_waitq_bp = bp->av_forw; 29853 29854 /* 29855 * If this bp from the wait queue does NOT have 29856 * B_FAILFAST set, just move on to the next element 29857 * in the wait queue. Note, this is the only place 29858 * where it is correct to set prev_waitq_bp. 29859 */ 29860 if ((bp->b_flags & B_FAILFAST) == 0) { 29861 prev_waitq_bp = bp; 29862 continue; 29863 } 29864 29865 /* 29866 * Remove the bp from the wait queue. 29867 */ 29868 if (bp == un->un_waitq_headp) { 29869 /* The bp is the first element of the waitq. */ 29870 un->un_waitq_headp = next_waitq_bp; 29871 if (un->un_waitq_headp == NULL) { 29872 /* The wait queue is now empty */ 29873 un->un_waitq_tailp = NULL; 29874 } 29875 } else { 29876 /* 29877 * The bp is either somewhere in the middle 29878 * or at the end of the wait queue. 29879 */ 29880 ASSERT(un->un_waitq_headp != NULL); 29881 ASSERT(prev_waitq_bp != NULL); 29882 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 29883 == 0); 29884 if (bp == un->un_waitq_tailp) { 29885 /* bp is the last entry on the waitq. */ 29886 ASSERT(next_waitq_bp == NULL); 29887 un->un_waitq_tailp = prev_waitq_bp; 29888 } 29889 prev_waitq_bp->av_forw = next_waitq_bp; 29890 } 29891 bp->av_forw = NULL; 29892 29893 /* 29894 * update kstat since the bp is moved out of 29895 * the waitq 29896 */ 29897 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 29898 29899 /* 29900 * Now put the bp onto the failfast queue. 29901 */ 29902 if (un->un_failfast_headp == NULL) { 29903 /* failfast queue is currently empty */ 29904 ASSERT(un->un_failfast_tailp == NULL); 29905 un->un_failfast_headp = 29906 un->un_failfast_tailp = bp; 29907 } else { 29908 /* Add the bp to the end of the failfast q */ 29909 ASSERT(un->un_failfast_tailp != NULL); 29910 ASSERT(un->un_failfast_tailp->b_flags & 29911 B_FAILFAST); 29912 un->un_failfast_tailp->av_forw = bp; 29913 un->un_failfast_tailp = bp; 29914 } 29915 } 29916 } 29917 29918 /* 29919 * Now return all bp's on the failfast queue to their owners. 29920 */ 29921 while ((bp = un->un_failfast_headp) != NULL) { 29922 29923 un->un_failfast_headp = bp->av_forw; 29924 if (un->un_failfast_headp == NULL) { 29925 un->un_failfast_tailp = NULL; 29926 } 29927 29928 /* 29929 * We want to return the bp with a failure error code, but 29930 * we do not want a call to sd_start_cmds() to occur here, 29931 * so use sd_return_failed_command_no_restart() instead of 29932 * sd_return_failed_command(). 29933 */ 29934 sd_return_failed_command_no_restart(un, bp, EIO); 29935 } 29936 29937 /* Flush the xbuf queues if required. 
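	 * ddi_xbuf_flushq() runs sd_failfast_flushq_callback() (below) over
	 * the bufs queued in the xbuf layer and flushes those for which the
	 * callback returns TRUE.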
*/
29938 	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
29939 		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
29940 	}
29941 
29942 	SD_TRACE(SD_LOG_IO_FAILFAST, un,
29943 	    "sd_failfast_flushq: exit: un:0x%p\n", un);
29944 }
29945 
29946 
29947 /*
29948  * Function: sd_failfast_flushq_callback
29949  *
29950  * Description: Return TRUE if the given bp meets the criteria for failfast
29951  *		flushing. Used with ddi_xbuf_flushq(9F).
29952  *
29953  * Arguments: bp - ptr to buf struct to be examined.
29954  *
29955  * Context: Any
29956  */
29957 
29958 static int
29959 sd_failfast_flushq_callback(struct buf *bp)
29960 {
29961 	/*
29962 	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
29963 	 * state is entered; OR (2) the given bp has B_FAILFAST set.
29964 	 */
29965 	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
29966 	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
29967 }
29968 
29969 
29970 
29971 /*
29972  * Function: sd_setup_next_xfer
29973  *
29974  * Description: Prepare next I/O operation using DMA_PARTIAL
29975  *
29976  */
29977 
29978 static int
29979 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
29980     struct scsi_pkt *pkt, struct sd_xbuf *xp)
29981 {
29982 	ssize_t	num_blks_not_xfered;
29983 	daddr_t	strt_blk_num;
29984 	ssize_t	bytes_not_xfered;
29985 	int	rval;
29986 
29987 	ASSERT(pkt->pkt_resid == 0);
29988 
29989 	/*
29990 	 * Calculate next block number and amount to be transferred.
29991 	 *
29992 	 * How much data has NOT been transferred to the HBA yet.
29993 	 */
29994 	bytes_not_xfered = xp->xb_dma_resid;
29995 
29996 	/*
29997 	 * Figure out how many blocks have NOT been transferred to the HBA yet.
29998 	 */
29999 	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);
30000 
30001 	/*
30002 	 * Set the starting block number to the end of what WAS transferred.
30003 	 */
30004 	strt_blk_num = xp->xb_blkno +
30005 	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);
30006 
30007 	/*
30008 	 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
30009 	 * will call scsi_init_pkt with NULL_FUNC so we do not have to release
30010 	 * the disk mutex here.
30011 	 */
30012 	rval = sd_setup_next_rw_pkt(un, pkt, bp,
30013 	    strt_blk_num, num_blks_not_xfered);
30014 
30015 	if (rval == 0) {
30016 
30017 		/*
30018 		 * Success.
30019 		 *
30020 		 * Adjust things if there are still more blocks to be
30021 		 * transferred.
30022 		 */
30023 		xp->xb_dma_resid = pkt->pkt_resid;
30024 		pkt->pkt_resid = 0;
30025 
30026 		return (1);
30027 	}
30028 
30029 	/*
30030 	 * There's really only one possible return value from
30031 	 * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt
30032 	 * returns NULL.
30033 	 */
30034 	ASSERT(rval == SD_PKT_ALLOC_FAILURE);
30035 
30036 	bp->b_resid = bp->b_bcount;
30037 	bp->b_flags |= B_ERROR;
30038 
30039 	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
30040 	    "Error setting up next portion of DMA transfer\n");
30041 
30042 	return (0);
30043 }
30044 
30045 /*
30046  * Function: sd_panic_for_res_conflict
30047  *
30048  * Description: Call panic with a string formatted with "Reservation Conflict"
30049  *		and a human-readable identifier indicating the SD instance
30050  *		that experienced the reservation conflict.
30051  *
30052  * Arguments: un - pointer to the soft state struct for the instance.
30053  *
30054  * Context: may execute in interrupt context.
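 *
 * The resulting panic string looks like the following (the device path
 * shown is hypothetical):
 *
 *	Reservation Conflict
 *	Disk: /pci@0,0/pci1000,f@10/sd@1,0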
30055 */ 30056 30057 #define SD_RESV_CONFLICT_FMT_LEN 40 30058 void 30059 sd_panic_for_res_conflict(struct sd_lun *un) 30060 { 30061 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN]; 30062 char path_str[MAXPATHLEN]; 30063 30064 (void) snprintf(panic_str, sizeof (panic_str), 30065 "Reservation Conflict\nDisk: %s", 30066 ddi_pathname(SD_DEVINFO(un), path_str)); 30067 30068 panic(panic_str); 30069 } 30070 30071 /* 30072 * Note: The following sd_faultinjection_ioctl( ) routines implement 30073 * driver support for handling fault injection for error analysis 30074 * causing faults in multiple layers of the driver. 30075 * 30076 */ 30077 30078 #ifdef SD_FAULT_INJECTION 30079 static uint_t sd_fault_injection_on = 0; 30080 30081 /* 30082 * Function: sd_faultinjection_ioctl() 30083 * 30084 * Description: This routine is the driver entry point for handling 30085 * faultinjection ioctls to inject errors into the 30086 * layer model 30087 * 30088 * Arguments: cmd - the ioctl cmd received 30089 * arg - the arguments from user and returns 30090 */ 30091 30092 static void 30093 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) 30094 { 30095 uint_t i = 0; 30096 uint_t rval; 30097 30098 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 30099 30100 mutex_enter(SD_MUTEX(un)); 30101 30102 switch (cmd) { 30103 case SDIOCRUN: 30104 /* Allow pushed faults to be injected */ 30105 SD_INFO(SD_LOG_SDTEST, un, 30106 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 30107 30108 sd_fault_injection_on = 1; 30109 30110 SD_INFO(SD_LOG_IOERR, un, 30111 "sd_faultinjection_ioctl: run finished\n"); 30112 break; 30113 30114 case SDIOCSTART: 30115 /* Start Injection Session */ 30116 SD_INFO(SD_LOG_SDTEST, un, 30117 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 30118 30119 sd_fault_injection_on = 0; 30120 un->sd_injection_mask = 0xFFFFFFFF; 30121 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 30122 un->sd_fi_fifo_pkt[i] = NULL; 30123 un->sd_fi_fifo_xb[i] = NULL; 30124 un->sd_fi_fifo_un[i] = NULL; 30125 un->sd_fi_fifo_arq[i] = NULL; 30126 } 30127 un->sd_fi_fifo_start = 0; 30128 un->sd_fi_fifo_end = 0; 30129 30130 mutex_enter(&(un->un_fi_mutex)); 30131 un->sd_fi_log[0] = '\0'; 30132 un->sd_fi_buf_len = 0; 30133 mutex_exit(&(un->un_fi_mutex)); 30134 30135 SD_INFO(SD_LOG_IOERR, un, 30136 "sd_faultinjection_ioctl: start finished\n"); 30137 break; 30138 30139 case SDIOCSTOP: 30140 /* Stop Injection Session */ 30141 SD_INFO(SD_LOG_SDTEST, un, 30142 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 30143 sd_fault_injection_on = 0; 30144 un->sd_injection_mask = 0x0; 30145 30146 /* Empty stray or unuseds structs from fifo */ 30147 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 30148 if (un->sd_fi_fifo_pkt[i] != NULL) { 30149 kmem_free(un->sd_fi_fifo_pkt[i], 30150 sizeof (struct sd_fi_pkt)); 30151 } 30152 if (un->sd_fi_fifo_xb[i] != NULL) { 30153 kmem_free(un->sd_fi_fifo_xb[i], 30154 sizeof (struct sd_fi_xb)); 30155 } 30156 if (un->sd_fi_fifo_un[i] != NULL) { 30157 kmem_free(un->sd_fi_fifo_un[i], 30158 sizeof (struct sd_fi_un)); 30159 } 30160 if (un->sd_fi_fifo_arq[i] != NULL) { 30161 kmem_free(un->sd_fi_fifo_arq[i], 30162 sizeof (struct sd_fi_arq)); 30163 } 30164 un->sd_fi_fifo_pkt[i] = NULL; 30165 un->sd_fi_fifo_un[i] = NULL; 30166 un->sd_fi_fifo_xb[i] = NULL; 30167 un->sd_fi_fifo_arq[i] = NULL; 30168 } 30169 un->sd_fi_fifo_start = 0; 30170 un->sd_fi_fifo_end = 0; 30171 30172 SD_INFO(SD_LOG_IOERR, un, 30173 "sd_faultinjection_ioctl: stop finished\n"); 30174 break; 30175 30176 case SDIOCINSERTPKT: 30177 /* 
Store a packet struct to be pushed onto fifo */ 30178 SD_INFO(SD_LOG_SDTEST, un, 30179 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 30180 30181 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 30182 30183 sd_fault_injection_on = 0; 30184 30185 /* No more that SD_FI_MAX_ERROR allowed in Queue */ 30186 if (un->sd_fi_fifo_pkt[i] != NULL) { 30187 kmem_free(un->sd_fi_fifo_pkt[i], 30188 sizeof (struct sd_fi_pkt)); 30189 } 30190 if (arg != NULL) { 30191 un->sd_fi_fifo_pkt[i] = 30192 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 30193 if (un->sd_fi_fifo_pkt[i] == NULL) { 30194 /* Alloc failed don't store anything */ 30195 break; 30196 } 30197 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 30198 sizeof (struct sd_fi_pkt), 0); 30199 if (rval == -1) { 30200 kmem_free(un->sd_fi_fifo_pkt[i], 30201 sizeof (struct sd_fi_pkt)); 30202 un->sd_fi_fifo_pkt[i] = NULL; 30203 } 30204 } else { 30205 SD_INFO(SD_LOG_IOERR, un, 30206 "sd_faultinjection_ioctl: pkt null\n"); 30207 } 30208 break; 30209 30210 case SDIOCINSERTXB: 30211 /* Store a xb struct to be pushed onto fifo */ 30212 SD_INFO(SD_LOG_SDTEST, un, 30213 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 30214 30215 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 30216 30217 sd_fault_injection_on = 0; 30218 30219 if (un->sd_fi_fifo_xb[i] != NULL) { 30220 kmem_free(un->sd_fi_fifo_xb[i], 30221 sizeof (struct sd_fi_xb)); 30222 un->sd_fi_fifo_xb[i] = NULL; 30223 } 30224 if (arg != NULL) { 30225 un->sd_fi_fifo_xb[i] = 30226 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 30227 if (un->sd_fi_fifo_xb[i] == NULL) { 30228 /* Alloc failed don't store anything */ 30229 break; 30230 } 30231 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 30232 sizeof (struct sd_fi_xb), 0); 30233 30234 if (rval == -1) { 30235 kmem_free(un->sd_fi_fifo_xb[i], 30236 sizeof (struct sd_fi_xb)); 30237 un->sd_fi_fifo_xb[i] = NULL; 30238 } 30239 } else { 30240 SD_INFO(SD_LOG_IOERR, un, 30241 "sd_faultinjection_ioctl: xb null\n"); 30242 } 30243 break; 30244 30245 case SDIOCINSERTUN: 30246 /* Store a un struct to be pushed onto fifo */ 30247 SD_INFO(SD_LOG_SDTEST, un, 30248 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 30249 30250 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 30251 30252 sd_fault_injection_on = 0; 30253 30254 if (un->sd_fi_fifo_un[i] != NULL) { 30255 kmem_free(un->sd_fi_fifo_un[i], 30256 sizeof (struct sd_fi_un)); 30257 un->sd_fi_fifo_un[i] = NULL; 30258 } 30259 if (arg != NULL) { 30260 un->sd_fi_fifo_un[i] = 30261 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 30262 if (un->sd_fi_fifo_un[i] == NULL) { 30263 /* Alloc failed don't store anything */ 30264 break; 30265 } 30266 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 30267 sizeof (struct sd_fi_un), 0); 30268 if (rval == -1) { 30269 kmem_free(un->sd_fi_fifo_un[i], 30270 sizeof (struct sd_fi_un)); 30271 un->sd_fi_fifo_un[i] = NULL; 30272 } 30273 30274 } else { 30275 SD_INFO(SD_LOG_IOERR, un, 30276 "sd_faultinjection_ioctl: un null\n"); 30277 } 30278 30279 break; 30280 30281 case SDIOCINSERTARQ: 30282 /* Store a arq struct to be pushed onto fifo */ 30283 SD_INFO(SD_LOG_SDTEST, un, 30284 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 30285 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 30286 30287 sd_fault_injection_on = 0; 30288 30289 if (un->sd_fi_fifo_arq[i] != NULL) { 30290 kmem_free(un->sd_fi_fifo_arq[i], 30291 sizeof (struct sd_fi_arq)); 30292 un->sd_fi_fifo_arq[i] = NULL; 30293 } 30294 if (arg != NULL) { 30295 un->sd_fi_fifo_arq[i] = 30296 kmem_alloc(sizeof (struct 
sd_fi_arq), KM_NOSLEEP);
30297 			if (un->sd_fi_fifo_arq[i] == NULL) {
30298 				/* Alloc failed don't store anything */
30299 				break;
30300 			}
30301 			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
30302 			    sizeof (struct sd_fi_arq), 0);
30303 			if (rval == -1) {
30304 				kmem_free(un->sd_fi_fifo_arq[i],
30305 				    sizeof (struct sd_fi_arq));
30306 				un->sd_fi_fifo_arq[i] = NULL;
30307 			}
30308 
30309 		} else {
30310 			SD_INFO(SD_LOG_IOERR, un,
30311 			    "sd_faultinjection_ioctl: arq null\n");
30312 		}
30313 
30314 		break;
30315 
30316 	case SDIOCPUSH:
30317 		/* Push stored xb, pkt, un, and arq onto fifo */
30318 		sd_fault_injection_on = 0;
30319 
30320 		if (arg != NULL) {
30321 			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
30322 			if (rval != -1 &&
30323 			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
30324 				un->sd_fi_fifo_end += i;
30325 			}
30326 		} else {
30327 			SD_INFO(SD_LOG_IOERR, un,
30328 			    "sd_faultinjection_ioctl: push arg null\n");
30329 			if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
30330 				un->sd_fi_fifo_end++;
30331 			}
30332 		}
30333 		SD_INFO(SD_LOG_IOERR, un,
30334 		    "sd_faultinjection_ioctl: push to end=%d\n",
30335 		    un->sd_fi_fifo_end);
30336 		break;
30337 
30338 	case SDIOCRETRIEVE:
30339 		/* Return buffer of log from Injection session */
30340 		SD_INFO(SD_LOG_SDTEST, un,
30341 		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");
30342 
30343 		sd_fault_injection_on = 0;
30344 
30345 		mutex_enter(&(un->un_fi_mutex));
30346 		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
30347 		    un->sd_fi_buf_len+1, 0);
30348 		mutex_exit(&(un->un_fi_mutex));
30349 
30350 		if (rval == -1) {
30351 			/*
30352 			 * arg is possibly invalid; set
30353 			 * it to NULL for the return.
30354 			 */
30355 			arg = NULL;
30356 		}
30357 		break;
30358 	}
30359 
30360 	mutex_exit(SD_MUTEX(un));
30361 	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: exit\n");
30362 }
30363 
30364 
30365 /*
30366  * Function: sd_injection_log()
30367  *
30368  * Description: This routine appends buf to the existing injection log,
30369  *		for later retrieval via sd_faultinjection_ioctl(), for use
30370  *		in fault detection and recovery.
30371  *
30372  * Arguments: buf - the string to add to the log
30373  */
30374 
30375 static void
30376 sd_injection_log(char *buf, struct sd_lun *un)
30377 {
30378 	uint_t len;
30379 
30380 	ASSERT(un != NULL);
30381 	ASSERT(buf != NULL);
30382 
30383 	mutex_enter(&(un->un_fi_mutex));
30384 
30385 	len = min(strlen(buf), 255);
30386 	/* Add logged value to Injection log to be returned later */
30387 	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
30388 		uint_t	offset = strlen((char *)un->sd_fi_log);
30389 		char *destp = (char *)un->sd_fi_log + offset;
30390 		int i;
30391 		for (i = 0; i < len; i++) {
30392 			*destp++ = *buf++;
30393 		}
30394 		un->sd_fi_buf_len += len;
30395 		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
30396 	}
30397 
30398 	mutex_exit(&(un->un_fi_mutex));
30399 }
30400 
30401 
30402 /*
30403  * Function: sd_faultinjection()
30404  *
30405  * Description: This routine takes the pkt and changes its
30406  *		content based on the error injection scenario.
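 *
 *		An injection session is typically driven from userland via
 *		the ioctls handled above, roughly as in this illustrative
 *		sequence ("fd" being a descriptor for the disk device):
 *
 *		ioctl(fd, SDIOCSTART);			clear fifo and log
 *		ioctl(fd, SDIOCINSERTPKT, &fi_pkt);	stage an error
 *		ioctl(fd, SDIOCPUSH, &nitems);		push staged entries
 *		ioctl(fd, SDIOCRUN);			arm injection
 *		ioctl(fd, SDIOCRETRIEVE, logbuf);	collect session log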
30407 * 30408 * Arguments: pktp - packet to be changed 30409 */ 30410 30411 static void 30412 sd_faultinjection(struct scsi_pkt *pktp) 30413 { 30414 uint_t i; 30415 struct sd_fi_pkt *fi_pkt; 30416 struct sd_fi_xb *fi_xb; 30417 struct sd_fi_un *fi_un; 30418 struct sd_fi_arq *fi_arq; 30419 struct buf *bp; 30420 struct sd_xbuf *xb; 30421 struct sd_lun *un; 30422 30423 ASSERT(pktp != NULL); 30424 30425 /* pull bp xb and un from pktp */ 30426 bp = (struct buf *)pktp->pkt_private; 30427 xb = SD_GET_XBUF(bp); 30428 un = SD_GET_UN(bp); 30429 30430 ASSERT(un != NULL); 30431 30432 mutex_enter(SD_MUTEX(un)); 30433 30434 SD_TRACE(SD_LOG_SDTEST, un, 30435 "sd_faultinjection: entry Injection from sdintr\n"); 30436 30437 /* if injection is off return */ 30438 if (sd_fault_injection_on == 0 || 30439 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 30440 mutex_exit(SD_MUTEX(un)); 30441 return; 30442 } 30443 30444 SD_INFO(SD_LOG_SDTEST, un, 30445 "sd_faultinjection: is working for copying\n"); 30446 30447 /* take next set off fifo */ 30448 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 30449 30450 fi_pkt = un->sd_fi_fifo_pkt[i]; 30451 fi_xb = un->sd_fi_fifo_xb[i]; 30452 fi_un = un->sd_fi_fifo_un[i]; 30453 fi_arq = un->sd_fi_fifo_arq[i]; 30454 30455 30456 /* set variables accordingly */ 30457 /* set pkt if it was on fifo */ 30458 if (fi_pkt != NULL) { 30459 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 30460 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 30461 if (fi_pkt->pkt_cdbp != 0xff) 30462 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 30463 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 30464 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 30465 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 30466 30467 } 30468 /* set xb if it was on fifo */ 30469 if (fi_xb != NULL) { 30470 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 30471 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 30472 if (fi_xb->xb_retry_count != 0) 30473 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 30474 SD_CONDSET(xb, xb, xb_victim_retry_count, 30475 "xb_victim_retry_count"); 30476 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 30477 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 30478 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 30479 30480 /* copy in block data from sense */ 30481 /* 30482 * if (fi_xb->xb_sense_data[0] != -1) { 30483 * bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 30484 * SENSE_LENGTH); 30485 * } 30486 */ 30487 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH); 30488 30489 /* copy in extended sense codes */ 30490 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30491 xb, es_code, "es_code"); 30492 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30493 xb, es_key, "es_key"); 30494 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30495 xb, es_add_code, "es_add_code"); 30496 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30497 xb, es_qual_code, "es_qual_code"); 30498 struct scsi_extended_sense *esp; 30499 esp = (struct scsi_extended_sense *)xb->xb_sense_data; 30500 esp->es_class = CLASS_EXTENDED_SENSE; 30501 } 30502 30503 /* set un if it was on fifo */ 30504 if (fi_un != NULL) { 30505 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 30506 SD_CONDSET(un, un, un_ctype, "un_ctype"); 30507 SD_CONDSET(un, un, un_reset_retry_count, 30508 "un_reset_retry_count"); 30509 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 30510 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 30511 SD_CONDSET(un, un, 
un_f_arq_enabled, "un_f_arq_enabled");
30512 		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
30513 		    "un_f_allow_bus_device_reset");
30514 		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");
30515 
30516 	}
30517 
30518 	/* copy in auto request sense if it was on fifo */
30519 	if (fi_arq != NULL) {
30520 		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
30521 	}
30522 
30523 	/* free structs */
30524 	if (un->sd_fi_fifo_pkt[i] != NULL) {
30525 		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
30526 	}
30527 	if (un->sd_fi_fifo_xb[i] != NULL) {
30528 		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
30529 	}
30530 	if (un->sd_fi_fifo_un[i] != NULL) {
30531 		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
30532 	}
30533 	if (un->sd_fi_fifo_arq[i] != NULL) {
30534 		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
30535 	}
30536 
30537 	/*
30538 	 * kmem_free() does not guarantee that the pointer is set to
30539 	 * NULL. Since we use these pointers to determine whether we
30540 	 * set values or not, make sure they are always NULL after
30541 	 * the free.
30542 	 */
30543 	un->sd_fi_fifo_pkt[i] = NULL;
30544 	un->sd_fi_fifo_un[i] = NULL;
30545 	un->sd_fi_fifo_xb[i] = NULL;
30546 	un->sd_fi_fifo_arq[i] = NULL;
30547 
30548 	un->sd_fi_fifo_start++;
30549 
30550 	mutex_exit(SD_MUTEX(un));
30551 
30552 	SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
30553 }
30554 
30555 #endif /* SD_FAULT_INJECTION */
30556 
30557 /*
30558  * This routine is invoked from sd_unit_attach(). Before it is called, the
30559  * properties in the conf file should already have been processed, including
30560  * the "hotpluggable" property.
30561  *
30562  * The sd driver distinguishes 3 different types of devices: removable media,
30563  * non-removable media, and hotpluggable. The differences are defined below:
30564  *
30565  * 1. Device ID
30566  *
30567  * The device ID of a device is used to identify this device. Refer to
30568  * ddi_devid_register(9F).
30569  *
30570  * For a non-removable media disk device that can provide the 0x80 or 0x83
30571  * VPD page (refer to the INQUIRY command in the SCSI SPC specification), a
30572  * unique device ID is created to identify this device. For other
30573  * non-removable media devices, a default device ID is created only if the
30574  * device has at least 2 alternate cylinders. Otherwise, it has no devid.
30575  *
30576  * -------------------------------------------------------
30577  * removable media   hotpluggable  | Can Have Device ID
30578  * -------------------------------------------------------
30579  *   false             false       |     Yes
30580  *   false             true        |     Yes
30581  *   true                x         |     No
30582  * ------------------------------------------------------
30583  *
30584  *
30585  * 2. SCSI group 4 commands
30586  *
30587  * In the SCSI specs, only some commands in the group 4 command set can
30588  * use 8-byte addresses, which are needed to access storage beyond 2TB.
30589  * Other commands have no such capability. Without group 4 support,
30590  * it is impossible to make full use of the storage space of a disk
30591  * with a capacity larger than 2TB.
30592  *
30593  * -----------------------------------------------
30594  * removable media   hotpluggable   LP64  |  Group
30595  * -----------------------------------------------
30596  *   false             false       false  |   1
30597  *   false             false       true   |   4
30598  *   false             true        false  |   1
30599  *   false             true        true   |   4
30600  *   true                x           x    |   5
30601  * -----------------------------------------------
30602  *
30603  *
30604  * 3. Check for VTOC Label
30605  *
30606  * If a direct-access disk has no EFI label, sd will check if it has a
30607  * valid VTOC label. Now, sd also does that check for removable media
30608  * and hotpluggable devices.
30609  *
30610  * --------------------------------------------------------------
30611  * Direct-Access   removable media   hotpluggable |  Check Label
30612  * -------------------------------------------------------------
30613  *     false          false            false      |    No
30614  *     false          false            true       |    No
30615  *     false          true             false      |    Yes
30616  *     false          true             true       |    Yes
30617  *     true             x                x        |    Yes
30618  * --------------------------------------------------------------
30619  *
30620  *
30621  * 4. Building default VTOC label
30622  *
30623  * As section 3 says, sd checks whether certain kinds of devices have a VTOC
30624  * label. If those devices have no valid VTOC label, sd(7d) will attempt to
30625  * create a default VTOC label for them. Currently sd creates a default VTOC
30626  * label for all devices on the x86 platform (VTOC_16), but only for removable
30627  * media devices on SPARC (VTOC_8).
30628  *
30629  * -----------------------------------------------------------
30630  * removable media   hotpluggable   platform  |  Default Label
30631  * -----------------------------------------------------------
30632  *   false             false          sparc   |     No
30633  *   false             true           x86     |     Yes
30634  *   false             true           sparc   |     Yes
30635  *   true                x             x      |     Yes
30636  * ----------------------------------------------------------
30637  *
30638  *
30639  * 5. Supported blocksizes of target devices
30640  *
30641  * Sd supports a non-512-byte blocksize for removable media devices only.
30642  * For other devices, only a 512-byte blocksize is supported. This may be
30643  * changed in the near future because some RAID devices require a
30644  * non-512-byte blocksize.
30645  *
30646  * -----------------------------------------------------------
30647  * removable media   hotpluggable  | non-512-byte blocksize
30648  * -----------------------------------------------------------
30649  *   false             false       |    No
30650  *   false             true        |    No
30651  *   true                x         |    Yes
30652  * -----------------------------------------------------------
30653  *
30654  *
30655  * 6. Automatic mount & unmount
30656  *
30657  * The sd(7d) driver provides the DKIOCREMOVABLE ioctl, used to query
30658  * whether a device is a removable media device. It returns 1 for
30659  * removable media devices, and 0 for others.
30660  *
30661  * The automatic mounting subsystem should distinguish between the types
30662  * of devices and apply automounting policies to each.
30663  *
30664  *
30665  * 7. fdisk partition management
30666  *
30667  * Fdisk is the traditional partitioning method on the x86 platform. The
30668  * sd(7d) driver supports fdisk partitions only on x86; on SPARC, sd
30669  * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize
30670  * fdisk partitions on both the x86 and SPARC platforms.
30671  *
30672  * -----------------------------------------------------------
30673  * platform   removable media   USB/1394  |  fdisk supported
30674  * -----------------------------------------------------------
30675  *    x86           X               X     |       true
30676  * ------------------------------------------------------------
30677  *   sparc          X               X     |       false
30678  * ------------------------------------------------------------
30679  *
30680  *
30681  * 8. MBOOT/MBR
30682  *
30683  * Although sd(7d) doesn't support fdisk on the SPARC platform, it does
30684  * support reading and writing the mboot for removable media devices on SPARC.
30685  *
30686  * -----------------------------------------------------------
30687  * platform   removable media   USB/1394  |  mboot supported
30688  * -----------------------------------------------------------
30689  *    x86           X               X     |       true
30690  * ------------------------------------------------------------
30691  *   sparc        false           false   |       false
30692  *   sparc        false           true    |       true
30693  *   sparc        true            false   |       true
30694  *   sparc        true            true    |       true
30695  * ------------------------------------------------------------
30696  *
30697  *
30698  * 9. Error handling during device open
30699  *
30700  * If opening a disk device fails, an errno is returned. For some kinds
30701  * of errors, a different errno is returned depending on whether the
30702  * device is a removable media device. This brings USB/1394 hard disks
30703  * in line with expected hard disk behavior. It is not expected that
30704  * this breaks any application.
30705  *
30706  * ------------------------------------------------------
30707  * removable media   hotpluggable  |  errno
30708  * ------------------------------------------------------
30709  *   false             false       |   EIO
30710  *   false             true        |   EIO
30711  *   true                x         |   ENXIO
30712  * ------------------------------------------------------
30713  *
30714  *
30715  * 11. ioctls: DKIOCEJECT, CDROMEJECT
30716  *
30717  * These IOCTLs are applicable only to removable media devices.
30718  *
30719  * -----------------------------------------------------------
30720  * removable media   hotpluggable  | DKIOCEJECT, CDROMEJECT
30721  * -----------------------------------------------------------
30722  *   false             false       |     No
30723  *   false             true        |     No
30724  *   true                x         |     Yes
30725  * -----------------------------------------------------------
30726  *
30727  *
30728  * 12. Kstats for partitions
30729  *
30730  * sd creates partition kstats for non-removable media devices. USB and
30731  * Firewire hard disks now have partition kstats as well.
30732  *
30733  * ------------------------------------------------------
30734  * removable media   hotpluggable  |  kstat
30735  * ------------------------------------------------------
30736  *   false             false       |   Yes
30737  *   false             true        |   Yes
30738  *   true                x         |   No
30739  * ------------------------------------------------------
30740  *
30741  *
30742  * 13. Removable media & hotpluggable properties
30743  *
30744  * The sd driver creates a "removable-media" property for removable media
30745  * devices. Parent nexus drivers create a "hotpluggable" property if they
30746  * support hotplugging.
30747  *
30748  * ---------------------------------------------------------------------
30749  * removable media   hotpluggable  | "removable-media"  "hotpluggable"
30750  * ---------------------------------------------------------------------
30751  *   false             false       |       No                No
30752  *   false             true        |       No                Yes
30753  *   true              false       |       Yes               No
30754  *   true              true        |       Yes               Yes
30755  * ---------------------------------------------------------------------
30756  *
30757  *
30758  * 14. Power Management
30759  *
30760  * sd only power manages removable media devices or devices that support
30761  * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
30762  *
30763  * A parent nexus that supports hotplugging can also set "pm-capable"
30764  * if the disk can be power managed.
30765  *
30766  * ------------------------------------------------------------
30767  * removable media   hotpluggable   pm-capable  | power manage
30768  * ------------------------------------------------------------
30769  *   false             false          false     |     No
30770  *   false             false          true      |     Yes
30771  *   false             true           false     |     No
30772  *   false             true           true      |     Yes
30773  *   true                x              x       |     Yes
30774  * ------------------------------------------------------------
30775  *
30776  * USB and firewire hard disks can now be power managed independently
30777  * of the framebuffer.
30778  *
30779  *
30780  * 15. Support for USB disks with capacity larger than 1TB
30781  *
30782  * Currently, sd doesn't permit a fixed disk device with a capacity
30783  * larger than 1TB to be used in a 32-bit operating system environment.
30784  * However, sd doesn't apply that restriction to removable media devices,
30785  * and instead assumes that removable media devices cannot have a capacity
30786  * larger than 1TB. Therefore, using those devices on a 32-bit system is
30787  * only partially supported, which can cause some unexpected results.
30788  *
30789  * ---------------------------------------------------------------------
30790  * removable media   USB/1394 | Capacity > 1TB | Used in 32-bit env
30791  * ---------------------------------------------------------------------
30792  *   false             false  |      true      |       No
30793  *   false             true   |      true      |       No
30794  *   true              false  |      true      |       Yes
30795  *   true              true   |      true      |       Yes
30796  * ---------------------------------------------------------------------
30797  *
30798  *
30799  * 16. Check write-protection at open time
30800  *
30801  * When a removable media device is opened for writing without the NDELAY
30802  * flag, sd will check whether the device is writable. An attempt to open
30803  * a write-protected device for writing without NDELAY will fail.
30804  *
30805  * ------------------------------------------------------------
30806  * removable media   USB/1394  |  WP Check
30807  * ------------------------------------------------------------
30808  *   false             false   |     No
30809  *   false             true    |     No
30810  *   true              false   |     Yes
30811  *   true              true    |     Yes
30812  * ------------------------------------------------------------
30813  *
30814  *
30815  * 17. Syslog when a corrupted VTOC is encountered
30816  *
30817  * Currently, if an invalid VTOC is encountered, sd prints a syslog
30818  * message only for fixed SCSI disks.
30819  * ------------------------------------------------------------
30820  * removable media   USB/1394  |  print syslog
30821  * ------------------------------------------------------------
30822  *   false             false   |     Yes
30823  *   false             true    |     No
30824  *   true              false   |     No
30825  *   true              true    |     No
30826  * ------------------------------------------------------------
30827  */
30828 static void
30829 sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
30830 {
30831 	int	pm_cap;
30832 
30833 	ASSERT(un->un_sd);
30834 	ASSERT(un->un_sd->sd_inq);
30835 
30836 	/*
30837 	 * Enable SYNC CACHE support for all devices.
30838 	 */
30839 	un->un_f_sync_cache_supported = TRUE;
30840 
30841 	/*
30842 	 * Set the sync cache required flag to false. This ensures
30843 	 * that no SYNC CACHE command is sent when there have been
30844 	 * no writes.
30845 	 */
30846 	un->un_f_sync_cache_required = FALSE;
30847 
30848 	if (un->un_sd->sd_inq->inq_rmb) {
30849 		/*
30850 		 * The media of this device is removable, and for this kind
30851 		 * of device it is possible to change the medium after
30852 		 * opening it. Thus we should support this operation.
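		 * Each of the un_f_* capability flags set below enables
		 * one aspect of that removable-media support.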
30853 */ 30854 un->un_f_has_removable_media = TRUE; 30855 30856 /* 30857 * support non-512-byte blocksize of removable media devices 30858 */ 30859 un->un_f_non_devbsize_supported = TRUE; 30860 30861 /* 30862 * Assume that all removable media devices support DOOR_LOCK 30863 */ 30864 un->un_f_doorlock_supported = TRUE; 30865 30866 /* 30867 * For a removable media device, it is possible to be opened 30868 * with NDELAY flag when there is no media in drive, in this 30869 * case we don't care if device is writable. But if without 30870 * NDELAY flag, we need to check if media is write-protected. 30871 */ 30872 un->un_f_chk_wp_open = TRUE; 30873 30874 /* 30875 * need to start a SCSI watch thread to monitor media state, 30876 * when media is being inserted or ejected, notify syseventd. 30877 */ 30878 un->un_f_monitor_media_state = TRUE; 30879 30880 /* 30881 * Some devices don't support START_STOP_UNIT command. 30882 * Therefore, we'd better check if a device supports it 30883 * before sending it. 30884 */ 30885 un->un_f_check_start_stop = TRUE; 30886 30887 /* 30888 * support eject media ioctl: 30889 * FDEJECT, DKIOCEJECT, CDROMEJECT 30890 */ 30891 un->un_f_eject_media_supported = TRUE; 30892 30893 /* 30894 * Because many removable-media devices don't support 30895 * LOG_SENSE, we couldn't use this command to check if 30896 * a removable media device support power-management. 30897 * We assume that they support power-management via 30898 * START_STOP_UNIT command and can be spun up and down 30899 * without limitations. 30900 */ 30901 un->un_f_pm_supported = TRUE; 30902 30903 /* 30904 * Need to create a zero length (Boolean) property 30905 * removable-media for the removable media devices. 30906 * Note that the return value of the property is not being 30907 * checked, since if unable to create the property 30908 * then do not want the attach to fail altogether. Consistent 30909 * with other property creation in attach. 30910 */ 30911 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 30912 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 30913 30914 } else { 30915 /* 30916 * create device ID for device 30917 */ 30918 un->un_f_devid_supported = TRUE; 30919 30920 /* 30921 * Spin up non-removable-media devices once it is attached 30922 */ 30923 un->un_f_attach_spinup = TRUE; 30924 30925 /* 30926 * According to SCSI specification, Sense data has two kinds of 30927 * format: fixed format, and descriptor format. At present, we 30928 * don't support descriptor format sense data for removable 30929 * media. 30930 */ 30931 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) { 30932 un->un_f_descr_format_supported = TRUE; 30933 } 30934 30935 /* 30936 * kstats are created only for non-removable media devices. 30937 * 30938 * Set this in sd.conf to 0 in order to disable kstats. The 30939 * default is 1, so they are enabled by default. 30940 */ 30941 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 30942 SD_DEVINFO(un), DDI_PROP_DONTPASS, 30943 "enable-partition-kstats", 1)); 30944 30945 /* 30946 * Check if HBA has set the "pm-capable" property. 30947 * If "pm-capable" exists and is non-zero then we can 30948 * power manage the device without checking the start/stop 30949 * cycle count log sense page. 30950 * 30951 * If "pm-capable" exists and is set to be false (0), 30952 * then we should not power manage the device. 30953 * 30954 * If "pm-capable" doesn't exist then pm_cap will 30955 * be set to SD_PM_CAPABLE_UNDEFINED (-1). 
In this case, 30956 * sd will check the start/stop cycle count log sense page 30957 * and power manage the device if the cycle count limit has 30958 * not been exceeded. 30959 */ 30960 pm_cap = ddi_prop_get_int(DDI_DEV_T_ANY, devi, 30961 DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED); 30962 if (SD_PM_CAPABLE_IS_UNDEFINED(pm_cap)) { 30963 un->un_f_log_sense_supported = TRUE; 30964 if (!un->un_f_power_condition_disabled && 30965 SD_INQUIRY(un)->inq_ansi == 6) { 30966 un->un_f_power_condition_supported = TRUE; 30967 } 30968 } else { 30969 /* 30970 * pm-capable property exists. 30971 * 30972 * Convert "TRUE" values for pm_cap to 30973 * SD_PM_CAPABLE_IS_TRUE to make it easier to check 30974 * later. "TRUE" values are any values defined in 30975 * inquiry.h. 30976 */ 30977 if (SD_PM_CAPABLE_IS_FALSE(pm_cap)) { 30978 un->un_f_log_sense_supported = FALSE; 30979 } else { 30980 /* SD_PM_CAPABLE_IS_TRUE case */ 30981 un->un_f_pm_supported = TRUE; 30982 if (!un->un_f_power_condition_disabled && 30983 SD_PM_CAPABLE_IS_SPC_4(pm_cap)) { 30984 un->un_f_power_condition_supported = 30985 TRUE; 30986 } 30987 if (SD_PM_CAP_LOG_SUPPORTED(pm_cap)) { 30988 un->un_f_log_sense_supported = TRUE; 30989 un->un_f_pm_log_sense_smart = 30990 SD_PM_CAP_SMART_LOG(pm_cap); 30991 } 30992 } 30993 30994 SD_INFO(SD_LOG_ATTACH_DETACH, un, 30995 "sd_unit_attach: un:0x%p pm-capable " 30996 "property set to %d.\n", un, un->un_f_pm_supported); 30997 } 30998 } 30999 31000 if (un->un_f_is_hotpluggable) { 31001 31002 /* 31003 * Have to watch hotpluggable devices as well, since 31004 * that's the only way for userland applications to 31005 * detect hot removal while device is busy/mounted. 31006 */ 31007 un->un_f_monitor_media_state = TRUE; 31008 31009 un->un_f_check_start_stop = TRUE; 31010 31011 } 31012 } 31013 31014 /* 31015 * sd_tg_rdwr: 31016 * Provides rdwr access for cmlb via sd_tgops. The start_block is 31017 * in sys block size, req_length in bytes. 
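 *
 *	For example (values illustrative): with 512-byte system blocks and
 *	a 2048-byte target blocksize, start_block 5 maps to byte offset
 *	2560, i.e. target block 1 at offset 512 within that block, so the
 *	unaligned request is bounced through a scratch buffer (dkl) that
 *	is rounded up to whole target blocks.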
31018 * 31019 */ 31020 static int 31021 sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 31022 diskaddr_t start_block, size_t reqlength, void *tg_cookie) 31023 { 31024 struct sd_lun *un; 31025 int path_flag = (int)(uintptr_t)tg_cookie; 31026 char *dkl = NULL; 31027 diskaddr_t real_addr = start_block; 31028 diskaddr_t first_byte, end_block; 31029 31030 size_t buffer_size = reqlength; 31031 int rval = 0; 31032 diskaddr_t cap; 31033 uint32_t lbasize; 31034 sd_ssc_t *ssc; 31035 31036 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 31037 if (un == NULL) 31038 return (ENXIO); 31039 31040 if (cmd != TG_READ && cmd != TG_WRITE) 31041 return (EINVAL); 31042 31043 ssc = sd_ssc_init(un); 31044 mutex_enter(SD_MUTEX(un)); 31045 if (un->un_f_tgt_blocksize_is_valid == FALSE) { 31046 mutex_exit(SD_MUTEX(un)); 31047 rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap, 31048 &lbasize, path_flag); 31049 if (rval != 0) 31050 goto done1; 31051 mutex_enter(SD_MUTEX(un)); 31052 sd_update_block_info(un, lbasize, cap); 31053 if ((un->un_f_tgt_blocksize_is_valid == FALSE)) { 31054 mutex_exit(SD_MUTEX(un)); 31055 rval = EIO; 31056 goto done; 31057 } 31058 } 31059 31060 if (NOT_DEVBSIZE(un)) { 31061 /* 31062 * sys_blocksize != tgt_blocksize, need to re-adjust 31063 * blkno and save the index to beginning of dk_label 31064 */ 31065 first_byte = SD_SYSBLOCKS2BYTES(start_block); 31066 real_addr = first_byte / un->un_tgt_blocksize; 31067 31068 end_block = (first_byte + reqlength + 31069 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize; 31070 31071 /* round up buffer size to multiple of target block size */ 31072 buffer_size = (end_block - real_addr) * un->un_tgt_blocksize; 31073 31074 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr", 31075 "label_addr: 0x%x allocation size: 0x%x\n", 31076 real_addr, buffer_size); 31077 31078 if (((first_byte % un->un_tgt_blocksize) != 0) || 31079 (reqlength % un->un_tgt_blocksize) != 0) 31080 /* the request is not aligned */ 31081 dkl = kmem_zalloc(buffer_size, KM_SLEEP); 31082 } 31083 31084 /* 31085 * The MMC standard allows READ CAPACITY to be 31086 * inaccurate by a bounded amount (in the interest of 31087 * response latency). As a result, failed READs are 31088 * commonplace (due to the reading of metadata and not 31089 * data). Depending on the per-Vendor/drive Sense data, 31090 * the failed READ can cause many (unnecessary) retries. 31091 */ 31092 31093 if (ISCD(un) && (cmd == TG_READ) && 31094 (un->un_f_blockcount_is_valid == TRUE) && 31095 ((start_block == (un->un_blockcount - 1))|| 31096 (start_block == (un->un_blockcount - 2)))) { 31097 path_flag = SD_PATH_DIRECT_PRIORITY; 31098 } 31099 31100 mutex_exit(SD_MUTEX(un)); 31101 if (cmd == TG_READ) { 31102 rval = sd_send_scsi_READ(ssc, (dkl != NULL)? dkl: bufaddr, 31103 buffer_size, real_addr, path_flag); 31104 if (dkl != NULL) 31105 bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block, 31106 real_addr), bufaddr, reqlength); 31107 } else { 31108 if (dkl) { 31109 rval = sd_send_scsi_READ(ssc, dkl, buffer_size, 31110 real_addr, path_flag); 31111 if (rval) { 31112 goto done1; 31113 } 31114 bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block, 31115 real_addr), reqlength); 31116 } 31117 rval = sd_send_scsi_WRITE(ssc, (dkl != NULL)? 
dkl: bufaddr, 31118 buffer_size, real_addr, path_flag); 31119 } 31120 31121 done1: 31122 if (dkl != NULL) 31123 kmem_free(dkl, buffer_size); 31124 31125 if (rval != 0) { 31126 if (rval == EIO) 31127 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 31128 else 31129 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 31130 } 31131 done: 31132 sd_ssc_fini(ssc); 31133 return (rval); 31134 } 31135 31136 31137 static int 31138 sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie) 31139 { 31140 31141 struct sd_lun *un; 31142 diskaddr_t cap; 31143 uint32_t lbasize; 31144 int path_flag = (int)(uintptr_t)tg_cookie; 31145 int ret = 0; 31146 31147 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 31148 if (un == NULL) 31149 return (ENXIO); 31150 31151 switch (cmd) { 31152 case TG_GETPHYGEOM: 31153 case TG_GETVIRTGEOM: 31154 case TG_GETCAPACITY: 31155 case TG_GETBLOCKSIZE: 31156 mutex_enter(SD_MUTEX(un)); 31157 31158 if ((un->un_f_blockcount_is_valid == TRUE) && 31159 (un->un_f_tgt_blocksize_is_valid == TRUE)) { 31160 cap = un->un_blockcount; 31161 lbasize = un->un_tgt_blocksize; 31162 mutex_exit(SD_MUTEX(un)); 31163 } else { 31164 sd_ssc_t *ssc; 31165 mutex_exit(SD_MUTEX(un)); 31166 ssc = sd_ssc_init(un); 31167 ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap, 31168 &lbasize, path_flag); 31169 if (ret != 0) { 31170 if (ret == EIO) 31171 sd_ssc_assessment(ssc, 31172 SD_FMT_STATUS_CHECK); 31173 else 31174 sd_ssc_assessment(ssc, 31175 SD_FMT_IGNORE); 31176 sd_ssc_fini(ssc); 31177 return (ret); 31178 } 31179 sd_ssc_fini(ssc); 31180 mutex_enter(SD_MUTEX(un)); 31181 sd_update_block_info(un, lbasize, cap); 31182 if ((un->un_f_blockcount_is_valid == FALSE) || 31183 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 31184 mutex_exit(SD_MUTEX(un)); 31185 return (EIO); 31186 } 31187 mutex_exit(SD_MUTEX(un)); 31188 } 31189 31190 if (cmd == TG_GETCAPACITY) { 31191 *(diskaddr_t *)arg = cap; 31192 return (0); 31193 } 31194 31195 if (cmd == TG_GETBLOCKSIZE) { 31196 *(uint32_t *)arg = lbasize; 31197 return (0); 31198 } 31199 31200 if (cmd == TG_GETPHYGEOM) 31201 ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg, 31202 cap, lbasize, path_flag); 31203 else 31204 /* TG_GETVIRTGEOM */ 31205 ret = sd_get_virtual_geometry(un, 31206 (cmlb_geom_t *)arg, cap, lbasize); 31207 31208 return (ret); 31209 31210 case TG_GETATTR: 31211 mutex_enter(SD_MUTEX(un)); 31212 ((tg_attribute_t *)arg)->media_is_writable = 31213 un->un_f_mmc_writable_media; 31214 ((tg_attribute_t *)arg)->media_is_solid_state = 31215 un->un_f_is_solid_state; 31216 ((tg_attribute_t *)arg)->media_is_rotational = 31217 un->un_f_is_rotational; 31218 mutex_exit(SD_MUTEX(un)); 31219 return (0); 31220 default: 31221 return (ENOTTY); 31222 31223 } 31224 } 31225 31226 /* 31227 * Function: sd_ssc_ereport_post 31228 * 31229 * Description: Will be called when SD driver need to post an ereport. 31230 * 31231 * Context: Kernel thread or interrupt context. 31232 */ 31233 31234 #define DEVID_IF_KNOWN(d) "devid", DATA_TYPE_STRING, (d) ? 
(d) : "unknown" 31235 31236 static void 31237 sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess) 31238 { 31239 int uscsi_path_instance = 0; 31240 uchar_t uscsi_pkt_reason; 31241 uint32_t uscsi_pkt_state; 31242 uint32_t uscsi_pkt_statistics; 31243 uint64_t uscsi_ena; 31244 uchar_t op_code; 31245 uint8_t *sensep; 31246 union scsi_cdb *cdbp; 31247 uint_t cdblen = 0; 31248 uint_t senlen = 0; 31249 struct sd_lun *un; 31250 dev_info_t *dip; 31251 char *devid; 31252 int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON | 31253 SSC_FLAGS_INVALID_STATUS | 31254 SSC_FLAGS_INVALID_SENSE | 31255 SSC_FLAGS_INVALID_DATA; 31256 char assessment[16]; 31257 31258 ASSERT(ssc != NULL); 31259 ASSERT(ssc->ssc_uscsi_cmd != NULL); 31260 ASSERT(ssc->ssc_uscsi_info != NULL); 31261 31262 un = ssc->ssc_un; 31263 ASSERT(un != NULL); 31264 31265 dip = un->un_sd->sd_dev; 31266 31267 /* 31268 * Get the devid: 31269 * devid will only be passed to non-transport error reports. 31270 */ 31271 devid = DEVI(dip)->devi_devid_str; 31272 31273 /* 31274 * If we are syncing or dumping, the command will not be executed 31275 * so we bypass this situation. 31276 */ 31277 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 31278 (un->un_state == SD_STATE_DUMPING)) 31279 return; 31280 31281 uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason; 31282 uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance; 31283 uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state; 31284 uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics; 31285 uscsi_ena = ssc->ssc_uscsi_info->ui_ena; 31286 31287 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 31288 cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb; 31289 31290 /* In rare cases, EG:DOORLOCK, the cdb could be NULL */ 31291 if (cdbp == NULL) { 31292 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 31293 "sd_ssc_ereport_post meet empty cdb\n"); 31294 return; 31295 } 31296 31297 op_code = cdbp->scc_cmd; 31298 31299 cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen; 31300 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 31301 ssc->ssc_uscsi_cmd->uscsi_rqresid); 31302 31303 if (senlen > 0) 31304 ASSERT(sensep != NULL); 31305 31306 /* 31307 * Initialize drv_assess to corresponding values. 31308 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending 31309 * on the sense-key returned back. 31310 */ 31311 switch (drv_assess) { 31312 case SD_FM_DRV_RECOVERY: 31313 (void) sprintf(assessment, "%s", "recovered"); 31314 break; 31315 case SD_FM_DRV_RETRY: 31316 (void) sprintf(assessment, "%s", "retry"); 31317 break; 31318 case SD_FM_DRV_NOTICE: 31319 (void) sprintf(assessment, "%s", "info"); 31320 break; 31321 case SD_FM_DRV_FATAL: 31322 default: 31323 (void) sprintf(assessment, "%s", "unknown"); 31324 } 31325 /* 31326 * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered 31327 * command, we will post ereport.io.scsi.cmd.disk.recovered. 31328 * driver-assessment will always be "recovered" here. 
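 *
 * For reference, the ereport classes posted by this routine are:
 *	cmd.disk.recovered	- recovered commands (this case)
 *	cmd.disk.dev.uderr	- un-decodable sense/status/data
 *	cmd.disk.tran		- transport errors
 *	cmd.disk.dev.rqs.merr	- sense data with KEY_MEDIUM_ERROR
 *	cmd.disk.dev.rqs.derr	- sense data with any other sense key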
     */
    if (drv_assess == SD_FM_DRV_RECOVERY) {
        scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
            "cmd.disk.recovered", uscsi_ena, devid, NULL,
            DDI_NOSLEEP, NULL,
            FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
            DEVID_IF_KNOWN(devid),
            "driver-assessment", DATA_TYPE_STRING, assessment,
            "op-code", DATA_TYPE_UINT8, op_code,
            "cdb", DATA_TYPE_UINT8_ARRAY,
            cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
            "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
            "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
            "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
            NULL);
        return;
    }

    /*
     * If there is unexpected or undecodable data, post
     * ereport.io.scsi.cmd.disk.dev.uderr; driver-assessment is set
     * from the drv_assess parameter.
     * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back.
     * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered.
     * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered.
     * SSC_FLAGS_INVALID_DATA - invalid data sent back.
     */
    if (ssc->ssc_flags & ssc_invalid_flags) {
        if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) {
            scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
                NULL, "cmd.disk.dev.uderr", uscsi_ena, devid,
                NULL, DDI_NOSLEEP, NULL,
                FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
                DEVID_IF_KNOWN(devid),
                "driver-assessment", DATA_TYPE_STRING,
                drv_assess == SD_FM_DRV_FATAL ?
                "fail" : assessment,
                "op-code", DATA_TYPE_UINT8, op_code,
                "cdb", DATA_TYPE_UINT8_ARRAY,
                cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
                "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
                "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
                "pkt-stats", DATA_TYPE_UINT32,
                uscsi_pkt_statistics,
                "stat-code", DATA_TYPE_UINT8,
                ssc->ssc_uscsi_cmd->uscsi_status,
                "un-decode-info", DATA_TYPE_STRING,
                ssc->ssc_info,
                "un-decode-value", DATA_TYPE_UINT8_ARRAY,
                senlen, sensep,
                NULL);
        } else {
            /*
             * For other types of invalid data, the
             * un-decode-value field is left empty because the
             * undecodable content can be seen in the upper
             * level payload or in un-decode-info.
             */
            scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
                NULL,
                "cmd.disk.dev.uderr", uscsi_ena, devid,
                NULL, DDI_NOSLEEP, NULL,
                FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
                DEVID_IF_KNOWN(devid),
                "driver-assessment", DATA_TYPE_STRING,
                drv_assess == SD_FM_DRV_FATAL ?
                "fail" : assessment,
                "op-code", DATA_TYPE_UINT8, op_code,
                "cdb", DATA_TYPE_UINT8_ARRAY,
                cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
                "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
                "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
                "pkt-stats", DATA_TYPE_UINT32,
                uscsi_pkt_statistics,
                "stat-code", DATA_TYPE_UINT8,
                ssc->ssc_uscsi_cmd->uscsi_status,
                "un-decode-info", DATA_TYPE_STRING,
                ssc->ssc_info,
                "un-decode-value", DATA_TYPE_UINT8_ARRAY,
                0, NULL,
                NULL);
        }
        ssc->ssc_flags &= ~ssc_invalid_flags;
        return;
    }

    if (uscsi_pkt_reason != CMD_CMPLT ||
        (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) {
        /*
         * Either pkt-reason != CMD_CMPLT, or SSC_FLAGS_TRAN_ABORT was
         * set inside sd_start_cmds due to an error (bad packet or
         * fatal transport error). Treat it as a transport error and
         * post ereport.io.scsi.cmd.disk.tran; driver-assessment is
         * set from drv_assess. devid is set to NULL because this is
         * a transport error.
         */
        if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)
            ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT;

        scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
            "cmd.disk.tran", uscsi_ena, NULL, NULL, DDI_NOSLEEP, NULL,
            FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
            DEVID_IF_KNOWN(devid),
            "driver-assessment", DATA_TYPE_STRING,
            drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
            "op-code", DATA_TYPE_UINT8, op_code,
            "cdb", DATA_TYPE_UINT8_ARRAY,
            cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
            "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
            "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
            "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
            NULL);
    } else {
        /*
         * If we got here, we have a completed command and need to
         * examine the sense data to decide which ereport to post.
         * No ereport is needed if the sense key is
         * KEY_RECOVERABLE_ERROR and the asc/ascq is "ATA PASS-THROUGH
         * INFORMATION AVAILABLE".
         * Post ereport.io.scsi.cmd.disk.dev.rqs.merr if the sense key
         * is KEY_MEDIUM_ERROR, and ereport.io.scsi.cmd.disk.dev.rqs.derr
         * otherwise. driver-assessment is set from the drv_assess
         * parameter.
         */
        if (senlen > 0) {
            /*
             * Sense data is available here.
             */
            uint8_t sense_key = scsi_sense_key(sensep);
            uint8_t sense_asc = scsi_sense_asc(sensep);
            uint8_t sense_ascq = scsi_sense_ascq(sensep);

            if (sense_key == KEY_RECOVERABLE_ERROR &&
                sense_asc == 0x00 && sense_ascq == 0x1d)
                return;

            if (sense_key == KEY_MEDIUM_ERROR) {
                /*
                 * driver-assessment is "fatal" if
                 * drv_assess is SD_FM_DRV_FATAL.
                 */
                scsi_fm_ereport_post(un->un_sd,
                    uscsi_path_instance, NULL,
                    "cmd.disk.dev.rqs.merr",
                    uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL,
                    FM_VERSION, DATA_TYPE_UINT8,
                    FM_EREPORT_VERS0,
                    DEVID_IF_KNOWN(devid),
                    "driver-assessment",
                    DATA_TYPE_STRING,
                    drv_assess == SD_FM_DRV_FATAL ?
                    "fatal" : assessment,
                    "op-code",
                    DATA_TYPE_UINT8, op_code,
                    "cdb",
                    DATA_TYPE_UINT8_ARRAY, cdblen,
                    ssc->ssc_uscsi_cmd->uscsi_cdb,
                    "pkt-reason",
                    DATA_TYPE_UINT8, uscsi_pkt_reason,
                    "pkt-state",
                    DATA_TYPE_UINT32, uscsi_pkt_state,
                    "pkt-stats",
                    DATA_TYPE_UINT32,
                    uscsi_pkt_statistics,
                    "stat-code",
                    DATA_TYPE_UINT8,
                    ssc->ssc_uscsi_cmd->uscsi_status,
                    "key",
                    DATA_TYPE_UINT8,
                    scsi_sense_key(sensep),
                    "asc",
                    DATA_TYPE_UINT8,
                    scsi_sense_asc(sensep),
                    "ascq",
                    DATA_TYPE_UINT8,
                    scsi_sense_ascq(sensep),
                    "sense-data",
                    DATA_TYPE_UINT8_ARRAY,
                    senlen, sensep,
                    "lba",
                    DATA_TYPE_UINT64,
                    ssc->ssc_uscsi_info->ui_lba,
                    NULL);
            } else {
                /*
                 * If the sense key is KEY_HARDWARE_ERROR
                 * (0x4), driver-assessment is "fatal" if
                 * drv_assess is SD_FM_DRV_FATAL.
                 */
                scsi_fm_ereport_post(un->un_sd,
                    uscsi_path_instance, NULL,
                    "cmd.disk.dev.rqs.derr",
                    uscsi_ena, devid,
                    NULL, DDI_NOSLEEP, NULL,
                    FM_VERSION,
                    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
                    DEVID_IF_KNOWN(devid),
                    "driver-assessment",
                    DATA_TYPE_STRING,
                    drv_assess == SD_FM_DRV_FATAL ?
                    (sense_key == KEY_HARDWARE_ERROR ?
                    "fatal" : "fail") : assessment,
                    "op-code",
                    DATA_TYPE_UINT8, op_code,
                    "cdb",
                    DATA_TYPE_UINT8_ARRAY, cdblen,
                    ssc->ssc_uscsi_cmd->uscsi_cdb,
                    "pkt-reason",
                    DATA_TYPE_UINT8, uscsi_pkt_reason,
                    "pkt-state",
                    DATA_TYPE_UINT32, uscsi_pkt_state,
                    "pkt-stats",
                    DATA_TYPE_UINT32,
                    uscsi_pkt_statistics,
                    "stat-code",
                    DATA_TYPE_UINT8,
                    ssc->ssc_uscsi_cmd->uscsi_status,
                    "key",
                    DATA_TYPE_UINT8,
                    scsi_sense_key(sensep),
                    "asc",
                    DATA_TYPE_UINT8,
                    scsi_sense_asc(sensep),
                    "ascq",
                    DATA_TYPE_UINT8,
                    scsi_sense_ascq(sensep),
                    "sense-data",
                    DATA_TYPE_UINT8_ARRAY,
                    senlen, sensep,
                    NULL);
            }
        } else {
            /*
             * A stat_code of STATUS_GOOD is not a hardware
             * error, so no ereport is needed.
             */
            if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD)
                return;

            /*
             * Post ereport.io.scsi.cmd.disk.dev.serr if we got a
             * bad stat-code but sense data is unavailable.
             * driver-assessment is set from the drv_assess
             * parameter.
             */
            scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
                NULL,
                "cmd.disk.dev.serr", uscsi_ena,
                devid, NULL, DDI_NOSLEEP, NULL,
                FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
                DEVID_IF_KNOWN(devid),
                "driver-assessment", DATA_TYPE_STRING,
                drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
                "op-code", DATA_TYPE_UINT8, op_code,
                "cdb",
                DATA_TYPE_UINT8_ARRAY,
                cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
                "pkt-reason",
                DATA_TYPE_UINT8, uscsi_pkt_reason,
                "pkt-state",
                DATA_TYPE_UINT32, uscsi_pkt_state,
                "pkt-stats",
                DATA_TYPE_UINT32, uscsi_pkt_statistics,
                "stat-code",
                DATA_TYPE_UINT8,
                ssc->ssc_uscsi_cmd->uscsi_status,
                NULL);
        }
    }
}

/*
 *    Function: sd_ssc_extract_info
 *
 * Description: Extract the information needed to generate an ereport.
 *
 *     Context: Kernel thread or interrupt context.
 */
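/*
 * A minimal, illustrative call sketch (the actual call sites are in the
 * driver's command completion paths; names here mirror the parameters):
 *
 *    mutex_enter(SD_MUTEX(un));
 *    sd_ssc_extract_info(ssc, un, pktp, bp, xp);
 *    mutex_exit(SD_MUTEX(un));
 *    sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
 *
 * SD_MUTEX(un) must be held across the call (see the ASSERT below), and
 * repeated extractions for one command reuse xp->xb_ena, so all ereports
 * for that command share a single ENA.
 */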
static void
sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, struct sd_xbuf *xp)
{
    size_t senlen = 0;
    union scsi_cdb *cdbp;
    int path_instance;
    /*
     * The scsi_cdb_size array is needed to determine the cdb length.
     */
    extern uchar_t scsi_cdb_size[];

    ASSERT(un != NULL);
    ASSERT(pktp != NULL);
    ASSERT(bp != NULL);
    ASSERT(xp != NULL);
    ASSERT(ssc != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));

    /*
     * Transfer the cdb buffer pointer here.
     */
    cdbp = (union scsi_cdb *)pktp->pkt_cdbp;

    ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)];
    ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp;

    /*
     * Transfer the sense data buffer pointer if sense data is available;
     * calculate the sense data length first.
     */
    if ((xp->xb_sense_state & STATE_XARQ_DONE) ||
        (xp->xb_sense_state & STATE_ARQ_DONE)) {
        /*
         * ARQ case: the sense data was fetched automatically.
         */
        if (xp->xb_sense_state & STATE_XARQ_DONE) {
            senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid;
        } else {
            senlen = SENSE_LENGTH;
        }
    } else {
        /*
         * Non-ARQ case: sense data, if any, was transferred as
         * ordinary data.
         */
        if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK &&
            (xp->xb_sense_state & STATE_XFERRED_DATA)) {
            senlen = SENSE_LENGTH - xp->xb_sense_resid;
        }
    }

    ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff);
    ssc->ssc_uscsi_cmd->uscsi_rqresid = 0;
    ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data;

    ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);

    /*
     * Only transfer path_instance when the scsi_pkt was properly
     * allocated.
     */
    path_instance = pktp->pkt_path_instance;
    if (scsi_pkt_allocated_correctly(pktp) && path_instance)
        ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance;
    else
        ssc->ssc_uscsi_cmd->uscsi_path_instance = 0;

    /*
     * Copy in the other fields we may need when posting an ereport.
     */
    ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason;
    ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state;
    ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics;
    ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp);

    /*
     * Do not create an ENA for a command that completed successfully;
     * otherwise it could mistakenly be recognized as recovered.
     */
    if ((pktp->pkt_reason == CMD_CMPLT) &&
        (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) &&
        (senlen == 0)) {
        return;
    }

    /*
     * To associate the ereports of a single command execution flow, we
     * need a shared ENA for that command.
     */
    if (xp->xb_ena == 0)
        xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
    ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
}


/*
 *    Function: sd_check_bdc_vpd
 *
 * Description: Query the optional INQUIRY VPD page 0xb1. If the device
 *              supports VPD page 0xb1, sd examines the MEDIUM ROTATION
 *              RATE.
 *
 *              Set the following based on the RPM value:
 *                  = 0   device is not solid state, non-rotational
 *                  = 1   device is solid state, non-rotational
 *                  > 1   device is not solid state, rotational
 *
 *     Context: Kernel thread or interrupt context.
 */
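/*
 * Bytes 4 and 5 of the Block Device Characteristics page hold the
 * big-endian MEDIUM ROTATION RATE. A minimal decoding sketch (the "rate"
 * variable is illustrative; the code below reads the bytes directly):
 *
 *    uint16_t rate = (inqb1[4] << 8) | inqb1[5];
 *
 *    rate == 0: rate not reported; treated here as non-rotational
 *    rate == 1: non-rotating medium, i.e. solid state
 *    rate >= 2: nominal rotation rate in rpm; rotational defaults kept
 */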

static void
sd_check_bdc_vpd(sd_ssc_t *ssc)
{
    int rval = 0;
    uchar_t *inqb1 = NULL;
    size_t inqb1_len = MAX_INQUIRY_SIZE;
    size_t inqb1_resid = 0;
    struct sd_lun *un;

    ASSERT(ssc != NULL);
    un = ssc->ssc_un;
    ASSERT(un != NULL);
    ASSERT(!mutex_owned(SD_MUTEX(un)));

    mutex_enter(SD_MUTEX(un));
    un->un_f_is_rotational = TRUE;
    un->un_f_is_solid_state = FALSE;

    if (ISCD(un)) {
        mutex_exit(SD_MUTEX(un));
        return;
    }

    if (sd_check_vpd_page_support(ssc) == 0 &&
        un->un_vpd_page_mask & SD_VPD_DEV_CHARACTER_PG) {
        mutex_exit(SD_MUTEX(un));
        /* collect page b1 data */
        inqb1 = kmem_zalloc(inqb1_len, KM_SLEEP);

        rval = sd_send_scsi_INQUIRY(ssc, inqb1, inqb1_len,
            0x01, 0xB1, &inqb1_resid);

        if (rval == 0 && (inqb1_len - inqb1_resid > 5)) {
            SD_TRACE(SD_LOG_COMMON, un,
                "sd_check_bdc_vpd: got VPD page: %x "
                "PAGE LENGTH: %x BYTE 4: %x BYTE 5: %x",
                inqb1[1], inqb1[3], inqb1[4], inqb1[5]);

            mutex_enter(SD_MUTEX(un));
            /*
             * Check the MEDIUM ROTATION RATE.
             */
            if (inqb1[4] == 0) {
                if (inqb1[5] == 0) {
                    un->un_f_is_rotational = FALSE;
                } else if (inqb1[5] == 1) {
                    un->un_f_is_rotational = FALSE;
                    un->un_f_is_solid_state = TRUE;
                    /*
                     * Solid state drives don't need
                     * disksort.
                     */
                    un->un_f_disksort_disabled = TRUE;
                }
            }
            mutex_exit(SD_MUTEX(un));
        } else if (rval != 0) {
            sd_ssc_assessment(ssc, SD_FMT_IGNORE);
        }

        kmem_free(inqb1, inqb1_len);
    } else {
        mutex_exit(SD_MUTEX(un));
    }
}

/*
 *    Function: sd_check_emulation_mode
 *
 * Description: Check whether the SSD is in emulation mode by issuing
 *              READ CAPACITY (16) to obtain the physical block size
 *              of the drive.
 *
 *     Context: Kernel thread or interrupt context.
 */

static void
sd_check_emulation_mode(sd_ssc_t *ssc)
{
    int rval = 0;
    uint64_t capacity;
    uint_t lbasize;
    uint_t pbsize;
    int i;
    int devid_len;
    struct sd_lun *un;

    ASSERT(ssc != NULL);
    un = ssc->ssc_un;
    ASSERT(un != NULL);
    ASSERT(!mutex_owned(SD_MUTEX(un)));

    mutex_enter(SD_MUTEX(un));
    if (ISCD(un)) {
        mutex_exit(SD_MUTEX(un));
        return;
    }

    if (un->un_f_descr_format_supported) {
        mutex_exit(SD_MUTEX(un));
        rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize,
            &pbsize, SD_PATH_DIRECT);
        mutex_enter(SD_MUTEX(un));

        if (rval != 0) {
            un->un_phy_blocksize = DEV_BSIZE;
        } else {
            if (pbsize == 0 || (pbsize % DEV_BSIZE) != 0) {
                un->un_phy_blocksize = DEV_BSIZE;
            } else if (pbsize > un->un_phy_blocksize) {
                /*
                 * Don't reset the physical blocksize
                 * unless we've detected a larger value.
                 */
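                /*
                 * For example, a 512-byte-emulation (512e)
                 * drive reports lbasize 512 with pbsize 4096;
                 * the check above raises un_phy_blocksize
                 * to 4096.
                 */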
                un->un_phy_blocksize = pbsize;
            }
        }
    }

    for (i = 0; i < sd_flash_dev_table_size; i++) {
        devid_len = (int)strlen(sd_flash_dev_table[i]);
        if (sd_sdconf_id_match(un, sd_flash_dev_table[i], devid_len)
            == SD_SUCCESS) {
            un->un_phy_blocksize = SSD_SECSIZE;
            if (un->un_f_is_solid_state &&
                un->un_phy_blocksize != un->un_tgt_blocksize)
                un->un_f_enable_rmw = TRUE;
        }
    }

    mutex_exit(SD_MUTEX(un));
}
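
/*
 * For reference: sd_flash_dev_table (declared elsewhere in this driver) is
 * a list of INQUIRY vendor/product identifiers of flash devices known to
 * emulate a 512-byte logical block size. A hypothetical entry would look
 * like:
 *
 *    static char *sd_flash_dev_table[] = { "ATA     ExampleSSD" };
 *
 * A match forces un_phy_blocksize to SSD_SECSIZE, and if the target block
 * size still differs on a solid-state device, read-modify-write
 * (un_f_enable_rmw) is enabled so writes smaller than the physical block
 * can be handled.
 */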