/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 */
/*
 * Copyright 2011 cyril.galibern@opensvc.com
 */

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define _LP64
#define __amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>

#include <sys/fm/protocol.h>

/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define SD_MODULE_NAME "SCSI SSA/FCAL Disk Driver"
#else /* !__fibre */
#define SD_MODULE_NAME "SCSI Disk Driver"
#endif /* !__fibre */

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however, at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode, and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * driver will assume parallel SCSI behaviors unless the
 * "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd).
 * (Note that the INTERCONNECT_1394 and INTERCONNECT_USB types are not
 * supported and will result in the driver assuming parallel SCSI
 * behaviors.)
 *
 * (See common/sys/scsi/impl/services.h.)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default,
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides, things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_FIBRE
#else
#define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_PARALLEL
#endif

/*
 * The name of the driver, established from the module name in _init.
 */
static char *sd_label = NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define sd_max_xfer_size ssd_max_xfer_size
#define sd_config_list ssd_config_list
static char *sd_max_xfer_size = "ssd_max_xfer_size";
static char *sd_config_list = "ssd-config-list";
#else
static char *sd_max_xfer_size = "sd_max_xfer_size";
static char *sd_config_list = "sd-config-list";
#endif
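/*
 * Illustrative sketch (not compiled): how the SD_DEFAULT_INTERCONNECT_TYPE
 * fallback above is meant to combine with the HBA's "interconnect-type"
 * property during attach. The use of ddi_prop_get_int(9F) and the local
 * names here are assumptions for illustration; the real check lives in
 * the attach path:
 *
 *	switch (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0,
 *	    "interconnect-type", -1)) {
 *	case INTERCONNECT_FIBRE:
 *	case INTERCONNECT_SSA:
 *	case INTERCONNECT_FABRIC:
 *		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
 *		break;
 *	default:
 *		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
 *		break;
 *	}
 */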
/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define sd_state ssd_state
#define sd_io_time ssd_io_time
#define sd_failfast_enable ssd_failfast_enable
#define sd_ua_retry_count ssd_ua_retry_count
#define sd_report_pfa ssd_report_pfa
#define sd_max_throttle ssd_max_throttle
#define sd_min_throttle ssd_min_throttle
#define sd_rot_delay ssd_rot_delay

#define sd_retry_on_reservation_conflict \
    ssd_retry_on_reservation_conflict
#define sd_reinstate_resv_delay ssd_reinstate_resv_delay
#define sd_resv_conflict_name ssd_resv_conflict_name

#define sd_component_mask ssd_component_mask
#define sd_level_mask ssd_level_mask
#define sd_debug_un ssd_debug_un
#define sd_error_level ssd_error_level

#define sd_xbuf_active_limit ssd_xbuf_active_limit
#define sd_xbuf_reserve_limit ssd_xbuf_reserve_limit

#define sd_tr ssd_tr
#define sd_reset_throttle_timeout ssd_reset_throttle_timeout
#define sd_qfull_throttle_timeout ssd_qfull_throttle_timeout
#define sd_qfull_throttle_enable ssd_qfull_throttle_enable
#define sd_check_media_time ssd_check_media_time
#define sd_wait_cmds_complete ssd_wait_cmds_complete
#define sd_label_mutex ssd_label_mutex
#define sd_detach_mutex ssd_detach_mutex
#define sd_log_buf ssd_log_buf
#define sd_log_mutex ssd_log_mutex

#define sd_disk_table ssd_disk_table
#define sd_disk_table_size ssd_disk_table_size
#define sd_sense_mutex ssd_sense_mutex
#define sd_cdbtab ssd_cdbtab

#define sd_cb_ops ssd_cb_ops
#define sd_ops ssd_ops
#define sd_additional_codes ssd_additional_codes
#define sd_tgops ssd_tgops

#define sd_minor_data ssd_minor_data
#define sd_minor_data_efi ssd_minor_data_efi

#define sd_tq ssd_tq
#define sd_wmr_tq ssd_wmr_tq
#define sd_taskq_name ssd_taskq_name
#define sd_wmr_taskq_name ssd_wmr_taskq_name
#define sd_taskq_minalloc ssd_taskq_minalloc
#define sd_taskq_maxalloc ssd_taskq_maxalloc

#define sd_dump_format_string ssd_dump_format_string

#define sd_iostart_chain ssd_iostart_chain
#define sd_iodone_chain ssd_iodone_chain

#define sd_pm_idletime ssd_pm_idletime

#define sd_force_pm_supported ssd_force_pm_supported

#define sd_dtype_optical_bind ssd_dtype_optical_bind

#define sd_ssc_init ssd_ssc_init
#define sd_ssc_send ssd_ssc_send
#define sd_ssc_fini ssd_ssc_fini
#define sd_ssc_assessment ssd_ssc_assessment
#define sd_ssc_post ssd_ssc_post
#define sd_ssc_print ssd_ssc_print
#define sd_ssc_ereport_post ssd_ssc_ereport_post
#define sd_ssc_set_info ssd_ssc_set_info
#define sd_ssc_extract_info ssd_ssc_extract_info

#endif

#ifdef SDDEBUG
int sd_force_pm_supported = 0;
#endif /* SDDEBUG */

void *sd_state = NULL;
int sd_io_time = SD_IO_TIME;
int sd_failfast_enable = 1;
int sd_ua_retry_count = SD_UA_RETRY_COUNT;
int sd_report_pfa = 1;
int sd_max_throttle = SD_MAX_THROTTLE;
int sd_min_throttle = SD_MIN_THROTTLE;
int sd_rot_delay = 4; /* Default 4ms Rotation delay */
int sd_qfull_throttle_enable = TRUE;

int sd_retry_on_reservation_conflict = 1;
int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind = -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static char *sd_resv_conflict_name =
"sd_retry_on_reservation_conflict"; 251 252 /* 253 * Global data for debug logging. To enable debug printing, sd_component_mask 254 * and sd_level_mask should be set to the desired bit patterns as outlined in 255 * sddef.h. 256 */ 257 uint_t sd_component_mask = 0x0; 258 uint_t sd_level_mask = 0x0; 259 struct sd_lun *sd_debug_un = NULL; 260 uint_t sd_error_level = SCSI_ERR_RETRYABLE; 261 262 /* Note: these may go away in the future... */ 263 static uint32_t sd_xbuf_active_limit = 512; 264 static uint32_t sd_xbuf_reserve_limit = 16; 265 266 static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 }; 267 268 /* 269 * Timer value used to reset the throttle after it has been reduced 270 * (typically in response to TRAN_BUSY or STATUS_QFULL) 271 */ 272 static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT; 273 static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT; 274 275 /* 276 * Interval value associated with the media change scsi watch. 277 */ 278 static int sd_check_media_time = 3000000; 279 280 /* 281 * Wait value used for in progress operations during a DDI_SUSPEND 282 */ 283 static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE; 284 285 /* 286 * sd_label_mutex protects a static buffer used in the disk label 287 * component of the driver 288 */ 289 static kmutex_t sd_label_mutex; 290 291 /* 292 * sd_detach_mutex protects un_layer_count, un_detach_count, and 293 * un_opens_in_progress in the sd_lun structure. 294 */ 295 static kmutex_t sd_detach_mutex; 296 297 _NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex, 298 sd_lun::{un_layer_count un_detach_count un_opens_in_progress})) 299 300 /* 301 * Global buffer and mutex for debug logging 302 */ 303 static char sd_log_buf[1024]; 304 static kmutex_t sd_log_mutex; 305 306 /* 307 * Structs and globals for recording attached lun information. 308 * This maintains a chain. Each node in the chain represents a SCSI controller. 309 * The structure records the number of luns attached to each target connected 310 * with the controller. 311 * For parallel scsi device only. 312 */ 313 struct sd_scsi_hba_tgt_lun { 314 struct sd_scsi_hba_tgt_lun *next; 315 dev_info_t *pdip; 316 int nlun[NTARGETS_WIDE]; 317 }; 318 319 /* 320 * Flag to indicate the lun is attached or detached 321 */ 322 #define SD_SCSI_LUN_ATTACH 0 323 #define SD_SCSI_LUN_DETACH 1 324 325 static kmutex_t sd_scsi_target_lun_mutex; 326 static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL; 327 328 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex, 329 sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip)) 330 331 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex, 332 sd_scsi_target_lun_head)) 333 334 /* 335 * "Smart" Probe Caching structs, globals, #defines, etc. 336 * For parallel scsi and non-self-identify device only. 337 */ 338 339 /* 340 * The following resources and routines are implemented to support 341 * "smart" probing, which caches the scsi_probe() results in an array, 342 * in order to help avoid long probe times. 343 */ 344 struct sd_scsi_probe_cache { 345 struct sd_scsi_probe_cache *next; 346 dev_info_t *pdip; 347 int cache[NTARGETS_WIDE]; 348 }; 349 350 static kmutex_t sd_scsi_probe_cache_mutex; 351 static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL; 352 353 /* 354 * Really we only need protection on the head of the linked list, but 355 * better safe than sorry. 
/*
 * Power attribute table
 */
static sd_power_attr_ss sd_pwr_ss = {
    { "NAME=spindle-motor", "0=off", "1=on", NULL },
    {0, 100},
    {30, 0},
    {20000, 0}
};

static sd_power_attr_pc sd_pwr_pc = {
    { "NAME=spindle-motor", "0=stopped", "1=standby", "2=idle",
        "3=active", NULL },
    {0, 0, 0, 100},
    {90, 90, 20, 0},
    {15000, 15000, 1000, 0}
};

/*
 * Power level to power condition
 */
static int sd_pl2pc[] = {
    SD_TARGET_START_VALID,
    SD_TARGET_STANDBY,
    SD_TARGET_IDLE,
    SD_TARGET_ACTIVE
};

/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
    SEAGATE_THROTTLE_VALUE,
    0, 0, 0, 0, 0, 0, 0, 0
};

static sd_tunables fujitsu_properties = {
    FUJITSU_THROTTLE_VALUE,
    0, 0, 0, 0, 0, 0, 0, 0
};

static sd_tunables ibm_properties = {
    IBM_THROTTLE_VALUE,
    0, 0, 0, 0, 0, 0, 0, 0
};

static sd_tunables purple_properties = {
    PURPLE_THROTTLE_VALUE,
    0, 0,
    PURPLE_BUSY_RETRIES,
    PURPLE_RESET_RETRY_COUNT,
    PURPLE_RESERVE_RELEASE_TIME,
    0, 0, 0
};

static sd_tunables sve_properties = {
    SVE_THROTTLE_VALUE,
    0, 0,
    SVE_BUSY_RETRIES,
    SVE_RESET_RETRY_COUNT,
    SVE_RESERVE_RELEASE_TIME,
    SVE_MIN_THROTTLE_VALUE,
    SVE_DISKSORT_DISABLED_FLAG,
    0
};

static sd_tunables maserati_properties = {
    0, 0, 0, 0, 0, 0, 0,
    MASERATI_DISKSORT_DISABLED_FLAG,
    MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
    PIRUS_THROTTLE_VALUE,
    0,
    PIRUS_NRR_COUNT,
    PIRUS_BUSY_RETRIES,
    PIRUS_RESET_RETRY_COUNT,
    0,
    PIRUS_MIN_THROTTLE_VALUE,
    PIRUS_DISKSORT_DISABLED_FLAG,
    PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
    (defined(__i386) || defined(__amd64))

static sd_tunables elite_properties = {
    ELITE_THROTTLE_VALUE,
    0, 0, 0, 0, 0, 0, 0, 0
};

static sd_tunables st31200n_properties = {
    ST31200N_THROTTLE_VALUE,
    0, 0, 0, 0, 0, 0, 0, 0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
    LSI_THROTTLE_VALUE,
    0,
    LSI_NOTREADY_RETRIES,
    0, 0, 0, 0, 0, 0
};

static sd_tunables symbios_properties = {
    SYMBIOS_THROTTLE_VALUE,
    0,
    SYMBIOS_NOTREADY_RETRIES,
    0, 0, 0, 0, 0, 0
};

static sd_tunables lsi_properties = {
    0,
    0,
    LSI_NOTREADY_RETRIES,
    0, 0, 0, 0, 0, 0
};

static sd_tunables lsi_oem_properties = {
    0,
    0,
    LSI_OEM_NOTREADY_RETRIES,
    0, 0, 0, 0, 0, 0,
    1
};

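/*
 * Reader's key to the positional initializers in the sd_tunables tables
 * above (inferred from the SD_TST_* names below and from the
 * SD_CONF_BSET_* flags paired with each table in sd_disk_table; see
 * sddef.h for the authoritative field names and order):
 *
 *	{ throttle, ctype, not-ready retries, busy retries,
 *	  reset retries, reserve/release time, min throttle,
 *	  disksort disabled, lun reset enabled }
 */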
#if (defined(SD_PROP_TST))

#define SD_TST_CTYPE_VAL	CTYPE_CDROM
#define SD_TST_THROTTLE_VAL	16
#define SD_TST_NOTREADY_VAL	12
#define SD_TST_BUSY_VAL		60
#define SD_TST_RST_RETRY_VAL	36
#define SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
    SD_TST_THROTTLE_VAL,
    SD_TST_CTYPE_VAL,
    SD_TST_NOTREADY_VAL,
    SD_TST_BUSY_VAL,
    SD_TST_RST_RETRY_VAL,
    SD_TST_RSV_REL_TIME,
    0, 0, 0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define SD_TOUPPER(C) (((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks that need throttle adjustment (or, perhaps,
 * something else as defined by the flags at a future time). device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model),
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags define which values are to be set in the driver from the
 * properties list.
 *
 * Entries below that begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string that follows
 * can appear anywhere in the 16-byte PID portion of the inquiry data.
 *
 * Entries below that begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a legacy device.
 *	 MAM3182FC, MAM3364FC, and MAM3738FC do not appear to have ever
 *	 been made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
    { "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
    { "FUJITSU MAG3091F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
    { "FUJITSU MAG3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
    { "FUJITSU MAA3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
    { "FUJITSU MAF3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
    { "FUJITSU MAL3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
    { "FUJITSU MAL3738F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
    { "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
    { "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
    { "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties }, 643 { "IBM DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties }, 644 { "IBM IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties }, 645 { "IBM IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties }, 646 { "IBM 1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 647 { "IBM 1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 648 { "IBM 1726-22x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 649 { "IBM 1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 650 { "IBM 1726-42x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 651 { "IBM 1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 652 { "IBM 3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 653 { "IBM 3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 654 { "IBM 3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 655 { "IBM 1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 656 { "IBM 1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 657 { "IBM 1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 658 { "IBM FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 659 { "IBM 1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 660 { "IBM 1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 661 { "IBM 1818", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 662 { "DELL MD3000", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 663 { "DELL MD3000i", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 664 { "LSI INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 665 { "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 666 { "SGI TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 667 { "SGI IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 668 { "*CSM100_*", SD_CONF_BSET_NRR_COUNT | 669 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties }, 670 { "*CSM200_*", SD_CONF_BSET_NRR_COUNT | 671 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties }, 672 { "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties }, 673 { "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties }, 674 { "SUN T3", SD_CONF_BSET_THROTTLE | 675 SD_CONF_BSET_BSY_RETRY_COUNT| 676 SD_CONF_BSET_RST_RETRIES| 677 SD_CONF_BSET_RSV_REL_TIME, 678 &purple_properties }, 679 { "SUN SESS01", SD_CONF_BSET_THROTTLE | 680 SD_CONF_BSET_BSY_RETRY_COUNT| 681 SD_CONF_BSET_RST_RETRIES| 682 SD_CONF_BSET_RSV_REL_TIME| 683 SD_CONF_BSET_MIN_THROTTLE| 684 SD_CONF_BSET_DISKSORT_DISABLED, 685 &sve_properties }, 686 { "SUN T4", SD_CONF_BSET_THROTTLE | 687 SD_CONF_BSET_BSY_RETRY_COUNT| 688 SD_CONF_BSET_RST_RETRIES| 689 SD_CONF_BSET_RSV_REL_TIME, 690 &purple_properties }, 691 { "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED | 692 SD_CONF_BSET_LUN_RESET_ENABLED, 693 &maserati_properties }, 694 { "SUN SE6920", SD_CONF_BSET_THROTTLE | 695 SD_CONF_BSET_NRR_COUNT| 696 SD_CONF_BSET_BSY_RETRY_COUNT| 697 SD_CONF_BSET_RST_RETRIES| 698 SD_CONF_BSET_MIN_THROTTLE| 699 SD_CONF_BSET_DISKSORT_DISABLED| 700 SD_CONF_BSET_LUN_RESET_ENABLED, 701 &pirus_properties }, 702 { "SUN SE6940", SD_CONF_BSET_THROTTLE | 703 SD_CONF_BSET_NRR_COUNT| 704 SD_CONF_BSET_BSY_RETRY_COUNT| 705 SD_CONF_BSET_RST_RETRIES| 706 SD_CONF_BSET_MIN_THROTTLE| 707 SD_CONF_BSET_DISKSORT_DISABLED| 708 SD_CONF_BSET_LUN_RESET_ENABLED, 709 &pirus_properties }, 710 { "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE | 711 SD_CONF_BSET_NRR_COUNT| 712 SD_CONF_BSET_BSY_RETRY_COUNT| 713 SD_CONF_BSET_RST_RETRIES| 714 SD_CONF_BSET_MIN_THROTTLE| 715 SD_CONF_BSET_DISKSORT_DISABLED| 716 SD_CONF_BSET_LUN_RESET_ENABLED, 717 &pirus_properties }, 718 { "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE | 
        SD_CONF_BSET_NRR_COUNT |
        SD_CONF_BSET_BSY_RETRY_COUNT |
        SD_CONF_BSET_RST_RETRIES |
        SD_CONF_BSET_MIN_THROTTLE |
        SD_CONF_BSET_DISKSORT_DISABLED |
        SD_CONF_BSET_LUN_RESET_ENABLED,
        &pirus_properties },
    { "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
        SD_CONF_BSET_NRR_COUNT |
        SD_CONF_BSET_BSY_RETRY_COUNT |
        SD_CONF_BSET_RST_RETRIES |
        SD_CONF_BSET_MIN_THROTTLE |
        SD_CONF_BSET_DISKSORT_DISABLED |
        SD_CONF_BSET_LUN_RESET_ENABLED,
        &pirus_properties },
    { "SUN     SE6330", SD_CONF_BSET_THROTTLE |
        SD_CONF_BSET_NRR_COUNT |
        SD_CONF_BSET_BSY_RETRY_COUNT |
        SD_CONF_BSET_RST_RETRIES |
        SD_CONF_BSET_MIN_THROTTLE |
        SD_CONF_BSET_DISKSORT_DISABLED |
        SD_CONF_BSET_LUN_RESET_ENABLED,
        &pirus_properties },
    { "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "SUN     SUN_6180", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "STK     BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "STK     FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
    { "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
    (defined(__i386) || defined(__amd64)))
    { "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
    { "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
    { "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
    { "CONNER  CP30540", SD_CONF_BSET_NOCACHE, NULL },
    { "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
    { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
    { "SYMBIOS", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
        &symbios_properties },
    { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
        &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
    { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
        | SD_CONF_BSET_READSUB_BCD
        | SD_CONF_BSET_READ_TOC_ADDR_BCD
        | SD_CONF_BSET_NO_READ_HEADER
        | SD_CONF_BSET_READ_CD_XD4), NULL },

    { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
        | SD_CONF_BSET_READSUB_BCD
        | SD_CONF_BSET_READ_TOC_ADDR_BCD
        | SD_CONF_BSET_NO_READ_HEADER
        | SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
    { "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
        | SD_CONF_BSET_CTYPE
        | SD_CONF_BSET_NRR_COUNT
        | SD_CONF_BSET_FAB_DEVID
        | SD_CONF_BSET_NOCACHE
        | SD_CONF_BSET_BSY_RETRY_COUNT
        | SD_CONF_BSET_PLAYMSF_BCD
        | SD_CONF_BSET_READSUB_BCD
        | SD_CONF_BSET_READ_TOC_TRK_BCD
        | SD_CONF_BSET_READ_TOC_ADDR_BCD
        | SD_CONF_BSET_NO_READ_HEADER
        | SD_CONF_BSET_READ_CD_XD4
        | SD_CONF_BSET_RST_RETRIES
        | SD_CONF_BSET_RSV_REL_TIME
        | SD_CONF_BSET_TUR_CHECK), &tst_properties },
#endif
};

static const int sd_disk_table_size =
    sizeof (sd_disk_table) / sizeof (sd_disk_config_t);
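/*
 * Illustrative sketch (not compiled): the blank-matching rule described
 * in the comment above sd_disk_table. A normalize-then-compare in the
 * spirit of sd_blank_cmp() (prototyped later in this file); the loop
 * below is hypothetical and only approximates the real matching code:
 *
 *	trim leading/trailing blanks from both strings, then:
 *	while (*id != '\0' && *p != '\0') {
 *		if (*id == ' ' && *p == ' ') {
 *			while (*id == ' ') id++;	collapse blank runs
 *			while (*p == ' ') p++;		on both sides
 *		} else if (SD_TOUPPER(*id++) != SD_TOUPPER(*p++)) {
 *			return (0);			mismatch
 *		}
 *	}
 *	return (*id == '\0' && *p == '\0');
 */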
MARVELL SD88SA02", 810 "MARVELL SD88SA02", 811 "TOSHIBA THNSNV05", 812 }; 813 814 static const int sd_flash_dev_table_size = 815 sizeof (sd_flash_dev_table) / sizeof (sd_flash_dev_table[0]); 816 817 #define SD_INTERCONNECT_PARALLEL 0 818 #define SD_INTERCONNECT_FABRIC 1 819 #define SD_INTERCONNECT_FIBRE 2 820 #define SD_INTERCONNECT_SSA 3 821 #define SD_INTERCONNECT_SATA 4 822 #define SD_INTERCONNECT_SAS 5 823 824 #define SD_IS_PARALLEL_SCSI(un) \ 825 ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL) 826 #define SD_IS_SERIAL(un) \ 827 (((un)->un_interconnect_type == SD_INTERCONNECT_SATA) ||\ 828 ((un)->un_interconnect_type == SD_INTERCONNECT_SAS)) 829 830 /* 831 * Definitions used by device id registration routines 832 */ 833 #define VPD_HEAD_OFFSET 3 /* size of head for vpd page */ 834 #define VPD_PAGE_LENGTH 3 /* offset for pge length data */ 835 #define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */ 836 837 static kmutex_t sd_sense_mutex = {0}; 838 839 /* 840 * Macros for updates of the driver state 841 */ 842 #define New_state(un, s) \ 843 (un)->un_last_state = (un)->un_state, (un)->un_state = (s) 844 #define Restore_state(un) \ 845 { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); } 846 847 static struct sd_cdbinfo sd_cdbtab[] = { 848 { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, }, 849 { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, }, 850 { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, }, 851 { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, }, 852 }; 853 854 /* 855 * Specifies the number of seconds that must have elapsed since the last 856 * cmd. has completed for a device to be declared idle to the PM framework. 857 */ 858 static int sd_pm_idletime = 1; 859 860 /* 861 * Internal function prototypes 862 */ 863 864 #if (defined(__fibre)) 865 /* 866 * These #defines are to avoid namespace collisions that occur because this 867 * code is currently used to compile two separate driver modules: sd and ssd. 868 * All function names need to be treated this way (even if declared static) 869 * in order to allow the debugger to resolve the names properly. 870 * It is anticipated that in the near future the ssd module will be obsoleted, 871 * at which time this ugliness should go away. 
/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define sd_log_trace ssd_log_trace
#define sd_log_info ssd_log_info
#define sd_log_err ssd_log_err
#define sdprobe ssdprobe
#define sdinfo ssdinfo
#define sd_prop_op ssd_prop_op
#define sd_scsi_probe_cache_init ssd_scsi_probe_cache_init
#define sd_scsi_probe_cache_fini ssd_scsi_probe_cache_fini
#define sd_scsi_clear_probe_cache ssd_scsi_clear_probe_cache
#define sd_scsi_probe_with_cache ssd_scsi_probe_with_cache
#define sd_scsi_target_lun_init ssd_scsi_target_lun_init
#define sd_scsi_target_lun_fini ssd_scsi_target_lun_fini
#define sd_scsi_get_target_lun_count ssd_scsi_get_target_lun_count
#define sd_scsi_update_lun_on_target ssd_scsi_update_lun_on_target
#define sd_spin_up_unit ssd_spin_up_unit
#define sd_enable_descr_sense ssd_enable_descr_sense
#define sd_reenable_dsense_task ssd_reenable_dsense_task
#define sd_set_mmc_caps ssd_set_mmc_caps
#define sd_read_unit_properties ssd_read_unit_properties
#define sd_process_sdconf_file ssd_process_sdconf_file
#define sd_process_sdconf_table ssd_process_sdconf_table
#define sd_sdconf_id_match ssd_sdconf_id_match
#define sd_blank_cmp ssd_blank_cmp
#define sd_chk_vers1_data ssd_chk_vers1_data
#define sd_set_vers1_properties ssd_set_vers1_properties
#define sd_check_bdc_vpd ssd_check_bdc_vpd
#define sd_check_emulation_mode ssd_check_emulation_mode

#define sd_get_physical_geometry ssd_get_physical_geometry
#define sd_get_virtual_geometry ssd_get_virtual_geometry
#define sd_update_block_info ssd_update_block_info
#define sd_register_devid ssd_register_devid
#define sd_get_devid ssd_get_devid
#define sd_create_devid ssd_create_devid
#define sd_write_deviceid ssd_write_deviceid
#define sd_check_vpd_page_support ssd_check_vpd_page_support
#define sd_setup_pm ssd_setup_pm
#define sd_create_pm_components ssd_create_pm_components
#define sd_ddi_suspend ssd_ddi_suspend
#define sd_ddi_resume ssd_ddi_resume
#define sd_pm_state_change ssd_pm_state_change
#define sdpower ssdpower
#define sdattach ssdattach
#define sddetach ssddetach
#define sd_unit_attach ssd_unit_attach
#define sd_unit_detach ssd_unit_detach
#define sd_set_unit_attributes ssd_set_unit_attributes
#define sd_create_errstats ssd_create_errstats
#define sd_set_errstats ssd_set_errstats
#define sd_set_pstats ssd_set_pstats
#define sddump ssddump
#define sd_scsi_poll ssd_scsi_poll
#define sd_send_polled_RQS ssd_send_polled_RQS
#define sd_ddi_scsi_poll ssd_ddi_scsi_poll
#define sd_init_event_callbacks ssd_init_event_callbacks
#define sd_event_callback ssd_event_callback
#define sd_cache_control ssd_cache_control
#define sd_get_write_cache_enabled ssd_get_write_cache_enabled
#define sd_get_write_cache_changeable ssd_get_write_cache_changeable
#define sd_get_nv_sup ssd_get_nv_sup
#define sd_make_device ssd_make_device
#define sdopen ssdopen
#define sdclose ssdclose
#define sd_ready_and_valid ssd_ready_and_valid
#define sdmin ssdmin
#define sdread ssdread
#define sdwrite ssdwrite
#define sdaread ssdaread
#define sdawrite ssdawrite
#define sdstrategy ssdstrategy
#define sdioctl ssdioctl
#define sd_mapblockaddr_iostart ssd_mapblockaddr_iostart
#define sd_mapblocksize_iostart ssd_mapblocksize_iostart
#define sd_checksum_iostart ssd_checksum_iostart
#define sd_checksum_uscsi_iostart ssd_checksum_uscsi_iostart
#define sd_pm_iostart ssd_pm_iostart
#define sd_core_iostart ssd_core_iostart
#define sd_mapblockaddr_iodone ssd_mapblockaddr_iodone
#define sd_mapblocksize_iodone ssd_mapblocksize_iodone
#define sd_checksum_iodone ssd_checksum_iodone
#define sd_checksum_uscsi_iodone ssd_checksum_uscsi_iodone
#define sd_pm_iodone ssd_pm_iodone
#define sd_initpkt_for_buf ssd_initpkt_for_buf
#define sd_destroypkt_for_buf ssd_destroypkt_for_buf
#define sd_setup_rw_pkt ssd_setup_rw_pkt
#define sd_setup_next_rw_pkt ssd_setup_next_rw_pkt
#define sd_buf_iodone ssd_buf_iodone
#define sd_uscsi_strategy ssd_uscsi_strategy
#define sd_initpkt_for_uscsi ssd_initpkt_for_uscsi
#define sd_destroypkt_for_uscsi ssd_destroypkt_for_uscsi
#define sd_uscsi_iodone ssd_uscsi_iodone
#define sd_xbuf_strategy ssd_xbuf_strategy
#define sd_xbuf_init ssd_xbuf_init
#define sd_pm_entry ssd_pm_entry
#define sd_pm_exit ssd_pm_exit

#define sd_pm_idletimeout_handler ssd_pm_idletimeout_handler
#define sd_pm_timeout_handler ssd_pm_timeout_handler

#define sd_add_buf_to_waitq ssd_add_buf_to_waitq
#define sdintr ssdintr
#define sd_start_cmds ssd_start_cmds
#define sd_send_scsi_cmd ssd_send_scsi_cmd
#define sd_bioclone_alloc ssd_bioclone_alloc
#define sd_bioclone_free ssd_bioclone_free
#define sd_shadow_buf_alloc ssd_shadow_buf_alloc
#define sd_shadow_buf_free ssd_shadow_buf_free
#define sd_print_transport_rejected_message \
    ssd_print_transport_rejected_message
#define sd_retry_command ssd_retry_command
#define sd_set_retry_bp ssd_set_retry_bp
#define sd_send_request_sense_command ssd_send_request_sense_command
#define sd_start_retry_command ssd_start_retry_command
#define sd_start_direct_priority_command \
    ssd_start_direct_priority_command
#define sd_return_failed_command ssd_return_failed_command
#define sd_return_failed_command_no_restart \
    ssd_return_failed_command_no_restart
#define sd_return_command ssd_return_command
#define sd_sync_with_callback ssd_sync_with_callback
#define sdrunout ssdrunout
#define sd_mark_rqs_busy ssd_mark_rqs_busy
#define sd_mark_rqs_idle ssd_mark_rqs_idle
#define sd_reduce_throttle ssd_reduce_throttle
#define sd_restore_throttle ssd_restore_throttle
#define sd_print_incomplete_msg ssd_print_incomplete_msg
#define sd_init_cdb_limits ssd_init_cdb_limits
#define sd_pkt_status_good ssd_pkt_status_good
#define sd_pkt_status_check_condition ssd_pkt_status_check_condition
#define sd_pkt_status_busy ssd_pkt_status_busy
#define sd_pkt_status_reservation_conflict \
    ssd_pkt_status_reservation_conflict
#define sd_pkt_status_qfull ssd_pkt_status_qfull
#define sd_handle_request_sense ssd_handle_request_sense
#define sd_handle_auto_request_sense ssd_handle_auto_request_sense
#define sd_print_sense_failed_msg ssd_print_sense_failed_msg
#define sd_validate_sense_data ssd_validate_sense_data
#define sd_decode_sense ssd_decode_sense
#define sd_print_sense_msg ssd_print_sense_msg
#define sd_sense_key_no_sense ssd_sense_key_no_sense
#define sd_sense_key_recoverable_error ssd_sense_key_recoverable_error
#define sd_sense_key_not_ready ssd_sense_key_not_ready
#define sd_sense_key_medium_or_hardware_error \
    ssd_sense_key_medium_or_hardware_error
#define sd_sense_key_illegal_request ssd_sense_key_illegal_request
#define sd_sense_key_unit_attention ssd_sense_key_unit_attention
#define sd_sense_key_fail_command ssd_sense_key_fail_command
#define sd_sense_key_blank_check ssd_sense_key_blank_check
#define sd_sense_key_aborted_command ssd_sense_key_aborted_command
#define sd_sense_key_default ssd_sense_key_default
#define sd_print_retry_msg ssd_print_retry_msg
#define sd_print_cmd_incomplete_msg ssd_print_cmd_incomplete_msg
#define sd_pkt_reason_cmd_incomplete ssd_pkt_reason_cmd_incomplete
#define sd_pkt_reason_cmd_tran_err ssd_pkt_reason_cmd_tran_err
#define sd_pkt_reason_cmd_reset ssd_pkt_reason_cmd_reset
#define sd_pkt_reason_cmd_aborted ssd_pkt_reason_cmd_aborted
#define sd_pkt_reason_cmd_timeout ssd_pkt_reason_cmd_timeout
#define sd_pkt_reason_cmd_unx_bus_free ssd_pkt_reason_cmd_unx_bus_free
#define sd_pkt_reason_cmd_tag_reject ssd_pkt_reason_cmd_tag_reject
#define sd_pkt_reason_default ssd_pkt_reason_default
#define sd_reset_target ssd_reset_target
#define sd_start_stop_unit_callback ssd_start_stop_unit_callback
#define sd_start_stop_unit_task ssd_start_stop_unit_task
#define sd_taskq_create ssd_taskq_create
#define sd_taskq_delete ssd_taskq_delete
#define sd_target_change_task ssd_target_change_task
#define sd_log_dev_status_event ssd_log_dev_status_event
#define sd_log_lun_expansion_event ssd_log_lun_expansion_event
#define sd_log_eject_request_event ssd_log_eject_request_event
#define sd_media_change_task ssd_media_change_task
#define sd_handle_mchange ssd_handle_mchange
#define sd_send_scsi_DOORLOCK ssd_send_scsi_DOORLOCK
#define sd_send_scsi_READ_CAPACITY ssd_send_scsi_READ_CAPACITY
#define sd_send_scsi_READ_CAPACITY_16 ssd_send_scsi_READ_CAPACITY_16
#define sd_send_scsi_GET_CONFIGURATION ssd_send_scsi_GET_CONFIGURATION
#define sd_send_scsi_feature_GET_CONFIGURATION \
    ssd_send_scsi_feature_GET_CONFIGURATION
#define sd_send_scsi_START_STOP_UNIT ssd_send_scsi_START_STOP_UNIT
#define sd_send_scsi_INQUIRY ssd_send_scsi_INQUIRY
#define sd_send_scsi_TEST_UNIT_READY ssd_send_scsi_TEST_UNIT_READY
#define sd_send_scsi_PERSISTENT_RESERVE_IN \
    ssd_send_scsi_PERSISTENT_RESERVE_IN
#define sd_send_scsi_PERSISTENT_RESERVE_OUT \
    ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define sd_send_scsi_SYNCHRONIZE_CACHE ssd_send_scsi_SYNCHRONIZE_CACHE
#define sd_send_scsi_SYNCHRONIZE_CACHE_biodone \
    ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define sd_send_scsi_MODE_SENSE ssd_send_scsi_MODE_SENSE
#define sd_send_scsi_MODE_SELECT ssd_send_scsi_MODE_SELECT
#define sd_send_scsi_RDWR ssd_send_scsi_RDWR
#define sd_send_scsi_LOG_SENSE ssd_send_scsi_LOG_SENSE
#define sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION \
    ssd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
#define sd_gesn_media_data_valid ssd_gesn_media_data_valid
#define sd_alloc_rqs ssd_alloc_rqs
#define sd_free_rqs ssd_free_rqs
#define sd_dump_memory ssd_dump_memory
#define sd_get_media_info_com ssd_get_media_info_com
#define sd_get_media_info ssd_get_media_info
#define sd_get_media_info_ext ssd_get_media_info_ext
#define sd_dkio_ctrl_info ssd_dkio_ctrl_info
#define sd_nvpair_str_decode ssd_nvpair_str_decode
#define sd_strtok_r ssd_strtok_r
#define sd_set_properties ssd_set_properties
#define sd_get_tunables_from_conf ssd_get_tunables_from_conf
#define sd_setup_next_xfer ssd_setup_next_xfer
#define sd_dkio_get_temp ssd_dkio_get_temp
#define sd_check_mhd ssd_check_mhd
#define sd_mhd_watch_cb ssd_mhd_watch_cb
#define sd_mhd_watch_incomplete ssd_mhd_watch_incomplete
#define sd_sname ssd_sname
#define sd_mhd_resvd_recover ssd_mhd_resvd_recover
#define sd_resv_reclaim_thread ssd_resv_reclaim_thread
#define sd_take_ownership ssd_take_ownership
#define sd_reserve_release ssd_reserve_release
#define sd_rmv_resv_reclaim_req ssd_rmv_resv_reclaim_req
#define sd_mhd_reset_notify_cb ssd_mhd_reset_notify_cb
#define sd_persistent_reservation_in_read_keys \
    ssd_persistent_reservation_in_read_keys
#define sd_persistent_reservation_in_read_resv \
    ssd_persistent_reservation_in_read_resv
#define sd_mhdioc_takeown ssd_mhdioc_takeown
#define sd_mhdioc_failfast ssd_mhdioc_failfast
#define sd_mhdioc_release ssd_mhdioc_release
#define sd_mhdioc_register_devid ssd_mhdioc_register_devid
#define sd_mhdioc_inkeys ssd_mhdioc_inkeys
#define sd_mhdioc_inresv ssd_mhdioc_inresv
#define sr_change_blkmode ssr_change_blkmode
#define sr_change_speed ssr_change_speed
#define sr_atapi_change_speed ssr_atapi_change_speed
#define sr_pause_resume ssr_pause_resume
#define sr_play_msf ssr_play_msf
#define sr_play_trkind ssr_play_trkind
#define sr_read_all_subcodes ssr_read_all_subcodes
#define sr_read_subchannel ssr_read_subchannel
#define sr_read_tocentry ssr_read_tocentry
#define sr_read_tochdr ssr_read_tochdr
#define sr_read_cdda ssr_read_cdda
#define sr_read_cdxa ssr_read_cdxa
#define sr_read_mode1 ssr_read_mode1
#define sr_read_mode2 ssr_read_mode2
#define sr_read_cd_mode2 ssr_read_cd_mode2
#define sr_sector_mode ssr_sector_mode
#define sr_eject ssr_eject
#define sr_ejected ssr_ejected
#define sr_check_wp ssr_check_wp
#define sd_watch_request_submit ssd_watch_request_submit
#define sd_check_media ssd_check_media
#define sd_media_watch_cb ssd_media_watch_cb
#define sd_delayed_cv_broadcast ssd_delayed_cv_broadcast
#define sr_volume_ctrl ssr_volume_ctrl
#define sr_read_sony_session_offset ssr_read_sony_session_offset
#define sd_log_page_supported ssd_log_page_supported
#define sd_check_for_writable_cd ssd_check_for_writable_cd
#define sd_wm_cache_constructor ssd_wm_cache_constructor
#define sd_wm_cache_destructor ssd_wm_cache_destructor
#define sd_range_lock ssd_range_lock
#define sd_get_range ssd_get_range
#define sd_free_inlist_wmap ssd_free_inlist_wmap
#define sd_range_unlock ssd_range_unlock
#define sd_read_modify_write_task ssd_read_modify_write_task
#define sddump_do_read_of_rmw ssddump_do_read_of_rmw

#define sd_iostart_chain ssd_iostart_chain
#define sd_iodone_chain ssd_iodone_chain
#define sd_initpkt_map ssd_initpkt_map
#define sd_destroypkt_map ssd_destroypkt_map
#define sd_chain_type_map ssd_chain_type_map
#define sd_chain_index_map ssd_chain_index_map

#define sd_failfast_flushctl ssd_failfast_flushctl
#define sd_failfast_flushq ssd_failfast_flushq
#define sd_failfast_flushq_callback ssd_failfast_flushq_callback

#define sd_is_lsi ssd_is_lsi
#define sd_tg_rdwr ssd_tg_rdwr
#define sd_tg_getinfo ssd_tg_getinfo
#define sd_rmw_msg_print_handler ssd_rmw_msg_print_handler

#endif /* #if (defined(__fibre)) */

int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int sd_spin_up_unit(sd_ssc_t *ssc);

/*
 * Use sd_ssc_init to establish an sd_ssc_t struct,
 * sd_ssc_send to send a uscsi internal command, and
 * sd_ssc_fini to free the sd_ssc_t struct.
 */
static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
    int flag, enum uio_seg dataspace, int path_flag);
static void sd_ssc_fini(sd_ssc_t *ssc);

/*
 * Use sd_ssc_assessment to set the correct type of assessment, and
 * sd_ssc_post to post an ereport and a system log entry.
 * sd_ssc_post calls sd_ssc_print to print the system log and
 * sd_ssc_ereport_post to post the ereport.
 */
static void sd_ssc_assessment(sd_ssc_t *ssc,
    enum sd_type_assessment tp_assess);

static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
static void sd_ssc_ereport_post(sd_ssc_t *ssc,
    enum sd_driver_assessment drv_assess);

/*
 * Use sd_ssc_set_info to mark an un-decodable-data error, and
 * sd_ssc_extract_info to transfer information from internal
 * data structures to sd_ssc_t.
 */
static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp,
    const char *fmt, ...);
static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);
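/*
 * Illustrative sketch (not compiled): the sd_ssc_t lifecycle described
 * above, as an internal-command caller would use it. The uscsi_cmd setup
 * is elided, and the flag/assessment values shown are only one plausible
 * choice:
 *
 *	sd_ssc_t *ssc = sd_ssc_init(un);
 *	rval = sd_ssc_send(ssc, &ucmd, FKIOCTL, UIO_SYSSPACE,
 *	    SD_PATH_STANDARD);
 *	if (rval == 0)
 *		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
 *	else
 *		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
 *	sd_ssc_fini(ssc);
 */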
static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

#ifdef _LP64
static void sd_enable_descr_sense(sd_ssc_t *ssc);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(sd_ssc_t *ssc);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
static void sd_set_properties(struct sd_lun *un, char *name, char *value);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(sd_ssc_t *ssc);
static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
static int sd_write_deviceid(sd_ssc_t *ssc);
static int sd_check_vpd_page_support(sd_ssc_t *ssc);

static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_pm_state_change(struct sd_lun *un, int level, int flag);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */
#define SD_CACHE_ENABLE 1
#define SD_CACHE_DISABLE 0
#define SD_CACHE_NOCHANGE -1

static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
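/*
 * Example (illustrative): assuming the rcd_flag/wce_flag arguments map to
 * the read- and write-cache settings as their names suggest, a caller can
 * enable the write cache while leaving the read cache untouched with:
 *
 *	rval = sd_cache_control(ssc, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE);
 */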
static void sd_get_write_cache_changeable(sd_ssc_t *ssc, int *is_changeable);
static void sd_get_nv_sup(sd_ssc_t *ssc);
static dev_t sd_make_device(dev_info_t *devi);
static void sd_check_bdc_vpd(sd_ssc_t *ssc);
static void sd_check_emulation_mode(sd_ssc_t *ssc);
static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(sd_ssc_t *ssc, int part);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);
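/*
 * Illustrative sketch (not compiled): the layering convention behind the
 * iostart/iodone prototypes above. Each iostart layer performs its
 * transformation and then hands the buf to the next entry in the chain
 * by index; the chain tables (sd_iostart_chain/sd_iodone_chain) are
 * defined later in this file, and the direct call below stands in for
 * the dispatch macro the driver actually uses. Errors unwind through the
 * corresponding iodone chain instead:
 *
 *	static void
 *	sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		per-layer work on bp goes here, then:
 *		(*(sd_iostart_chain[index + 1]))(index + 1, un, bp);
 *	}
 */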
/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
    int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);
sd_taskq_create(void); 1511 static void sd_taskq_delete(void); 1512 static void sd_target_change_task(void *arg); 1513 static void sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag); 1514 static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag); 1515 static void sd_log_eject_request_event(struct sd_lun *un, int km_flag); 1516 static void sd_media_change_task(void *arg); 1517 1518 static int sd_handle_mchange(struct sd_lun *un); 1519 static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag); 1520 static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, 1521 uint32_t *lbap, int path_flag); 1522 static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, 1523 uint32_t *lbap, uint32_t *psp, int path_flag); 1524 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, 1525 int flag, int path_flag); 1526 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, 1527 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp); 1528 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag); 1529 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, 1530 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp); 1531 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, 1532 uchar_t usr_cmd, uchar_t *usr_bufp); 1533 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, 1534 struct dk_callback *dkc); 1535 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp); 1536 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, 1537 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1538 uchar_t *bufaddr, uint_t buflen, int path_flag); 1539 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 1540 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1541 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag); 1542 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, 1543 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag); 1544 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, 1545 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag); 1546 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 1547 size_t buflen, daddr_t start_block, int path_flag); 1548 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \ 1549 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \ 1550 path_flag) 1551 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\ 1552 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\ 1553 path_flag) 1554 1555 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, 1556 uint16_t buflen, uchar_t page_code, uchar_t page_control, 1557 uint16_t param_ptr, int path_flag); 1558 static int sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc, 1559 uchar_t *bufaddr, size_t buflen, uchar_t class_req); 1560 static boolean_t sd_gesn_media_data_valid(uchar_t *data); 1561 1562 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un); 1563 static void sd_free_rqs(struct sd_lun *un); 1564 1565 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, 1566 uchar_t *data, int len, int fmt); 1567 static void sd_panic_for_res_conflict(struct sd_lun *un); 1568 1569 /* 1570 * Disk Ioctl Function Prototypes 1571 */ 1572 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag); 1573 static int sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag); 1574 static int 
sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag); 1575 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag); 1576 1577 /* 1578 * Multi-host Ioctl Prototypes 1579 */ 1580 static int sd_check_mhd(dev_t dev, int interval); 1581 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1582 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1583 static char *sd_sname(uchar_t status); 1584 static void sd_mhd_resvd_recover(void *arg); 1585 static void sd_resv_reclaim_thread(); 1586 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1587 static int sd_reserve_release(dev_t dev, int cmd); 1588 static void sd_rmv_resv_reclaim_req(dev_t dev); 1589 static void sd_mhd_reset_notify_cb(caddr_t arg); 1590 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1591 mhioc_inkeys_t *usrp, int flag); 1592 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1593 mhioc_inresvs_t *usrp, int flag); 1594 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1595 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1596 static int sd_mhdioc_release(dev_t dev); 1597 static int sd_mhdioc_register_devid(dev_t dev); 1598 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1599 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1600 1601 /* 1602 * SCSI removable prototypes 1603 */ 1604 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1605 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1606 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1607 static int sr_pause_resume(dev_t dev, int mode); 1608 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1609 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1610 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1611 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1612 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1613 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1614 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1615 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1616 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1617 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1618 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1619 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1620 static int sr_eject(dev_t dev); 1621 static void sr_ejected(register struct sd_lun *un); 1622 static int sr_check_wp(dev_t dev); 1623 static opaque_t sd_watch_request_submit(struct sd_lun *un); 1624 static int sd_check_media(dev_t dev, enum dkio_state state); 1625 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1626 static void sd_delayed_cv_broadcast(void *arg); 1627 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1628 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1629 1630 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page); 1631 1632 /* 1633 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 
1634 */ 1635 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag); 1636 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1637 static void sd_wm_cache_destructor(void *wm, void *un); 1638 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1639 daddr_t endb, ushort_t typ); 1640 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1641 daddr_t endb); 1642 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1643 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1644 static void sd_read_modify_write_task(void * arg); 1645 static int 1646 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1647 struct buf **bpp); 1648 1649 1650 /* 1651 * Function prototypes for failfast support. 1652 */ 1653 static void sd_failfast_flushq(struct sd_lun *un); 1654 static int sd_failfast_flushq_callback(struct buf *bp); 1655 1656 /* 1657 * Function prototypes to check for lsi devices 1658 */ 1659 static void sd_is_lsi(struct sd_lun *un); 1660 1661 /* 1662 * Function prototypes for partial DMA support 1663 */ 1664 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1665 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1666 1667 1668 /* Function prototypes for cmlb */ 1669 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1670 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1671 1672 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1673 1674 /* 1675 * For printing RMW warning message timely 1676 */ 1677 static void sd_rmw_msg_print_handler(void *arg); 1678 1679 /* 1680 * Constants for failfast support: 1681 * 1682 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1683 * failfast processing being performed. 1684 * 1685 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1686 * failfast processing on all bufs with B_FAILFAST set. 1687 */ 1688 1689 #define SD_FAILFAST_INACTIVE 0 1690 #define SD_FAILFAST_ACTIVE 1 1691 1692 /* 1693 * Bitmask to control behavior of buf(9S) flushes when a transition to 1694 * the failfast state occurs. Optional bits include: 1695 * 1696 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1697 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1698 * be flushed. 1699 * 1700 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1701 * driver, in addition to the regular wait queue. This includes the xbuf 1702 * queues. When clear, only the driver's wait queue will be flushed. 1703 */ 1704 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1705 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1706 1707 /* 1708 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1709 * to flush all queues within the driver. 
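 *
 * As an illustrative sketch (sd_failfast_flushctl is not a formally
 * documented tunable), both flush behaviors could be enabled from
 * /etc/system with:
 *
 *	set sd:sd_failfast_flushctl = 0x3
 *
 * i.e. (SD_FAILFAST_FLUSH_ALL_BUFS | SD_FAILFAST_FLUSH_ALL_QUEUES).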
1710 */ 1711 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1712 1713 1714 /* 1715 * SD Testing Fault Injection 1716 */ 1717 #ifdef SD_FAULT_INJECTION 1718 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1719 static void sd_faultinjection(struct scsi_pkt *pktp); 1720 static void sd_injection_log(char *buf, struct sd_lun *un); 1721 #endif 1722 1723 /* 1724 * Device driver ops vector 1725 */ 1726 static struct cb_ops sd_cb_ops = { 1727 sdopen, /* open */ 1728 sdclose, /* close */ 1729 sdstrategy, /* strategy */ 1730 nodev, /* print */ 1731 sddump, /* dump */ 1732 sdread, /* read */ 1733 sdwrite, /* write */ 1734 sdioctl, /* ioctl */ 1735 nodev, /* devmap */ 1736 nodev, /* mmap */ 1737 nodev, /* segmap */ 1738 nochpoll, /* poll */ 1739 sd_prop_op, /* cb_prop_op */ 1740 0, /* streamtab */ 1741 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1742 CB_REV, /* cb_rev */ 1743 sdaread, /* async I/O read entry point */ 1744 sdawrite /* async I/O write entry point */ 1745 }; 1746 1747 struct dev_ops sd_ops = { 1748 DEVO_REV, /* devo_rev, */ 1749 0, /* refcnt */ 1750 sdinfo, /* info */ 1751 nulldev, /* identify */ 1752 sdprobe, /* probe */ 1753 sdattach, /* attach */ 1754 sddetach, /* detach */ 1755 nodev, /* reset */ 1756 &sd_cb_ops, /* driver operations */ 1757 NULL, /* bus operations */ 1758 sdpower, /* power */ 1759 ddi_quiesce_not_needed, /* quiesce */ 1760 }; 1761 1762 /* 1763 * This is the loadable module wrapper. 1764 */ 1765 #include <sys/modctl.h> 1766 1767 #ifndef XPV_HVM_DRIVER 1768 static struct modldrv modldrv = { 1769 &mod_driverops, /* Type of module. This one is a driver */ 1770 SD_MODULE_NAME, /* Module name. */ 1771 &sd_ops /* driver ops */ 1772 }; 1773 1774 static struct modlinkage modlinkage = { 1775 MODREV_1, &modldrv, NULL 1776 }; 1777 1778 #else /* XPV_HVM_DRIVER */ 1779 static struct modlmisc modlmisc = { 1780 &mod_miscops, /* Type of module. This one is a misc */ 1781 "HVM " SD_MODULE_NAME, /* Module name. */ 1782 }; 1783 1784 static struct modlinkage modlinkage = { 1785 MODREV_1, &modlmisc, NULL 1786 }; 1787 1788 #endif /* XPV_HVM_DRIVER */ 1789 1790 static cmlb_tg_ops_t sd_tgops = { 1791 TG_DK_OPS_VERSION_1, 1792 sd_tg_rdwr, 1793 sd_tg_getinfo 1794 }; 1795 1796 static struct scsi_asq_key_strings sd_additional_codes[] = { 1797 0x81, 0, "Logical Unit is Reserved", 1798 0x85, 0, "Audio Address Not Valid", 1799 0xb6, 0, "Media Load Mechanism Failed", 1800 0xB9, 0, "Audio Play Operation Aborted", 1801 0xbf, 0, "Buffer Overflow for Read All Subcodes Command", 1802 0x53, 2, "Medium removal prevented", 1803 0x6f, 0, "Authentication failed during key exchange", 1804 0x6f, 1, "Key not present", 1805 0x6f, 2, "Key not established", 1806 0x6f, 3, "Read without proper authentication", 1807 0x6f, 4, "Mismatched region to this logical unit", 1808 0x6f, 5, "Region reset count error", 1809 0xffff, 0x0, NULL 1810 }; 1811 1812 1813 /* 1814 * Struct for passing printing information for sense data messages 1815 */ 1816 struct sd_sense_info { 1817 int ssi_severity; 1818 int ssi_pfa_flag; 1819 }; 1820 1821 /* 1822 * Table of function pointers for iostart-side routines. Separate "chains" 1823 * of layered function calls are formed by placing the function pointers 1824 * sequentially in the desired order. Functions are called according to an 1825 * incrementing table index ordering. The last function in each chain must 1826 * be sd_core_iostart(). 
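 * For example, the plain disk chain below (indexes 0-2) layers
 * sd_mapblockaddr_iostart() -> sd_pm_iostart() -> sd_core_iostart().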
The corresponding iodone-side routines are expected 1827 * in the sd_iodone_chain[] array. 1828 * 1829 * Note: It may seem more natural to organize both the iostart and iodone 1830 * functions together, into an array of structures (or some similar 1831 * organization) with a common index, rather than two separate arrays which 1832 * must be maintained in synchronization. The purpose of this division is 1833 * to achieve improved performance: individual arrays allow for more 1834 * effective cache line utilization on certain platforms. 1835 */ 1836 1837 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1838 1839 1840 static sd_chain_t sd_iostart_chain[] = { 1841 1842 /* Chain for buf IO for disk drive targets (PM enabled) */ 1843 sd_mapblockaddr_iostart, /* Index: 0 */ 1844 sd_pm_iostart, /* Index: 1 */ 1845 sd_core_iostart, /* Index: 2 */ 1846 1847 /* Chain for buf IO for disk drive targets (PM disabled) */ 1848 sd_mapblockaddr_iostart, /* Index: 3 */ 1849 sd_core_iostart, /* Index: 4 */ 1850 1851 /* 1852 * Chain for buf IO for removable-media or large sector size 1853 * disk drive targets with RMW needed (PM enabled) 1854 */ 1855 sd_mapblockaddr_iostart, /* Index: 5 */ 1856 sd_mapblocksize_iostart, /* Index: 6 */ 1857 sd_pm_iostart, /* Index: 7 */ 1858 sd_core_iostart, /* Index: 8 */ 1859 1860 /* 1861 * Chain for buf IO for removable-media or large sector size 1862 * disk drive targets with RMW needed (PM disabled) 1863 */ 1864 sd_mapblockaddr_iostart, /* Index: 9 */ 1865 sd_mapblocksize_iostart, /* Index: 10 */ 1866 sd_core_iostart, /* Index: 11 */ 1867 1868 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1869 sd_mapblockaddr_iostart, /* Index: 12 */ 1870 sd_checksum_iostart, /* Index: 13 */ 1871 sd_pm_iostart, /* Index: 14 */ 1872 sd_core_iostart, /* Index: 15 */ 1873 1874 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1875 sd_mapblockaddr_iostart, /* Index: 16 */ 1876 sd_checksum_iostart, /* Index: 17 */ 1877 sd_core_iostart, /* Index: 18 */ 1878 1879 /* Chain for USCSI commands (all targets) */ 1880 sd_pm_iostart, /* Index: 19 */ 1881 sd_core_iostart, /* Index: 20 */ 1882 1883 /* Chain for checksumming USCSI commands (all targets) */ 1884 sd_checksum_uscsi_iostart, /* Index: 21 */ 1885 sd_pm_iostart, /* Index: 22 */ 1886 sd_core_iostart, /* Index: 23 */ 1887 1888 /* Chain for "direct" USCSI commands (all targets) */ 1889 sd_core_iostart, /* Index: 24 */ 1890 1891 /* Chain for "direct priority" USCSI commands (all targets) */ 1892 sd_core_iostart, /* Index: 25 */ 1893 1894 /* 1895 * Chain for buf IO for large sector size disk drive targets 1896 * with RMW needed with checksumming (PM enabled) 1897 */ 1898 sd_mapblockaddr_iostart, /* Index: 26 */ 1899 sd_mapblocksize_iostart, /* Index: 27 */ 1900 sd_checksum_iostart, /* Index: 28 */ 1901 sd_pm_iostart, /* Index: 29 */ 1902 sd_core_iostart, /* Index: 30 */ 1903 1904 /* 1905 * Chain for buf IO for large sector size disk drive targets 1906 * with RMW needed with checksumming (PM disabled) 1907 */ 1908 sd_mapblockaddr_iostart, /* Index: 31 */ 1909 sd_mapblocksize_iostart, /* Index: 32 */ 1910 sd_checksum_iostart, /* Index: 33 */ 1911 sd_core_iostart, /* Index: 34 */ 1912 1913 }; 1914 1915 /* 1916 * Macros to locate the first function of each iostart chain in the 1917 * sd_iostart_chain[] array. These are located by the index in the array.
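 * For example, SD_CHAIN_USCSI_CMD_IOSTART (19) enters the chain at
 * sd_pm_iostart(), while SD_CHAIN_DIRECT_CMD_IOSTART (24) bypasses power
 * management and enters directly at sd_core_iostart().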
1918 */ 1919 #define SD_CHAIN_DISK_IOSTART 0 1920 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1921 #define SD_CHAIN_MSS_DISK_IOSTART 5 1922 #define SD_CHAIN_RMMEDIA_IOSTART 5 1923 #define SD_CHAIN_MSS_DISK_IOSTART_NO_PM 9 1924 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1925 #define SD_CHAIN_CHKSUM_IOSTART 12 1926 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1927 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1928 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1929 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1930 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1931 #define SD_CHAIN_MSS_CHKSUM_IOSTART 26 1932 #define SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM 31 1933 1934 1935 /* 1936 * Table of function pointers for the iodone-side routines for the driver- 1937 * internal layering mechanism. The calling sequence for iodone routines 1938 * uses a decrementing table index, so the last routine called in a chain 1939 * must be at the lowest array index location for that chain. The last 1940 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1941 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1942 * of the functions in an iodone side chain must correspond to the ordering 1943 * of the iostart routines for that chain. Note that there is no iodone 1944 * side routine that corresponds to sd_core_iostart(), so there is no 1945 * entry in the table for this. 1946 */ 1947 1948 static sd_chain_t sd_iodone_chain[] = { 1949 1950 /* Chain for buf IO for disk drive targets (PM enabled) */ 1951 sd_buf_iodone, /* Index: 0 */ 1952 sd_mapblockaddr_iodone, /* Index: 1 */ 1953 sd_pm_iodone, /* Index: 2 */ 1954 1955 /* Chain for buf IO for disk drive targets (PM disabled) */ 1956 sd_buf_iodone, /* Index: 3 */ 1957 sd_mapblockaddr_iodone, /* Index: 4 */ 1958 1959 /* 1960 * Chain for buf IO for removable-media or large sector size 1961 * disk drive targets with RMW needed (PM enabled) 1962 */ 1963 sd_buf_iodone, /* Index: 5 */ 1964 sd_mapblockaddr_iodone, /* Index: 6 */ 1965 sd_mapblocksize_iodone, /* Index: 7 */ 1966 sd_pm_iodone, /* Index: 8 */ 1967 1968 /* 1969 * Chain for buf IO for removable-media or large sector size 1970 * disk drive targets with RMW needed (PM disabled) 1971 */ 1972 sd_buf_iodone, /* Index: 9 */ 1973 sd_mapblockaddr_iodone, /* Index: 10 */ 1974 sd_mapblocksize_iodone, /* Index: 11 */ 1975 1976 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1977 sd_buf_iodone, /* Index: 12 */ 1978 sd_mapblockaddr_iodone, /* Index: 13 */ 1979 sd_checksum_iodone, /* Index: 14 */ 1980 sd_pm_iodone, /* Index: 15 */ 1981 1982 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1983 sd_buf_iodone, /* Index: 16 */ 1984 sd_mapblockaddr_iodone, /* Index: 17 */ 1985 sd_checksum_iodone, /* Index: 18 */ 1986 1987 /* Chain for USCSI commands (non-checksum targets) */ 1988 sd_uscsi_iodone, /* Index: 19 */ 1989 sd_pm_iodone, /* Index: 20 */ 1990 1991 /* Chain for USCSI commands (checksum targets) */ 1992 sd_uscsi_iodone, /* Index: 21 */ 1993 sd_checksum_uscsi_iodone, /* Index: 22 */ 1994 sd_pm_iodone, /* Index: 23 */ 1995 1996 /* Chain for "direct" USCSI commands (all targets) */ 1997 sd_uscsi_iodone, /* Index: 24 */ 1998 1999 /* Chain for "direct priority" USCSI commands (all targets) */ 2000 sd_uscsi_iodone, /* Index: 25 */ 2001 2002 /* 2003 * Chain for buf IO for large sector size disk drive targets 2004 * with checksumming (PM enabled) 2005 */ 2006 sd_buf_iodone, /* Index: 26 */ 2007 sd_mapblockaddr_iodone, /* Index: 27 */ 2008 sd_mapblocksize_iodone, /* Index: 28 */ 2009
sd_checksum_iodone, /* Index: 29 */ 2010 sd_pm_iodone, /* Index: 30 */ 2011 2012 /* 2013 * Chain for buf IO for large sector size disk drive targets 2014 * with checksumming (PM disabled) 2015 */ 2016 sd_buf_iodone, /* Index: 31 */ 2017 sd_mapblockaddr_iodone, /* Index: 32 */ 2018 sd_mapblocksize_iodone, /* Index: 33 */ 2019 sd_checksum_iodone, /* Index: 34 */ 2020 }; 2021 2022 2023 /* 2024 * Macros to locate the "first" function in the sd_iodone_chain[] array for 2025 * each iodone-side chain. These are located by the array index, but as the 2026 * iodone side functions are called in a decrementing-index order, the 2027 * highest index number in each chain must be specified (as these correspond 2028 * to the first function in the iodone chain that will be called by the core 2029 * at IO completion time). 2030 */ 2031 2032 #define SD_CHAIN_DISK_IODONE 2 2033 #define SD_CHAIN_DISK_IODONE_NO_PM 4 2034 #define SD_CHAIN_RMMEDIA_IODONE 8 2035 #define SD_CHAIN_MSS_DISK_IODONE 8 2036 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11 2037 #define SD_CHAIN_MSS_DISK_IODONE_NO_PM 11 2038 #define SD_CHAIN_CHKSUM_IODONE 15 2039 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18 2040 #define SD_CHAIN_USCSI_CMD_IODONE 20 2041 #define SD_CHAIN_USCSI_CHKSUM_IODONE 22 2042 #define SD_CHAIN_DIRECT_CMD_IODONE 24 2043 #define SD_CHAIN_PRIORITY_CMD_IODONE 25 2044 #define SD_CHAIN_MSS_CHKSUM_IODONE 30 2045 #define SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM 34 2046 2047 2048 2049 /* 2050 * Array to map a layering chain index to the appropriate initpkt routine. 2051 * The redundant entries are present so that the index used for accessing 2052 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 2053 * with this table as well. 2054 */ 2055 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **); 2056 2057 static sd_initpkt_t sd_initpkt_map[] = { 2058 2059 /* Chain for buf IO for disk drive targets (PM enabled) */ 2060 sd_initpkt_for_buf, /* Index: 0 */ 2061 sd_initpkt_for_buf, /* Index: 1 */ 2062 sd_initpkt_for_buf, /* Index: 2 */ 2063 2064 /* Chain for buf IO for disk drive targets (PM disabled) */ 2065 sd_initpkt_for_buf, /* Index: 3 */ 2066 sd_initpkt_for_buf, /* Index: 4 */ 2067 2068 /* 2069 * Chain for buf IO for removable-media or large sector size 2070 * disk drive targets (PM enabled) 2071 */ 2072 sd_initpkt_for_buf, /* Index: 5 */ 2073 sd_initpkt_for_buf, /* Index: 6 */ 2074 sd_initpkt_for_buf, /* Index: 7 */ 2075 sd_initpkt_for_buf, /* Index: 8 */ 2076 2077 /* 2078 * Chain for buf IO for removable-media or large sector size 2079 * disk drive targets (PM disabled) 2080 */ 2081 sd_initpkt_for_buf, /* Index: 9 */ 2082 sd_initpkt_for_buf, /* Index: 10 */ 2083 sd_initpkt_for_buf, /* Index: 11 */ 2084 2085 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 2086 sd_initpkt_for_buf, /* Index: 12 */ 2087 sd_initpkt_for_buf, /* Index: 13 */ 2088 sd_initpkt_for_buf, /* Index: 14 */ 2089 sd_initpkt_for_buf, /* Index: 15 */ 2090 2091 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 2092 sd_initpkt_for_buf, /* Index: 16 */ 2093 sd_initpkt_for_buf, /* Index: 17 */ 2094 sd_initpkt_for_buf, /* Index: 18 */ 2095 2096 /* Chain for USCSI commands (non-checksum targets) */ 2097 sd_initpkt_for_uscsi, /* Index: 19 */ 2098 sd_initpkt_for_uscsi, /* Index: 20 */ 2099 2100 /* Chain for USCSI commands (checksum targets) */ 2101 sd_initpkt_for_uscsi, /* Index: 21 */ 2102 sd_initpkt_for_uscsi, /* Index: 22 */ 2103 sd_initpkt_for_uscsi, /* Index: 23 */ 2104 2105 /* Chain for "direct" USCSI
commands (all targets) */ 2106 sd_initpkt_for_uscsi, /* Index: 24 */ 2107 2108 /* Chain for "direct priority" USCSI commands (all targets) */ 2109 sd_initpkt_for_uscsi, /* Index: 25 */ 2110 2111 /* 2112 * Chain for buf IO for large sector size disk drive targets 2113 * with checksumming (PM enabled) 2114 */ 2115 sd_initpkt_for_buf, /* Index: 26 */ 2116 sd_initpkt_for_buf, /* Index: 27 */ 2117 sd_initpkt_for_buf, /* Index: 28 */ 2118 sd_initpkt_for_buf, /* Index: 29 */ 2119 sd_initpkt_for_buf, /* Index: 30 */ 2120 2121 /* 2122 * Chain for buf IO for large sector size disk drive targets 2123 * with checksumming (PM disabled) 2124 */ 2125 sd_initpkt_for_buf, /* Index: 31 */ 2126 sd_initpkt_for_buf, /* Index: 32 */ 2127 sd_initpkt_for_buf, /* Index: 33 */ 2128 sd_initpkt_for_buf, /* Index: 34 */ 2129 }; 2130 2131 2132 /* 2133 * Array to map a layering chain index to the appropriate destroypkt routine. 2134 * The redundant entries are present so that the index used for accessing 2135 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 2136 * with this table as well. 2137 */ 2138 typedef void (*sd_destroypkt_t)(struct buf *); 2139 2140 static sd_destroypkt_t sd_destroypkt_map[] = { 2141 2142 /* Chain for buf IO for disk drive targets (PM enabled) */ 2143 sd_destroypkt_for_buf, /* Index: 0 */ 2144 sd_destroypkt_for_buf, /* Index: 1 */ 2145 sd_destroypkt_for_buf, /* Index: 2 */ 2146 2147 /* Chain for buf IO for disk drive targets (PM disabled) */ 2148 sd_destroypkt_for_buf, /* Index: 3 */ 2149 sd_destroypkt_for_buf, /* Index: 4 */ 2150 2151 /* 2152 * Chain for buf IO for removable-media or large sector size 2153 * disk drive targets (PM enabled) 2154 */ 2155 sd_destroypkt_for_buf, /* Index: 5 */ 2156 sd_destroypkt_for_buf, /* Index: 6 */ 2157 sd_destroypkt_for_buf, /* Index: 7 */ 2158 sd_destroypkt_for_buf, /* Index: 8 */ 2159 2160 /* 2161 * Chain for buf IO for removable-media or large sector size 2162 * disk drive targets (PM disabled) 2163 */ 2164 sd_destroypkt_for_buf, /* Index: 9 */ 2165 sd_destroypkt_for_buf, /* Index: 10 */ 2166 sd_destroypkt_for_buf, /* Index: 11 */ 2167 2168 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 2169 sd_destroypkt_for_buf, /* Index: 12 */ 2170 sd_destroypkt_for_buf, /* Index: 13 */ 2171 sd_destroypkt_for_buf, /* Index: 14 */ 2172 sd_destroypkt_for_buf, /* Index: 15 */ 2173 2174 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 2175 sd_destroypkt_for_buf, /* Index: 16 */ 2176 sd_destroypkt_for_buf, /* Index: 17 */ 2177 sd_destroypkt_for_buf, /* Index: 18 */ 2178 2179 /* Chain for USCSI commands (non-checksum targets) */ 2180 sd_destroypkt_for_uscsi, /* Index: 19 */ 2181 sd_destroypkt_for_uscsi, /* Index: 20 */ 2182 2183 /* Chain for USCSI commands (checksum targets) */ 2184 sd_destroypkt_for_uscsi, /* Index: 21 */ 2185 sd_destroypkt_for_uscsi, /* Index: 22 */ 2186 sd_destroypkt_for_uscsi, /* Index: 23 */ 2187 2188 /* Chain for "direct" USCSI commands (all targets) */ 2189 sd_destroypkt_for_uscsi, /* Index: 24 */ 2190 2191 /* Chain for "direct priority" USCSI commands (all targets) */ 2192 sd_destroypkt_for_uscsi, /* Index: 25 */ 2193 2194 /* 2195 * Chain for buf IO for large sector size disk drive targets 2196 * with checksumming (PM enabled) 2197 */ 2198 sd_destroypkt_for_buf, /* Index: 26 */ 2199 sd_destroypkt_for_buf, /* Index: 27 */ 2200 sd_destroypkt_for_buf, /* Index: 28 */ 2201 sd_destroypkt_for_buf, /* Index: 29 */ 2202 sd_destroypkt_for_buf, /* Index: 30 */ 2203 2204 /* 2205 * Chain
for buf IO for large sector size disk drive targets 2206 * with checksumming (PM disabled) 2207 */ 2208 sd_destroypkt_for_buf, /* Index: 31 */ 2209 sd_destroypkt_for_buf, /* Index: 32 */ 2210 sd_destroypkt_for_buf, /* Index: 33 */ 2211 sd_destroypkt_for_buf, /* Index: 34 */ 2212 }; 2213 2214 2215 2216 /* 2217 * Array to map a layering chain index to the appropriate chain "type". 2218 * The chain type indicates a specific property/usage of the chain. 2219 * The redundant entries are present so that the index used for accessing 2220 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 2221 * with this table as well. 2222 */ 2223 2224 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */ 2225 #define SD_CHAIN_BUFIO 1 /* regular buf IO */ 2226 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */ 2227 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */ 2228 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */ 2229 /* (for error recovery) */ 2230 2231 static int sd_chain_type_map[] = { 2232 2233 /* Chain for buf IO for disk drive targets (PM enabled) */ 2234 SD_CHAIN_BUFIO, /* Index: 0 */ 2235 SD_CHAIN_BUFIO, /* Index: 1 */ 2236 SD_CHAIN_BUFIO, /* Index: 2 */ 2237 2238 /* Chain for buf IO for disk drive targets (PM disabled) */ 2239 SD_CHAIN_BUFIO, /* Index: 3 */ 2240 SD_CHAIN_BUFIO, /* Index: 4 */ 2241 2242 /* 2243 * Chain for buf IO for removable-media or large sector size 2244 * disk drive targets (PM enabled) 2245 */ 2246 SD_CHAIN_BUFIO, /* Index: 5 */ 2247 SD_CHAIN_BUFIO, /* Index: 6 */ 2248 SD_CHAIN_BUFIO, /* Index: 7 */ 2249 SD_CHAIN_BUFIO, /* Index: 8 */ 2250 2251 /* 2252 * Chain for buf IO for removable-media or large sector size 2253 * disk drive targets (PM disabled) 2254 */ 2255 SD_CHAIN_BUFIO, /* Index: 9 */ 2256 SD_CHAIN_BUFIO, /* Index: 10 */ 2257 SD_CHAIN_BUFIO, /* Index: 11 */ 2258 2259 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 2260 SD_CHAIN_BUFIO, /* Index: 12 */ 2261 SD_CHAIN_BUFIO, /* Index: 13 */ 2262 SD_CHAIN_BUFIO, /* Index: 14 */ 2263 SD_CHAIN_BUFIO, /* Index: 15 */ 2264 2265 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 2266 SD_CHAIN_BUFIO, /* Index: 16 */ 2267 SD_CHAIN_BUFIO, /* Index: 17 */ 2268 SD_CHAIN_BUFIO, /* Index: 18 */ 2269 2270 /* Chain for USCSI commands (non-checksum targets) */ 2271 SD_CHAIN_USCSI, /* Index: 19 */ 2272 SD_CHAIN_USCSI, /* Index: 20 */ 2273 2274 /* Chain for USCSI commands (checksum targets) */ 2275 SD_CHAIN_USCSI, /* Index: 21 */ 2276 SD_CHAIN_USCSI, /* Index: 22 */ 2277 SD_CHAIN_USCSI, /* Index: 23 */ 2278 2279 /* Chain for "direct" USCSI commands (all targets) */ 2280 SD_CHAIN_DIRECT, /* Index: 24 */ 2281 2282 /* Chain for "direct priority" USCSI commands (all targets) */ 2283 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */ 2284 2285 /* 2286 * Chain for buf IO for large sector size disk drive targets 2287 * with checksumming (PM enabled) 2288 */ 2289 SD_CHAIN_BUFIO, /* Index: 26 */ 2290 SD_CHAIN_BUFIO, /* Index: 27 */ 2291 SD_CHAIN_BUFIO, /* Index: 28 */ 2292 SD_CHAIN_BUFIO, /* Index: 29 */ 2293 SD_CHAIN_BUFIO, /* Index: 30 */ 2294 2295 /* 2296 * Chain for buf IO for large sector size disk drive targets 2297 * with checksumming (PM disabled) 2298 */ 2299 SD_CHAIN_BUFIO, /* Index: 31 */ 2300 SD_CHAIN_BUFIO, /* Index: 32 */ 2301 SD_CHAIN_BUFIO, /* Index: 33 */ 2302 SD_CHAIN_BUFIO, /* Index: 34 */ 2303 }; 2304 2305 2306 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain.
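 * (i.e. the xbuf's xb_chain_iostart index maps to SD_CHAIN_BUFIO in
 * sd_chain_type_map[] above)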
*/ 2307 #define SD_IS_BUFIO(xp) \ 2308 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO) 2309 2310 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */ 2311 #define SD_IS_DIRECT_PRIORITY(xp) \ 2312 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY) 2313 2314 2315 2316 /* 2317 * Struct, array, and macros to map a specific chain to the appropriate 2318 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays. 2319 * 2320 * The sd_chain_index_map[] array is used at attach time to set the various 2321 * un_xxx_chain type members of the sd_lun softstate to the specific layering 2322 * chain to be used with the instance. This allows different instances to use 2323 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart 2324 * and xb_chain_iodone index values in the sd_xbuf are initialized to these 2325 * values at sd_xbuf init time, this allows (1) layering chains to be changed 2326 * dynamically and without the use of locking; and (2) a layer to update the 2327 * xb_chain_io[start|done] member in a given xbuf with its current index value, 2328 * to allow for deferred processing of an IO within the same chain from a 2329 * different execution context. 2330 */ 2331 2332 struct sd_chain_index { 2333 int sci_iostart_index; 2334 int sci_iodone_index; 2335 }; 2336 2337 static struct sd_chain_index sd_chain_index_map[] = { 2338 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2339 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2340 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2341 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2342 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2343 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2344 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2345 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2346 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2347 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2348 { SD_CHAIN_MSS_CHKSUM_IOSTART, SD_CHAIN_MSS_CHKSUM_IODONE }, 2349 { SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM, SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM }, 2350 2351 }; 2352 2353 2354 /* 2355 * The following are indexes into the sd_chain_index_map[] array. 2356 */ 2357 2358 /* un->un_buf_chain_type must be set to one of these */ 2359 #define SD_CHAIN_INFO_DISK 0 2360 #define SD_CHAIN_INFO_DISK_NO_PM 1 2361 #define SD_CHAIN_INFO_RMMEDIA 2 2362 #define SD_CHAIN_INFO_MSS_DISK 2 2363 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2364 #define SD_CHAIN_INFO_MSS_DSK_NO_PM 3 2365 #define SD_CHAIN_INFO_CHKSUM 4 2366 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2367 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM 10 2368 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM_NO_PM 11 2369 2370 /* un->un_uscsi_chain_type must be set to one of these */ 2371 #define SD_CHAIN_INFO_USCSI_CMD 6 2372 /* USCSI with PM disabled is the same as DIRECT */ 2373 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2374 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2375 2376 /* un->un_direct_chain_type must be set to one of these */ 2377 #define SD_CHAIN_INFO_DIRECT_CMD 8 2378 2379 /* un->un_priority_chain_type must be set to one of these */ 2380 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2381 2382 /* size for devid inquiries */ 2383 #define MAX_INQUIRY_SIZE 0xF0 2384 2385 /* 2386 * Macros used by functions to pass a given buf(9S) struct along to the 2387 * next function in the layering chain for further processing.
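 *
 * As an illustrative sketch of the calling convention (not a quote of any
 * one routine), an iostart-side function at 'index' does its work and then
 * passes the buf down the chain with SD_NEXT_IOSTART(index, un, bp); the
 * matching iodone-side function later unwinds the chain with
 * SD_NEXT_IODONE(index, un, bp).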
2388 * 2389 * In the following macros, passing more than three arguments to the called 2390 * routines causes the optimizer for the SPARC compiler to stop doing tail 2391 * call elimination which results in significant performance degradation. 2392 */ 2393 #define SD_BEGIN_IOSTART(index, un, bp) \ 2394 ((*(sd_iostart_chain[index]))(index, un, bp)) 2395 2396 #define SD_BEGIN_IODONE(index, un, bp) \ 2397 ((*(sd_iodone_chain[index]))(index, un, bp)) 2398 2399 #define SD_NEXT_IOSTART(index, un, bp) \ 2400 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2401 2402 #define SD_NEXT_IODONE(index, un, bp) \ 2403 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2404 2405 /* 2406 * Function: _init 2407 * 2408 * Description: This is the driver _init(9E) entry point. 2409 * 2410 * Return Code: Returns the value from mod_install(9F) or 2411 * ddi_soft_state_init(9F) as appropriate. 2412 * 2413 * Context: Called when driver module loaded. 2414 */ 2415 2416 int 2417 _init(void) 2418 { 2419 int err; 2420 2421 /* establish driver name from module name */ 2422 sd_label = (char *)mod_modname(&modlinkage); 2423 2424 #ifndef XPV_HVM_DRIVER 2425 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2426 SD_MAXUNIT); 2427 if (err != 0) { 2428 return (err); 2429 } 2430 2431 #else /* XPV_HVM_DRIVER */ 2432 /* Remove the leading "hvm_" from the module name */ 2433 ASSERT(strncmp(sd_label, "hvm_", strlen("hvm_")) == 0); 2434 sd_label += strlen("hvm_"); 2435 2436 #endif /* XPV_HVM_DRIVER */ 2437 2438 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2439 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2440 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2441 2442 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2443 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2444 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2445 2446 /* 2447 * it's ok to init here even for fibre device 2448 */ 2449 sd_scsi_probe_cache_init(); 2450 2451 sd_scsi_target_lun_init(); 2452 2453 /* 2454 * Creating taskq before mod_install ensures that all callers (threads) 2455 * that enter the module after a successful mod_install encounter 2456 * a valid taskq. 2457 */ 2458 sd_taskq_create(); 2459 2460 err = mod_install(&modlinkage); 2461 if (err != 0) { 2462 /* delete taskq if install fails */ 2463 sd_taskq_delete(); 2464 2465 mutex_destroy(&sd_detach_mutex); 2466 mutex_destroy(&sd_log_mutex); 2467 mutex_destroy(&sd_label_mutex); 2468 2469 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2470 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2471 cv_destroy(&sd_tr.srq_inprocess_cv); 2472 2473 sd_scsi_probe_cache_fini(); 2474 2475 sd_scsi_target_lun_fini(); 2476 2477 #ifndef XPV_HVM_DRIVER 2478 ddi_soft_state_fini(&sd_state); 2479 #endif /* !XPV_HVM_DRIVER */ 2480 return (err); 2481 } 2482 2483 return (err); 2484 } 2485 2486 2487 /* 2488 * Function: _fini 2489 * 2490 * Description: This is the driver _fini(9E) entry point. 2491 * 2492 * Return Code: Returns the value from mod_remove(9F) 2493 * 2494 * Context: Called when driver module is unloaded. 
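 *
 * Note: the cleanup below runs only when mod_remove(9F) succeeds, i.e.
 * once no instances remain attached; on failure _fini simply returns the
 * error and the module stays loaded.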
2495 */ 2496 2497 int 2498 _fini(void) 2499 { 2500 int err; 2501 2502 if ((err = mod_remove(&modlinkage)) != 0) { 2503 return (err); 2504 } 2505 2506 sd_taskq_delete(); 2507 2508 mutex_destroy(&sd_detach_mutex); 2509 mutex_destroy(&sd_log_mutex); 2510 mutex_destroy(&sd_label_mutex); 2511 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2512 2513 sd_scsi_probe_cache_fini(); 2514 2515 sd_scsi_target_lun_fini(); 2516 2517 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2518 cv_destroy(&sd_tr.srq_inprocess_cv); 2519 2520 #ifndef XPV_HVM_DRIVER 2521 ddi_soft_state_fini(&sd_state); 2522 #endif /* !XPV_HVM_DRIVER */ 2523 2524 return (err); 2525 } 2526 2527 2528 /* 2529 * Function: _info 2530 * 2531 * Description: This is the driver _info(9E) entry point. 2532 * 2533 * Arguments: modinfop - pointer to the driver modinfo structure 2534 * 2535 * Return Code: Returns the value from mod_info(9F). 2536 * 2537 * Context: Kernel thread context 2538 */ 2539 2540 int 2541 _info(struct modinfo *modinfop) 2542 { 2543 return (mod_info(&modlinkage, modinfop)); 2544 } 2545 2546 2547 /* 2548 * The following routines implement the driver message logging facility. 2549 * They provide component- and level-based debug output filtering. 2550 * Output may also be restricted to messages for a single instance by 2551 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set 2552 * to NULL, then messages for all instances are printed. 2553 * 2554 * These routines have been cloned from each other due to the language 2555 * constraints of macros and variable argument list processing. 2556 */ 2557 2558 2559 /* 2560 * Function: sd_log_err 2561 * 2562 * Description: This routine is called by the SD_ERROR macro for debug 2563 * logging of error conditions. 2564 * 2565 * Arguments: comp - driver component being logged 2566 * un - pointer to the driver soft state (unit) structure 2567 * fmt - error string and format to be logged 2568 */ 2569 2570 static void 2571 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...) 2572 { 2573 va_list ap; 2574 dev_info_t *dev; 2575 2576 ASSERT(un != NULL); 2577 dev = SD_DEVINFO(un); 2578 ASSERT(dev != NULL); 2579 2580 /* 2581 * Filter messages based on the global component and level masks. 2582 * Also print if un matches the value of sd_debug_un, or if 2583 * sd_debug_un is set to NULL. 2584 */ 2585 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) && 2586 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2587 mutex_enter(&sd_log_mutex); 2588 va_start(ap, fmt); 2589 (void) vsprintf(sd_log_buf, fmt, ap); 2590 va_end(ap); 2591 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2592 mutex_exit(&sd_log_mutex); 2593 } 2594 #ifdef SD_FAULT_INJECTION 2595 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2596 if (un->sd_injection_mask & comp) { 2597 mutex_enter(&sd_log_mutex); 2598 va_start(ap, fmt); 2599 (void) vsprintf(sd_log_buf, fmt, ap); 2600 va_end(ap); 2601 sd_injection_log(sd_log_buf, un); 2602 mutex_exit(&sd_log_mutex); 2603 } 2604 #endif 2605 } 2606 2607 2608 /* 2609 * Function: sd_log_info 2610 * 2611 * Description: This routine is called by the SD_INFO macro for debug 2612 * logging of general purpose informational conditions. 2613 * 2614 * Arguments: component - driver component being logged 2615 * un - pointer to the driver soft state (unit) structure 2616 * fmt - info string and format to be logged 2617 */ 2618 2619 static void 2620 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2621 { 2622 va_list ap; 2623 dev_info_t *dev; 2624 2625 ASSERT(un != NULL); 2626 dev = SD_DEVINFO(un); 2627 ASSERT(dev != NULL); 2628 2629 /* 2630 * Filter messages based on the global component and level masks. 2631 * Also print if un matches the value of sd_debug_un, or if 2632 * sd_debug_un is set to NULL. 2633 */ 2634 if ((sd_component_mask & component) && 2635 (sd_level_mask & SD_LOGMASK_INFO) && 2636 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2637 mutex_enter(&sd_log_mutex); 2638 va_start(ap, fmt); 2639 (void) vsprintf(sd_log_buf, fmt, ap); 2640 va_end(ap); 2641 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2642 mutex_exit(&sd_log_mutex); 2643 } 2644 #ifdef SD_FAULT_INJECTION 2645 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2646 if (un->sd_injection_mask & component) { 2647 mutex_enter(&sd_log_mutex); 2648 va_start(ap, fmt); 2649 (void) vsprintf(sd_log_buf, fmt, ap); 2650 va_end(ap); 2651 sd_injection_log(sd_log_buf, un); 2652 mutex_exit(&sd_log_mutex); 2653 } 2654 #endif 2655 } 2656 2657 2658 /* 2659 * Function: sd_log_trace 2660 * 2661 * Description: This routine is called by the SD_TRACE macro for debug 2662 * logging of trace conditions (i.e. function entry/exit). 2663 * 2664 * Arguments: component - driver component being logged 2665 * un - pointer to the driver soft state (unit) structure 2666 * fmt - trace string and format to be logged 2667 */ 2668 2669 static void 2670 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...) 2671 { 2672 va_list ap; 2673 dev_info_t *dev; 2674 2675 ASSERT(un != NULL); 2676 dev = SD_DEVINFO(un); 2677 ASSERT(dev != NULL); 2678 2679 /* 2680 * Filter messages based on the global component and level masks. 2681 * Also print if un matches the value of sd_debug_un, or if 2682 * sd_debug_un is set to NULL. 2683 */ 2684 if ((sd_component_mask & component) && 2685 (sd_level_mask & SD_LOGMASK_TRACE) && 2686 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2687 mutex_enter(&sd_log_mutex); 2688 va_start(ap, fmt); 2689 (void) vsprintf(sd_log_buf, fmt, ap); 2690 va_end(ap); 2691 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2692 mutex_exit(&sd_log_mutex); 2693 } 2694 #ifdef SD_FAULT_INJECTION 2695 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2696 if (un->sd_injection_mask & component) { 2697 mutex_enter(&sd_log_mutex); 2698 va_start(ap, fmt); 2699 (void) vsprintf(sd_log_buf, fmt, ap); 2700 va_end(ap); 2701 sd_injection_log(sd_log_buf, un); 2702 mutex_exit(&sd_log_mutex); 2703 } 2704 #endif 2705 } 2706 2707 2708 /* 2709 * Function: sdprobe 2710 * 2711 * Description: This is the driver probe(9e) entry point function. 2712 * 2713 * Arguments: devi - opaque device info handle 2714 * 2715 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2716 * DDI_PROBE_FAILURE: If the probe failed. 2717 * DDI_PROBE_PARTIAL: If the instance is not present now, 2718 * but may be present in the future. 2719 */ 2720 2721 static int 2722 sdprobe(dev_info_t *devi) 2723 { 2724 struct scsi_device *devp; 2725 int rval; 2726 #ifndef XPV_HVM_DRIVER 2727 int instance = ddi_get_instance(devi); 2728 #endif /* !XPV_HVM_DRIVER */ 2729 2730 /* 2731 * if it wasn't for pln, sdprobe could actually be nulldev 2732 * in the "__fibre" case. 2733 */ 2734 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2735 return (DDI_PROBE_DONTCARE); 2736 } 2737 2738 devp = ddi_get_driver_private(devi); 2739 2740 if (devp == NULL) { 2741 /* Oops... nexus driver is mis-configured...
*/ 2742 return (DDI_PROBE_FAILURE); 2743 } 2744 2745 #ifndef XPV_HVM_DRIVER 2746 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2747 return (DDI_PROBE_PARTIAL); 2748 } 2749 #endif /* !XPV_HVM_DRIVER */ 2750 2751 /* 2752 * Call the SCSA utility probe routine to see if we actually 2753 * have a target at this SCSI nexus. 2754 */ 2755 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2756 case SCSIPROBE_EXISTS: 2757 switch (devp->sd_inq->inq_dtype) { 2758 case DTYPE_DIRECT: 2759 rval = DDI_PROBE_SUCCESS; 2760 break; 2761 case DTYPE_RODIRECT: 2762 /* CDs etc. Can be removable media */ 2763 rval = DDI_PROBE_SUCCESS; 2764 break; 2765 case DTYPE_OPTICAL: 2766 /* 2767 * Rewritable optical driver HP115AA 2768 * Can also be removable media 2769 */ 2770 2771 /* 2772 * Do not attempt to bind to DTYPE_OPTICAL if 2773 * pre solaris 9 sparc sd behavior is required 2774 * 2775 * If first time through and sd_dtype_optical_bind 2776 * has not been set in /etc/system check properties 2777 */ 2778 2779 if (sd_dtype_optical_bind < 0) { 2780 sd_dtype_optical_bind = ddi_prop_get_int 2781 (DDI_DEV_T_ANY, devi, 0, 2782 "optical-device-bind", 1); 2783 } 2784 2785 if (sd_dtype_optical_bind == 0) { 2786 rval = DDI_PROBE_FAILURE; 2787 } else { 2788 rval = DDI_PROBE_SUCCESS; 2789 } 2790 break; 2791 2792 case DTYPE_NOTPRESENT: 2793 default: 2794 rval = DDI_PROBE_FAILURE; 2795 break; 2796 } 2797 break; 2798 default: 2799 rval = DDI_PROBE_PARTIAL; 2800 break; 2801 } 2802 2803 /* 2804 * This routine checks for resource allocation prior to freeing, 2805 * so it will take care of the "smart probing" case where a 2806 * scsi_probe() may or may not have been issued and will *not* 2807 * free previously-freed resources. 2808 */ 2809 scsi_unprobe(devp); 2810 return (rval); 2811 } 2812 2813 2814 /* 2815 * Function: sdinfo 2816 * 2817 * Description: This is the driver getinfo(9e) entry point function. 2818 * Given the device number, return the devinfo pointer from 2819 * the scsi_device structure or the instance number 2820 * associated with the dev_t. 2821 * 2822 * Arguments: dip - pointer to device info structure 2823 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2824 * DDI_INFO_DEVT2INSTANCE) 2825 * arg - driver dev_t 2826 * resultp - user buffer for request response 2827 * 2828 * Return Code: DDI_SUCCESS 2829 * DDI_FAILURE 2830 */ 2831 /* ARGSUSED */ 2832 static int 2833 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2834 { 2835 struct sd_lun *un; 2836 dev_t dev; 2837 int instance; 2838 int error; 2839 2840 switch (infocmd) { 2841 case DDI_INFO_DEVT2DEVINFO: 2842 dev = (dev_t)arg; 2843 instance = SDUNIT(dev); 2844 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2845 return (DDI_FAILURE); 2846 } 2847 *result = (void *) SD_DEVINFO(un); 2848 error = DDI_SUCCESS; 2849 break; 2850 case DDI_INFO_DEVT2INSTANCE: 2851 dev = (dev_t)arg; 2852 instance = SDUNIT(dev); 2853 *result = (void *)(uintptr_t)instance; 2854 error = DDI_SUCCESS; 2855 break; 2856 default: 2857 error = DDI_FAILURE; 2858 } 2859 return (error); 2860 } 2861 2862 /* 2863 * Function: sd_prop_op 2864 * 2865 * Description: This is the driver prop_op(9e) entry point function. 2866 * Return the number of blocks for the partition in question 2867 * or forward the request to the property facilities. 
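 * (For example, partition size queries are answered via cmlb_prop_op(),
 * while unrecognized properties fall through to ddi_prop_op(); the exact
 * set of property names handled by cmlb is an assumption here, see the
 * cmlb module for the authoritative list.)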
2868 * 2869 * Arguments: dev - device number 2870 * dip - pointer to device info structure 2871 * prop_op - property operator 2872 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2873 * name - pointer to property name 2874 * valuep - pointer or address of the user buffer 2875 * lengthp - property length 2876 * 2877 * Return Code: DDI_PROP_SUCCESS 2878 * DDI_PROP_NOT_FOUND 2879 * DDI_PROP_UNDEFINED 2880 * DDI_PROP_NO_MEMORY 2881 * DDI_PROP_BUF_TOO_SMALL 2882 */ 2883 2884 static int 2885 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2886 char *name, caddr_t valuep, int *lengthp) 2887 { 2888 struct sd_lun *un; 2889 2890 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2891 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2892 name, valuep, lengthp)); 2893 2894 return (cmlb_prop_op(un->un_cmlbhandle, 2895 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2896 SDPART(dev), (void *)SD_PATH_DIRECT)); 2897 } 2898 2899 /* 2900 * The following functions are for smart probing: 2901 * sd_scsi_probe_cache_init() 2902 * sd_scsi_probe_cache_fini() 2903 * sd_scsi_clear_probe_cache() 2904 * sd_scsi_probe_with_cache() 2905 */ 2906 2907 /* 2908 * Function: sd_scsi_probe_cache_init 2909 * 2910 * Description: Initializes the probe response cache mutex and head pointer. 2911 * 2912 * Context: Kernel thread context 2913 */ 2914 2915 static void 2916 sd_scsi_probe_cache_init(void) 2917 { 2918 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2919 sd_scsi_probe_cache_head = NULL; 2920 } 2921 2922 2923 /* 2924 * Function: sd_scsi_probe_cache_fini 2925 * 2926 * Description: Frees all resources associated with the probe response cache. 2927 * 2928 * Context: Kernel thread context 2929 */ 2930 2931 static void 2932 sd_scsi_probe_cache_fini(void) 2933 { 2934 struct sd_scsi_probe_cache *cp; 2935 struct sd_scsi_probe_cache *ncp; 2936 2937 /* Clean up our smart probing linked list */ 2938 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2939 ncp = cp->next; 2940 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2941 } 2942 sd_scsi_probe_cache_head = NULL; 2943 mutex_destroy(&sd_scsi_probe_cache_mutex); 2944 } 2945 2946 2947 /* 2948 * Function: sd_scsi_clear_probe_cache 2949 * 2950 * Description: This routine clears the probe response cache. This is 2951 * done when open() returns ENXIO so that when deferred 2952 * attach is attempted (possibly after a device has been 2953 * turned on) we will retry the probe. Since we don't know 2954 * which target we failed to open, we just clear the 2955 * entire cache. 2956 * 2957 * Context: Kernel thread context 2958 */ 2959 2960 static void 2961 sd_scsi_clear_probe_cache(void) 2962 { 2963 struct sd_scsi_probe_cache *cp; 2964 int i; 2965 2966 mutex_enter(&sd_scsi_probe_cache_mutex); 2967 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2968 /* 2969 * Reset all entries to SCSIPROBE_EXISTS. This will 2970 * force probing to be performed the next time 2971 * sd_scsi_probe_with_cache is called. 2972 */ 2973 for (i = 0; i < NTARGETS_WIDE; i++) { 2974 cp->cache[i] = SCSIPROBE_EXISTS; 2975 } 2976 } 2977 mutex_exit(&sd_scsi_probe_cache_mutex); 2978 } 2979 2980 2981 /* 2982 * Function: sd_scsi_probe_with_cache 2983 * 2984 * Description: This routine implements support for a scsi device probe 2985 * with cache. The driver maintains a cache of the target 2986 * responses to scsi probes. 
If we get no response from a 2987 * target during a probe inquiry, we remember that, and we 2988 * avoid additional calls to scsi_probe on non-zero LUNs 2989 * on the same target until the cache is cleared. By doing 2990 * so we avoid the 1/4 sec selection timeout for nonzero 2991 * LUNs. lun0 of a target is always probed. 2992 * 2993 * Arguments: devp - Pointer to a scsi_device(9S) structure 2994 * waitfunc - indicates what the allocator routines should 2995 * do when resources are not available. This value 2996 * is passed on to scsi_probe() when that routine 2997 * is called. 2998 * 2999 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 3000 * otherwise the value returned by scsi_probe(9F). 3001 * 3002 * Context: Kernel thread context 3003 */ 3004 3005 static int 3006 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 3007 { 3008 struct sd_scsi_probe_cache *cp; 3009 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 3010 int lun, tgt; 3011 3012 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 3013 SCSI_ADDR_PROP_LUN, 0); 3014 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 3015 SCSI_ADDR_PROP_TARGET, -1); 3016 3017 /* Make sure caching enabled and target in range */ 3018 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 3019 /* do it the old way (no cache) */ 3020 return (scsi_probe(devp, waitfn)); 3021 } 3022 3023 mutex_enter(&sd_scsi_probe_cache_mutex); 3024 3025 /* Find the cache for this scsi bus instance */ 3026 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 3027 if (cp->pdip == pdip) { 3028 break; 3029 } 3030 } 3031 3032 /* If we can't find a cache for this pdip, create one */ 3033 if (cp == NULL) { 3034 int i; 3035 3036 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 3037 KM_SLEEP); 3038 cp->pdip = pdip; 3039 cp->next = sd_scsi_probe_cache_head; 3040 sd_scsi_probe_cache_head = cp; 3041 for (i = 0; i < NTARGETS_WIDE; i++) { 3042 cp->cache[i] = SCSIPROBE_EXISTS; 3043 } 3044 } 3045 3046 mutex_exit(&sd_scsi_probe_cache_mutex); 3047 3048 /* Recompute the cache for this target if LUN zero */ 3049 if (lun == 0) { 3050 cp->cache[tgt] = SCSIPROBE_EXISTS; 3051 } 3052 3053 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 3054 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 3055 return (SCSIPROBE_NORESP); 3056 } 3057 3058 /* Do the actual probe; save & return the result */ 3059 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 3060 } 3061 3062 3063 /* 3064 * Function: sd_scsi_target_lun_init 3065 * 3066 * Description: Initializes the attached lun chain mutex and head pointer. 
3067 * 3068 * Context: Kernel thread context 3069 */ 3070 3071 static void 3072 sd_scsi_target_lun_init(void) 3073 { 3074 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 3075 sd_scsi_target_lun_head = NULL; 3076 } 3077 3078 3079 /* 3080 * Function: sd_scsi_target_lun_fini 3081 * 3082 * Description: Frees all resources associated with the attached lun 3083 * chain. 3084 * 3085 * Context: Kernel thread context 3086 */ 3087 3088 static void 3089 sd_scsi_target_lun_fini(void) 3090 { 3091 struct sd_scsi_hba_tgt_lun *cp; 3092 struct sd_scsi_hba_tgt_lun *ncp; 3093 3094 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 3095 ncp = cp->next; 3096 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 3097 } 3098 sd_scsi_target_lun_head = NULL; 3099 mutex_destroy(&sd_scsi_target_lun_mutex); 3100 } 3101 3102 3103 /* 3104 * Function: sd_scsi_get_target_lun_count 3105 * 3106 * Description: This routine checks the attached lun chain to see how many 3107 * luns are attached on the specified SCSI controller and 3108 * target. Currently, some capabilities, such as tagged 3109 * queueing, are supported by the HBA on a per-target basis, 3110 * so all luns on a target share the same capabilities. Based 3111 * on this assumption, sd should set these capabilities only 3112 * once per target. This function is called when sd needs to 3113 * determine how many luns are already attached on a target. 3114 * 3115 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 3116 * controller device. 3117 * target - The target ID on the controller's SCSI bus. 3118 * 3119 * Return Code: The number of luns attached on the specified target and 3120 * controller. 3121 * -1 if the target ID is not in parallel SCSI scope or the 3122 * given dip is not in the chain. 3123 * 3124 * Context: Kernel thread context 3125 */ 3126 3127 static int 3128 sd_scsi_get_target_lun_count(dev_info_t *dip, int target) 3129 { 3130 struct sd_scsi_hba_tgt_lun *cp; 3131 3132 if ((target < 0) || (target >= NTARGETS_WIDE)) { 3133 return (-1); 3134 } 3135 3136 mutex_enter(&sd_scsi_target_lun_mutex); 3137 3138 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 3139 if (cp->pdip == dip) { 3140 break; 3141 } 3142 } 3143 3144 mutex_exit(&sd_scsi_target_lun_mutex); 3145 3146 if (cp == NULL) { 3147 return (-1); 3148 } 3149 3150 return (cp->nlun[target]); 3151 } 3152 3153 3154 /* 3155 * Function: sd_scsi_update_lun_on_target 3156 * 3157 * Description: This routine is used to update the attached lun chain when a 3158 * lun is attached or detached on a target. 3159 * 3160 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 3161 * controller device. 3162 * target - The target ID on the controller's SCSI bus. 3163 * flag - Indicates whether the lun is attached or detached.
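 * (SD_SCSI_LUN_ATTACH increments the per-target lun count; any
 * other value, e.g. a detach flag, decrements it.)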
3164 * 3165 * Context: Kernel thread context 3166 */ 3167 3168 static void 3169 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 3170 { 3171 struct sd_scsi_hba_tgt_lun *cp; 3172 3173 mutex_enter(&sd_scsi_target_lun_mutex); 3174 3175 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 3176 if (cp->pdip == dip) { 3177 break; 3178 } 3179 } 3180 3181 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 3182 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 3183 KM_SLEEP); 3184 cp->pdip = dip; 3185 cp->next = sd_scsi_target_lun_head; 3186 sd_scsi_target_lun_head = cp; 3187 } 3188 3189 mutex_exit(&sd_scsi_target_lun_mutex); 3190 3191 if (cp != NULL) { 3192 if (flag == SD_SCSI_LUN_ATTACH) { 3193 cp->nlun[target]++; 3194 } else { 3195 cp->nlun[target]--; 3196 } 3197 } 3198 } 3199 3200 3201 /* 3202 * Function: sd_spin_up_unit 3203 * 3204 * Description: Issues the following commands to spin up the device: 3205 * START STOP UNIT and INQUIRY. 3206 * 3207 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3208 * structure for this target. 3209 * 3210 * Return Code: 0 - success 3211 * EIO - failure 3212 * EACCES - reservation conflict 3213 * 3214 * Context: Kernel thread context 3215 */ 3216 3217 static int 3218 sd_spin_up_unit(sd_ssc_t *ssc) 3219 { 3220 size_t resid = 0; 3221 int has_conflict = FALSE; 3222 uchar_t *bufaddr; 3223 int status; 3224 struct sd_lun *un; 3225 3226 ASSERT(ssc != NULL); 3227 un = ssc->ssc_un; 3228 ASSERT(un != NULL); 3229 3230 /* 3231 * Send a throwaway START UNIT command. 3232 * 3233 * If we fail on this, we don't care presently what precisely 3234 * is wrong. EMC's arrays will also fail this with a check 3235 * condition (0x2/0x4/0x3) if the device is "inactive," but 3236 * we don't want to fail the attach because it may become 3237 * "active" later. 3238 * We don't know whether power condition is supported at 3239 * this stage, so use the START STOP bit. 3240 */ 3241 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 3242 SD_TARGET_START, SD_PATH_DIRECT); 3243 3244 if (status != 0) { 3245 if (status == EACCES) 3246 has_conflict = TRUE; 3247 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3248 } 3249 3250 /* 3251 * Send another INQUIRY command to the target. This is necessary for 3252 * non-removable media direct access devices because their INQUIRY data 3253 * may not be fully qualified until they are spun up (perhaps via the 3254 * START command above). (Note: This seems to be needed for some 3255 * legacy devices only.) The INQUIRY command should succeed even if a 3256 * Reservation Conflict is present. 3257 */ 3258 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 3259 3260 if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid) 3261 != 0) { 3262 kmem_free(bufaddr, SUN_INQSIZE); 3263 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 3264 return (EIO); 3265 } 3266 3267 /* 3268 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 3269 * Note that this routine does not return a failure here even if the 3270 * INQUIRY command did not return any data. This is a legacy behavior. 3271 */ 3272 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 3273 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 3274 } 3275 3276 kmem_free(bufaddr, SUN_INQSIZE); 3277 3278 /* If we hit a reservation conflict above, tell the caller.
*/ 3279 if (has_conflict == TRUE) { 3280 return (EACCES); 3281 } 3282 3283 return (0); 3284 } 3285 3286 #ifdef _LP64 3287 /* 3288 * Function: sd_enable_descr_sense 3289 * 3290 * Description: This routine attempts to select descriptor sense format 3291 * using the Control mode page. Devices that support 64 bit 3292 * LBAs (for >2TB luns) should also implement descriptor 3293 * sense data so we will call this function whenever we see 3294 * a lun larger than 2TB. If for some reason the device 3295 * supports 64 bit LBAs but doesn't support descriptor sense 3296 * presumably the mode select will fail. Everything will 3297 * continue to work normally except that we will not get 3298 * complete sense data for commands that fail with an LBA 3299 * larger than 32 bits. 3300 * 3301 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3302 * structure for this target. 3303 * 3304 * Context: Kernel thread context only 3305 */ 3306 3307 static void 3308 sd_enable_descr_sense(sd_ssc_t *ssc) 3309 { 3310 uchar_t *header; 3311 struct mode_control_scsi3 *ctrl_bufp; 3312 size_t buflen; 3313 size_t bd_len; 3314 int status; 3315 struct sd_lun *un; 3316 3317 ASSERT(ssc != NULL); 3318 un = ssc->ssc_un; 3319 ASSERT(un != NULL); 3320 3321 /* 3322 * Read MODE SENSE page 0xA, Control Mode Page 3323 */ 3324 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3325 sizeof (struct mode_control_scsi3); 3326 header = kmem_zalloc(buflen, KM_SLEEP); 3327 3328 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 3329 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT); 3330 3331 if (status != 0) { 3332 SD_ERROR(SD_LOG_COMMON, un, 3333 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3334 goto eds_exit; 3335 } 3336 3337 /* 3338 * Determine size of Block Descriptors in order to locate 3339 * the mode page data. ATAPI devices return 0, SCSI devices 3340 * should return MODE_BLK_DESC_LENGTH. 3341 */ 3342 bd_len = ((struct mode_header *)header)->bdesc_length; 3343 3344 /* Clear the mode data length field for MODE SELECT */ 3345 ((struct mode_header *)header)->length = 0; 3346 3347 ctrl_bufp = (struct mode_control_scsi3 *) 3348 (header + MODE_HEADER_LENGTH + bd_len); 3349 3350 /* 3351 * If the page length is smaller than the expected value, 3352 * the target device doesn't support D_SENSE. Bail out here. 3353 */ 3354 if (ctrl_bufp->mode_page.length < 3355 sizeof (struct mode_control_scsi3) - 2) { 3356 SD_ERROR(SD_LOG_COMMON, un, 3357 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3358 goto eds_exit; 3359 } 3360 3361 /* 3362 * Clear PS bit for MODE SELECT 3363 */ 3364 ctrl_bufp->mode_page.ps = 0; 3365 3366 /* 3367 * Set D_SENSE to enable descriptor sense format. 
3368 */ 3369 ctrl_bufp->d_sense = 1; 3370 3371 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3372 3373 /* 3374 * Use MODE SELECT to commit the change to the D_SENSE bit 3375 */ 3376 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 3377 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT); 3378 3379 if (status != 0) { 3380 SD_INFO(SD_LOG_COMMON, un, 3381 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3382 } else { 3383 kmem_free(header, buflen); 3384 return; 3385 } 3386 3387 eds_exit: 3388 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3389 kmem_free(header, buflen); 3390 } 3391 3392 /* 3393 * Function: sd_reenable_dsense_task 3394 * 3395 * Description: Re-enable descriptor sense after device or bus reset 3396 * 3397 * Context: Executes in a taskq() thread context 3398 */ 3399 static void 3400 sd_reenable_dsense_task(void *arg) 3401 { 3402 struct sd_lun *un = arg; 3403 sd_ssc_t *ssc; 3404 3405 ASSERT(un != NULL); 3406 3407 ssc = sd_ssc_init(un); 3408 sd_enable_descr_sense(ssc); 3409 sd_ssc_fini(ssc); 3410 } 3411 #endif /* _LP64 */ 3412 3413 /* 3414 * Function: sd_set_mmc_caps 3415 * 3416 * Description: This routine determines if the device is MMC compliant and if 3417 * the device supports CDDA via a mode sense of the CDVD 3418 * capabilities mode page. Also checks if the device is a 3419 * dvdram writable device. 3420 * 3421 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3422 * structure for this target. 3423 * 3424 * Context: Kernel thread context only 3425 */ 3426 3427 static void 3428 sd_set_mmc_caps(sd_ssc_t *ssc) 3429 { 3430 struct mode_header_grp2 *sense_mhp; 3431 uchar_t *sense_page; 3432 caddr_t buf; 3433 int bd_len; 3434 int status; 3435 struct uscsi_cmd com; 3436 int rtn; 3437 uchar_t *out_data_rw, *out_data_hd; 3438 uchar_t *rqbuf_rw, *rqbuf_hd; 3439 uchar_t *out_data_gesn; 3440 int gesn_len; 3441 struct sd_lun *un; 3442 3443 ASSERT(ssc != NULL); 3444 un = ssc->ssc_un; 3445 ASSERT(un != NULL); 3446 3447 /* 3448 * The flags which will be set in this function are - mmc compliant, 3449 * dvdram writable device, cdda support. Initialize them to FALSE 3450 * and if a capability is detected - it will be set to TRUE. 3451 */ 3452 un->un_f_mmc_cap = FALSE; 3453 un->un_f_dvdram_writable_device = FALSE; 3454 un->un_f_cfg_cdda = FALSE; 3455 3456 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3457 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3458 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3459 3460 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3461 3462 if (status != 0) { 3463 /* command failed; just return */ 3464 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3465 return; 3466 } 3467 /* 3468 * If the mode sense request for the CDROM CAPABILITIES 3469 * page (0x2A) succeeds the device is assumed to be MMC. 
3470 */ 3471 un->un_f_mmc_cap = TRUE; 3472 3473 /* See if GET STATUS EVENT NOTIFICATION is supported */ 3474 if (un->un_f_mmc_gesn_polling) { 3475 gesn_len = SD_GESN_HEADER_LEN + SD_GESN_MEDIA_DATA_LEN; 3476 out_data_gesn = kmem_zalloc(gesn_len, KM_SLEEP); 3477 3478 rtn = sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(ssc, 3479 out_data_gesn, gesn_len, 1 << SD_GESN_MEDIA_CLASS); 3480 3481 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3482 3483 if ((rtn != 0) || !sd_gesn_media_data_valid(out_data_gesn)) { 3484 un->un_f_mmc_gesn_polling = FALSE; 3485 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3486 "sd_set_mmc_caps: gesn not supported " 3487 "%d %x %x %x %x\n", rtn, 3488 out_data_gesn[0], out_data_gesn[1], 3489 out_data_gesn[2], out_data_gesn[3]); 3490 } 3491 3492 kmem_free(out_data_gesn, gesn_len); 3493 } 3494 3495 /* Get to the page data */ 3496 sense_mhp = (struct mode_header_grp2 *)buf; 3497 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3498 sense_mhp->bdesc_length_lo; 3499 if (bd_len > MODE_BLK_DESC_LENGTH) { 3500 /* 3501 * We did not get back the expected block descriptor 3502 * length so we cannot determine if the device supports 3503 * CDDA. However, we still indicate the device is MMC 3504 * according to the successful response to the page 3505 * 0x2A mode sense request. 3506 */ 3507 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3508 "sd_set_mmc_caps: Mode Sense returned " 3509 "invalid block descriptor length\n"); 3510 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3511 return; 3512 } 3513 3514 /* See if read CDDA is supported */ 3515 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3516 bd_len); 3517 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3518 3519 /* See if writing DVD RAM is supported. */ 3520 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3521 if (un->un_f_dvdram_writable_device == TRUE) { 3522 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3523 return; 3524 } 3525 3526 /* 3527 * If the device presents DVD or CD capabilities in the mode 3528 * page, we can return here since a RRD will not have 3529 * these capabilities. 3530 */ 3531 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3532 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3533 return; 3534 } 3535 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3536 3537 /* 3538 * If un->un_f_dvdram_writable_device is still FALSE, 3539 * check for a Removable Rigid Disk (RRD). A RRD 3540 * device is identified by the features RANDOM_WRITABLE and 3541 * HARDWARE_DEFECT_MANAGEMENT. 3542 */ 3543 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3544 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3545 3546 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3547 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3548 RANDOM_WRITABLE, SD_PATH_STANDARD); 3549 3550 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3551 3552 if (rtn != 0) { 3553 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3554 kmem_free(rqbuf_rw, SENSE_LENGTH); 3555 return; 3556 } 3557 3558 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3559 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3560 3561 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3562 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3563 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3564 3565 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3566 3567 if (rtn == 0) { 3568 /* 3569 * We have good information, check for random writable 3570 * and hardware defect features. 
3571 */ 3572 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3573 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3574 un->un_f_dvdram_writable_device = TRUE; 3575 } 3576 } 3577 3578 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3579 kmem_free(rqbuf_rw, SENSE_LENGTH); 3580 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3581 kmem_free(rqbuf_hd, SENSE_LENGTH); 3582 } 3583 3584 /* 3585 * Function: sd_check_for_writable_cd 3586 * 3587 * Description: This routine determines if the media in the device is 3588 * writable or not. It uses the get configuration command (0x46) 3589 * to determine if the media is writable 3590 * 3591 * Arguments: un - driver soft state (unit) structure 3592 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3593 * chain and the normal command waitq, or 3594 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3595 * "direct" chain and bypass the normal command 3596 * waitq. 3597 * 3598 * Context: Never called at interrupt context. 3599 */ 3600 3601 static void 3602 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag) 3603 { 3604 struct uscsi_cmd com; 3605 uchar_t *out_data; 3606 uchar_t *rqbuf; 3607 int rtn; 3608 uchar_t *out_data_rw, *out_data_hd; 3609 uchar_t *rqbuf_rw, *rqbuf_hd; 3610 struct mode_header_grp2 *sense_mhp; 3611 uchar_t *sense_page; 3612 caddr_t buf; 3613 int bd_len; 3614 int status; 3615 struct sd_lun *un; 3616 3617 ASSERT(ssc != NULL); 3618 un = ssc->ssc_un; 3619 ASSERT(un != NULL); 3620 ASSERT(mutex_owned(SD_MUTEX(un))); 3621 3622 /* 3623 * Initialize the writable media to false, if configuration info. 3624 * tells us otherwise then only we will set it. 3625 */ 3626 un->un_f_mmc_writable_media = FALSE; 3627 mutex_exit(SD_MUTEX(un)); 3628 3629 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3630 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3631 3632 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH, 3633 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3634 3635 if (rtn != 0) 3636 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3637 3638 mutex_enter(SD_MUTEX(un)); 3639 if (rtn == 0) { 3640 /* 3641 * We have good information, check for writable DVD. 3642 */ 3643 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3644 un->un_f_mmc_writable_media = TRUE; 3645 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3646 kmem_free(rqbuf, SENSE_LENGTH); 3647 return; 3648 } 3649 } 3650 3651 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3652 kmem_free(rqbuf, SENSE_LENGTH); 3653 3654 /* 3655 * Determine if this is a RRD type device. 3656 */ 3657 mutex_exit(SD_MUTEX(un)); 3658 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3659 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3660 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3661 3662 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3663 3664 mutex_enter(SD_MUTEX(un)); 3665 if (status != 0) { 3666 /* command failed; just return */ 3667 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3668 return; 3669 } 3670 3671 /* Get to the page data */ 3672 sense_mhp = (struct mode_header_grp2 *)buf; 3673 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3674 if (bd_len > MODE_BLK_DESC_LENGTH) { 3675 /* 3676 * We did not get back the expected block descriptor length so 3677 * we cannot check the mode page. 
3678 */ 3679 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3680 "sd_check_for_writable_cd: Mode Sense returned " 3681 "invalid block descriptor length\n"); 3682 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3683 return; 3684 } 3685 3686 /* 3687 * If the device presents DVD or CD capabilities in the mode 3688 * page, we can return here since a RRD device will not have 3689 * these capabilities. 3690 */ 3691 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3692 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3693 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3694 return; 3695 } 3696 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3697 3698 /* 3699 * If un->un_f_mmc_writable_media is still FALSE, 3700 * check for RRD type media. A RRD device is identified 3701 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 3702 */ 3703 mutex_exit(SD_MUTEX(un)); 3704 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3705 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3706 3707 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3708 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3709 RANDOM_WRITABLE, path_flag); 3710 3711 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3712 if (rtn != 0) { 3713 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3714 kmem_free(rqbuf_rw, SENSE_LENGTH); 3715 mutex_enter(SD_MUTEX(un)); 3716 return; 3717 } 3718 3719 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3720 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3721 3722 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3723 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3724 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3725 3726 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3727 mutex_enter(SD_MUTEX(un)); 3728 if (rtn == 0) { 3729 /* 3730 * We have good information, check for random writable 3731 * and hardware defect features as current. 3732 */ 3733 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3734 (out_data_rw[10] & 0x1) && 3735 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3736 (out_data_hd[10] & 0x1)) { 3737 un->un_f_mmc_writable_media = TRUE; 3738 } 3739 } 3740 3741 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3742 kmem_free(rqbuf_rw, SENSE_LENGTH); 3743 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3744 kmem_free(rqbuf_hd, SENSE_LENGTH); 3745 } 3746 3747 /* 3748 * Function: sd_read_unit_properties 3749 * 3750 * Description: The following implements a property lookup mechanism. 3751 * Properties for particular disks (keyed on vendor, model 3752 * and rev numbers) are sought in the sd.conf file via 3753 * sd_process_sdconf_file(), and if not found there, are 3754 * looked for in a list hardcoded in this driver via 3755 * sd_process_sdconf_table() Once located the properties 3756 * are used to update the driver unit structure. 3757 * 3758 * Arguments: un - driver soft state (unit) structure 3759 */ 3760 3761 static void 3762 sd_read_unit_properties(struct sd_lun *un) 3763 { 3764 /* 3765 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3766 * the "sd-config-list" property (from the sd.conf file) or if 3767 * there was not a match for the inquiry vid/pid. If this event 3768 * occurs the static driver configuration table is searched for 3769 * a match. 
3770 */ 3771 ASSERT(un != NULL); 3772 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3773 sd_process_sdconf_table(un); 3774 } 3775 3776 /* check for LSI device */ 3777 sd_is_lsi(un); 3778 3779 3780 } 3781 3782 3783 /* 3784 * Function: sd_process_sdconf_file 3785 * 3786 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the 3787 * driver's config file (ie, sd.conf) and update the driver 3788 * soft state structure accordingly. 3789 * 3790 * Arguments: un - driver soft state (unit) structure 3791 * 3792 * Return Code: SD_SUCCESS - The properties were successfully set according 3793 * to the driver configuration file. 3794 * SD_FAILURE - The driver config list was not obtained or 3795 * there was no vid/pid match. This indicates that 3796 * the static config table should be used. 3797 * 3798 * The config file has a property, "sd-config-list". Currently we support 3799 * two kinds of formats. For both formats, the value of this property 3800 * is a list of duplets: 3801 * 3802 * sd-config-list= 3803 * <duplet>, 3804 * [,<duplet>]*; 3805 * 3806 * For the improved format, where 3807 * 3808 * <duplet>:= "<vid+pid>","<tunable-list>" 3809 * 3810 * and 3811 * 3812 * <tunable-list>:= <tunable> [, <tunable> ]*; 3813 * <tunable> = <name> : <value> 3814 * 3815 * The <vid+pid> is the string that is returned by the target device on a 3816 * SCSI inquiry command, the <tunable-list> contains one or more tunables 3817 * to apply to all target devices with the specified <vid+pid>. 3818 * 3819 * Each <tunable> is a "<name> : <value>" pair. 3820 * 3821 * For the old format, the structure of each duplet is as follows: 3822 * 3823 * <duplet>:= "<vid+pid>","<data-property-name_list>" 3824 * 3825 * The first entry of the duplet is the device ID string (the concatenated 3826 * vid & pid; not to be confused with a device_id). This is defined in 3827 * the same way as in the sd_disk_table. 3828 * 3829 * The second part of the duplet is a string that identifies a 3830 * data-property-name-list. The data-property-name-list is defined as 3831 * follows: 3832 * 3833 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3834 * 3835 * The syntax of <data-property-name> depends on the <version> field. 3836 * 3837 * If version = SD_CONF_VERSION_1 we have the following syntax: 3838 * 3839 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3840 * 3841 * where the prop0 value will be used to set prop0 if bit0 set in the 3842 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3843 * 3844 */ 3845 3846 static int 3847 sd_process_sdconf_file(struct sd_lun *un) 3848 { 3849 char **config_list = NULL; 3850 uint_t nelements; 3851 char *vidptr; 3852 int vidlen; 3853 char *dnlist_ptr; 3854 char *dataname_ptr; 3855 char *dataname_lasts; 3856 int *data_list = NULL; 3857 uint_t data_list_len; 3858 int rval = SD_FAILURE; 3859 int i; 3860 3861 ASSERT(un != NULL); 3862 3863 /* Obtain the configuration list associated with the .conf file */ 3864 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un), 3865 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list, 3866 &config_list, &nelements) != DDI_PROP_SUCCESS) { 3867 return (SD_FAILURE); 3868 } 3869 3870 /* 3871 * Compare vids in each duplet to the inquiry vid - if a match is 3872 * made, get the data value and update the soft state structure 3873 * accordingly. 3874 * 3875 * Each duplet should show as a pair of strings, return SD_FAILURE 3876 * otherwise. 
3877 */ 3878 if (nelements & 1) { 3879 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3880 "sd-config-list should show as pairs of strings.\n"); 3881 if (config_list) 3882 ddi_prop_free(config_list); 3883 return (SD_FAILURE); 3884 } 3885 3886 for (i = 0; i < nelements; i += 2) { 3887 /* 3888 * Note: The assumption here is that each vid entry is on 3889 * a unique line from its associated duplet. 3890 */ 3891 vidptr = config_list[i]; 3892 vidlen = (int)strlen(vidptr); 3893 if (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS) { 3894 continue; 3895 } 3896 3897 /* 3898 * dnlist contains 1 or more blank separated 3899 * data-property-name entries 3900 */ 3901 dnlist_ptr = config_list[i + 1]; 3902 3903 if (strchr(dnlist_ptr, ':') != NULL) { 3904 /* 3905 * Decode the improved format sd-config-list. 3906 */ 3907 sd_nvpair_str_decode(un, dnlist_ptr); 3908 } else { 3909 /* 3910 * The old format sd-config-list, loop through all 3911 * data-property-name entries in the 3912 * data-property-name-list 3913 * setting the properties for each. 3914 */ 3915 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t", 3916 &dataname_lasts); dataname_ptr != NULL; 3917 dataname_ptr = sd_strtok_r(NULL, " \t", 3918 &dataname_lasts)) { 3919 int version; 3920 3921 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3922 "sd_process_sdconf_file: disk:%s, " 3923 "data:%s\n", vidptr, dataname_ptr); 3924 3925 /* Get the data list */ 3926 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 3927 SD_DEVINFO(un), 0, dataname_ptr, &data_list, 3928 &data_list_len) != DDI_PROP_SUCCESS) { 3929 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3930 "sd_process_sdconf_file: data " 3931 "property (%s) has no value\n", 3932 dataname_ptr); 3933 continue; 3934 } 3935 3936 version = data_list[0]; 3937 3938 if (version == SD_CONF_VERSION_1) { 3939 sd_tunables values; 3940 3941 /* Set the properties */ 3942 if (sd_chk_vers1_data(un, data_list[1], 3943 &data_list[2], data_list_len, 3944 dataname_ptr) == SD_SUCCESS) { 3945 sd_get_tunables_from_conf(un, 3946 data_list[1], &data_list[2], 3947 &values); 3948 sd_set_vers1_properties(un, 3949 data_list[1], &values); 3950 rval = SD_SUCCESS; 3951 } else { 3952 rval = SD_FAILURE; 3953 } 3954 } else { 3955 scsi_log(SD_DEVINFO(un), sd_label, 3956 CE_WARN, "data property %s version " 3957 "0x%x is invalid.", 3958 dataname_ptr, version); 3959 rval = SD_FAILURE; 3960 } 3961 if (data_list) 3962 ddi_prop_free(data_list); 3963 } 3964 } 3965 } 3966 3967 /* free up the memory allocated by ddi_prop_lookup_string_array(). */ 3968 if (config_list) { 3969 ddi_prop_free(config_list); 3970 } 3971 3972 return (rval); 3973 } 3974 3975 /* 3976 * Function: sd_nvpair_str_decode() 3977 * 3978 * Description: Parse the improved format sd-config-list to get 3979 * each entry of tunable, which includes a name-value pair. 3980 * Then call sd_set_properties() to set the property. 
3981 * 3982 * Arguments: un - driver soft state (unit) structure 3983 * nvpair_str - the tunable list 3984 */ 3985 static void 3986 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str) 3987 { 3988 char *nv, *name, *value, *token; 3989 char *nv_lasts, *v_lasts, *x_lasts; 3990 3991 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL; 3992 nv = sd_strtok_r(NULL, ",", &nv_lasts)) { 3993 token = sd_strtok_r(nv, ":", &v_lasts); 3994 name = sd_strtok_r(token, " \t", &x_lasts); 3995 token = sd_strtok_r(NULL, ":", &v_lasts); 3996 value = sd_strtok_r(token, " \t", &x_lasts); 3997 if (name == NULL || value == NULL) { 3998 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3999 "sd_nvpair_str_decode: " 4000 "name or value is not valid!\n"); 4001 } else { 4002 sd_set_properties(un, name, value); 4003 } 4004 } 4005 } 4006 4007 /* 4008 * Function: sd_strtok_r() 4009 * 4010 * Description: This function uses strpbrk and strspn to break 4011 * string into tokens on sequentially subsequent calls. Return 4012 * NULL when no non-separator characters remain. The first 4013 * argument is NULL for subsequent calls. 4014 */ 4015 static char * 4016 sd_strtok_r(char *string, const char *sepset, char **lasts) 4017 { 4018 char *q, *r; 4019 4020 /* First or subsequent call */ 4021 if (string == NULL) 4022 string = *lasts; 4023 4024 if (string == NULL) 4025 return (NULL); 4026 4027 /* Skip leading separators */ 4028 q = string + strspn(string, sepset); 4029 4030 if (*q == '\0') 4031 return (NULL); 4032 4033 if ((r = strpbrk(q, sepset)) == NULL) 4034 *lasts = NULL; 4035 else { 4036 *r = '\0'; 4037 *lasts = r + 1; 4038 } 4039 return (q); 4040 } 4041 4042 /* 4043 * Function: sd_set_properties() 4044 * 4045 * Description: Set device properties based on the improved 4046 * format sd-config-list. 
4047 * 4048 * Arguments: un - driver soft state (unit) structure 4049 * name - supported tunable name 4050 * value - tunable value 4051 */ 4052 static void 4053 sd_set_properties(struct sd_lun *un, char *name, char *value) 4054 { 4055 char *endptr = NULL; 4056 long val = 0; 4057 4058 if (strcasecmp(name, "cache-nonvolatile") == 0) { 4059 if (strcasecmp(value, "true") == 0) { 4060 un->un_f_suppress_cache_flush = TRUE; 4061 } else if (strcasecmp(value, "false") == 0) { 4062 un->un_f_suppress_cache_flush = FALSE; 4063 } else { 4064 goto value_invalid; 4065 } 4066 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4067 "suppress_cache_flush flag set to %d\n", 4068 un->un_f_suppress_cache_flush); 4069 return; 4070 } 4071 4072 if (strcasecmp(name, "controller-type") == 0) { 4073 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4074 un->un_ctype = val; 4075 } else { 4076 goto value_invalid; 4077 } 4078 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4079 "ctype set to %d\n", un->un_ctype); 4080 return; 4081 } 4082 4083 if (strcasecmp(name, "delay-busy") == 0) { 4084 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4085 un->un_busy_timeout = drv_usectohz(val / 1000); 4086 } else { 4087 goto value_invalid; 4088 } 4089 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4090 "busy_timeout set to %d\n", un->un_busy_timeout); 4091 return; 4092 } 4093 4094 if (strcasecmp(name, "disksort") == 0) { 4095 if (strcasecmp(value, "true") == 0) { 4096 un->un_f_disksort_disabled = FALSE; 4097 } else if (strcasecmp(value, "false") == 0) { 4098 un->un_f_disksort_disabled = TRUE; 4099 } else { 4100 goto value_invalid; 4101 } 4102 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4103 "disksort disabled flag set to %d\n", 4104 un->un_f_disksort_disabled); 4105 return; 4106 } 4107 4108 if (strcasecmp(name, "power-condition") == 0) { 4109 if (strcasecmp(value, "true") == 0) { 4110 un->un_f_power_condition_disabled = FALSE; 4111 } else if (strcasecmp(value, "false") == 0) { 4112 un->un_f_power_condition_disabled = TRUE; 4113 } else { 4114 goto value_invalid; 4115 } 4116 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4117 "power condition disabled flag set to %d\n", 4118 un->un_f_power_condition_disabled); 4119 return; 4120 } 4121 4122 if (strcasecmp(name, "timeout-releasereservation") == 0) { 4123 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4124 un->un_reserve_release_time = val; 4125 } else { 4126 goto value_invalid; 4127 } 4128 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4129 "reservation release timeout set to %d\n", 4130 un->un_reserve_release_time); 4131 return; 4132 } 4133 4134 if (strcasecmp(name, "reset-lun") == 0) { 4135 if (strcasecmp(value, "true") == 0) { 4136 un->un_f_lun_reset_enabled = TRUE; 4137 } else if (strcasecmp(value, "false") == 0) { 4138 un->un_f_lun_reset_enabled = FALSE; 4139 } else { 4140 goto value_invalid; 4141 } 4142 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4143 "lun reset enabled flag set to %d\n", 4144 un->un_f_lun_reset_enabled); 4145 return; 4146 } 4147 4148 if (strcasecmp(name, "retries-busy") == 0) { 4149 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4150 un->un_busy_retry_count = val; 4151 } else { 4152 goto value_invalid; 4153 } 4154 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4155 "busy retry count set to %d\n", un->un_busy_retry_count); 4156 return; 4157 } 4158 4159 if (strcasecmp(name, "retries-timeout") == 0) { 4160 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4161 un->un_retry_count = val; 4162 } 
else { 4163 goto value_invalid; 4164 } 4165 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4166 "timeout retry count set to %d\n", un->un_retry_count); 4167 return; 4168 } 4169 4170 if (strcasecmp(name, "retries-notready") == 0) { 4171 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4172 un->un_notready_retry_count = val; 4173 } else { 4174 goto value_invalid; 4175 } 4176 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4177 "notready retry count set to %d\n", 4178 un->un_notready_retry_count); 4179 return; 4180 } 4181 4182 if (strcasecmp(name, "retries-reset") == 0) { 4183 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4184 un->un_reset_retry_count = val; 4185 } else { 4186 goto value_invalid; 4187 } 4188 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4189 "reset retry count set to %d\n", 4190 un->un_reset_retry_count); 4191 return; 4192 } 4193 4194 if (strcasecmp(name, "throttle-max") == 0) { 4195 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4196 un->un_saved_throttle = un->un_throttle = val; 4197 } else { 4198 goto value_invalid; 4199 } 4200 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4201 "throttle set to %d\n", un->un_throttle); 4202 } 4203 4204 if (strcasecmp(name, "throttle-min") == 0) { 4205 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4206 un->un_min_throttle = val; 4207 } else { 4208 goto value_invalid; 4209 } 4210 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4211 "min throttle set to %d\n", un->un_min_throttle); 4212 } 4213 4214 if (strcasecmp(name, "rmw-type") == 0) { 4215 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4216 un->un_f_rmw_type = val; 4217 } else { 4218 goto value_invalid; 4219 } 4220 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4221 "RMW type set to %d\n", un->un_f_rmw_type); 4222 } 4223 4224 if (strcasecmp(name, "physical-block-size") == 0) { 4225 if (ddi_strtol(value, &endptr, 0, &val) == 0 && 4226 ISP2(val) && val >= un->un_tgt_blocksize && 4227 val >= un->un_sys_blocksize) { 4228 un->un_phy_blocksize = val; 4229 } else { 4230 goto value_invalid; 4231 } 4232 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4233 "physical block size set to %d\n", un->un_phy_blocksize); 4234 } 4235 4236 if (strcasecmp(name, "retries-victim") == 0) { 4237 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4238 un->un_victim_retry_count = val; 4239 } else { 4240 goto value_invalid; 4241 } 4242 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4243 "victim retry count set to %d\n", 4244 un->un_victim_retry_count); 4245 return; 4246 } 4247 4248 /* 4249 * Validate the throttle values. 4250 * If any of the numbers are invalid, set everything to defaults. 
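	 * (Invalid here means a throttle below SD_LOWEST_VALID_THROTTLE,
	 * or a minimum throttle greater than the maximum.)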
4251 */ 4252 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4253 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4254 (un->un_min_throttle > un->un_throttle)) { 4255 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4256 un->un_min_throttle = sd_min_throttle; 4257 } 4258 4259 if (strcasecmp(name, "mmc-gesn-polling") == 0) { 4260 if (strcasecmp(value, "true") == 0) { 4261 un->un_f_mmc_gesn_polling = TRUE; 4262 } else if (strcasecmp(value, "false") == 0) { 4263 un->un_f_mmc_gesn_polling = FALSE; 4264 } else { 4265 goto value_invalid; 4266 } 4267 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4268 "mmc-gesn-polling set to %d\n", 4269 un->un_f_mmc_gesn_polling); 4270 } 4271 4272 return; 4273 4274 value_invalid: 4275 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4276 "value of prop %s is invalid\n", name); 4277 } 4278 4279 /* 4280 * Function: sd_get_tunables_from_conf() 4281 * 4282 * 4283 * This function reads the data list from the sd.conf file and pulls 4284 * the values that can have numeric values as arguments and places 4285 * the values in the appropriate sd_tunables member. 4286 * Since the order of the data list members varies across platforms 4287 * This function reads them from the data list in a platform specific 4288 * order and places them into the correct sd_tunable member that is 4289 * consistent across all platforms. 4290 */ 4291 static void 4292 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 4293 sd_tunables *values) 4294 { 4295 int i; 4296 int mask; 4297 4298 bzero(values, sizeof (sd_tunables)); 4299 4300 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4301 4302 mask = 1 << i; 4303 if (mask > flags) { 4304 break; 4305 } 4306 4307 switch (mask & flags) { 4308 case 0: /* This mask bit not set in flags */ 4309 continue; 4310 case SD_CONF_BSET_THROTTLE: 4311 values->sdt_throttle = data_list[i]; 4312 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4313 "sd_get_tunables_from_conf: throttle = %d\n", 4314 values->sdt_throttle); 4315 break; 4316 case SD_CONF_BSET_CTYPE: 4317 values->sdt_ctype = data_list[i]; 4318 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4319 "sd_get_tunables_from_conf: ctype = %d\n", 4320 values->sdt_ctype); 4321 break; 4322 case SD_CONF_BSET_NRR_COUNT: 4323 values->sdt_not_rdy_retries = data_list[i]; 4324 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4325 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 4326 values->sdt_not_rdy_retries); 4327 break; 4328 case SD_CONF_BSET_BSY_RETRY_COUNT: 4329 values->sdt_busy_retries = data_list[i]; 4330 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4331 "sd_get_tunables_from_conf: busy_retries = %d\n", 4332 values->sdt_busy_retries); 4333 break; 4334 case SD_CONF_BSET_RST_RETRIES: 4335 values->sdt_reset_retries = data_list[i]; 4336 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4337 "sd_get_tunables_from_conf: reset_retries = %d\n", 4338 values->sdt_reset_retries); 4339 break; 4340 case SD_CONF_BSET_RSV_REL_TIME: 4341 values->sdt_reserv_rel_time = data_list[i]; 4342 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4343 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 4344 values->sdt_reserv_rel_time); 4345 break; 4346 case SD_CONF_BSET_MIN_THROTTLE: 4347 values->sdt_min_throttle = data_list[i]; 4348 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4349 "sd_get_tunables_from_conf: min_throttle = %d\n", 4350 values->sdt_min_throttle); 4351 break; 4352 case SD_CONF_BSET_DISKSORT_DISABLED: 4353 values->sdt_disk_sort_dis = data_list[i]; 4354 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4355 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 4356 
values->sdt_disk_sort_dis); 4357 break; 4358 case SD_CONF_BSET_LUN_RESET_ENABLED: 4359 values->sdt_lun_reset_enable = data_list[i]; 4360 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4361 "sd_get_tunables_from_conf: lun_reset_enable = %d" 4362 "\n", values->sdt_lun_reset_enable); 4363 break; 4364 case SD_CONF_BSET_CACHE_IS_NV: 4365 values->sdt_suppress_cache_flush = data_list[i]; 4366 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4367 "sd_get_tunables_from_conf: \ 4368 suppress_cache_flush = %d" 4369 "\n", values->sdt_suppress_cache_flush); 4370 break; 4371 case SD_CONF_BSET_PC_DISABLED: 4372 values->sdt_disk_sort_dis = data_list[i]; 4373 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4374 "sd_get_tunables_from_conf: power_condition_dis = " 4375 "%d\n", values->sdt_power_condition_dis); 4376 break; 4377 } 4378 } 4379 } 4380 4381 /* 4382 * Function: sd_process_sdconf_table 4383 * 4384 * Description: Search the static configuration table for a match on the 4385 * inquiry vid/pid and update the driver soft state structure 4386 * according to the table property values for the device. 4387 * 4388 * The form of a configuration table entry is: 4389 * <vid+pid>,<flags>,<property-data> 4390 * "SEAGATE ST42400N",1,0x40000, 4391 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 4392 * 4393 * Arguments: un - driver soft state (unit) structure 4394 */ 4395 4396 static void 4397 sd_process_sdconf_table(struct sd_lun *un) 4398 { 4399 char *id = NULL; 4400 int table_index; 4401 int idlen; 4402 4403 ASSERT(un != NULL); 4404 for (table_index = 0; table_index < sd_disk_table_size; 4405 table_index++) { 4406 id = sd_disk_table[table_index].device_id; 4407 idlen = strlen(id); 4408 4409 /* 4410 * The static configuration table currently does not 4411 * implement version 10 properties. Additionally, 4412 * multiple data-property-name entries are not 4413 * implemented in the static configuration table. 4414 */ 4415 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4416 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4417 "sd_process_sdconf_table: disk %s\n", id); 4418 sd_set_vers1_properties(un, 4419 sd_disk_table[table_index].flags, 4420 sd_disk_table[table_index].properties); 4421 break; 4422 } 4423 } 4424 } 4425 4426 4427 /* 4428 * Function: sd_sdconf_id_match 4429 * 4430 * Description: This local function implements a case sensitive vid/pid 4431 * comparison as well as the boundary cases of wild card and 4432 * multiple blanks. 4433 * 4434 * Note: An implicit assumption made here is that the scsi 4435 * inquiry structure will always keep the vid, pid and 4436 * revision strings in consecutive sequence, so they can be 4437 * read as a single string. If this assumption is not the 4438 * case, a separate string, to be used for the check, needs 4439 * to be built with these strings concatenated. 4440 * 4441 * Arguments: un - driver soft state (unit) structure 4442 * id - table or config file vid/pid 4443 * idlen - length of the vid/pid (bytes) 4444 * 4445 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4446 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4447 */ 4448 4449 static int 4450 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 4451 { 4452 struct scsi_inquiry *sd_inq; 4453 int rval = SD_SUCCESS; 4454 4455 ASSERT(un != NULL); 4456 sd_inq = un->un_sd->sd_inq; 4457 ASSERT(id != NULL); 4458 4459 /* 4460 * We use the inq_vid as a pointer to a buffer containing the 4461 * vid and pid and use the entire vid/pid length of the table 4462 * entry for the comparison. 
This works because the inq_pid 4463 * data member follows inq_vid in the scsi_inquiry structure. 4464 */ 4465 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 4466 /* 4467 * The user id string is compared to the inquiry vid/pid 4468 * using a case insensitive comparison and ignoring 4469 * multiple spaces. 4470 */ 4471 rval = sd_blank_cmp(un, id, idlen); 4472 if (rval != SD_SUCCESS) { 4473 /* 4474 * User id strings that start and end with a "*" 4475 * are a special case. These do not have a 4476 * specific vendor, and the product string can 4477 * appear anywhere in the 16 byte PID portion of 4478 * the inquiry data. This is a simple strstr() 4479 * type search for the user id in the inquiry data. 4480 */ 4481 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4482 char *pidptr = &id[1]; 4483 int i; 4484 int j; 4485 int pidstrlen = idlen - 2; 4486 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4487 pidstrlen; 4488 4489 if (j < 0) { 4490 return (SD_FAILURE); 4491 } 4492 for (i = 0; i < j; i++) { 4493 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4494 pidptr, pidstrlen) == 0) { 4495 rval = SD_SUCCESS; 4496 break; 4497 } 4498 } 4499 } 4500 } 4501 } 4502 return (rval); 4503 } 4504 4505 4506 /* 4507 * Function: sd_blank_cmp 4508 * 4509 * Description: If the id string starts and ends with a space, treat 4510 * multiple consecutive spaces as equivalent to a single 4511 * space. For example, this causes a sd_disk_table entry 4512 * of " NEC CDROM " to match a device's id string of 4513 * "NEC CDROM". 4514 * 4515 * Note: The success exit condition for this routine is if 4516 * the pointer to the table entry is '\0' and the cnt of 4517 * the inquiry length is zero. This will happen if the inquiry 4518 * string returned by the device is padded with spaces to be 4519 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4520 * SCSI spec states that the inquiry string is to be padded with 4521 * spaces. 4522 * 4523 * Arguments: un - driver soft state (unit) structure 4524 * id - table or config file vid/pid 4525 * idlen - length of the vid/pid (bytes) 4526 * 4527 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4528 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4529 */ 4530 4531 static int 4532 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4533 { 4534 char *p1; 4535 char *p2; 4536 int cnt; 4537 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4538 sizeof (SD_INQUIRY(un)->inq_pid); 4539 4540 ASSERT(un != NULL); 4541 p2 = un->un_sd->sd_inq->inq_vid; 4542 ASSERT(id != NULL); 4543 p1 = id; 4544 4545 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4546 /* 4547 * Note: string p1 is terminated by a NUL but string p2 4548 * isn't. The end of p2 is determined by cnt. 4549 */ 4550 for (;;) { 4551 /* skip over any extra blanks in both strings */ 4552 while ((*p1 != '\0') && (*p1 == ' ')) { 4553 p1++; 4554 } 4555 while ((cnt != 0) && (*p2 == ' ')) { 4556 p2++; 4557 cnt--; 4558 } 4559 4560 /* compare the two strings */ 4561 if ((cnt == 0) || 4562 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4563 break; 4564 } 4565 while ((cnt > 0) && 4566 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4567 p1++; 4568 p2++; 4569 cnt--; 4570 } 4571 } 4572 } 4573 4574 /* return SD_SUCCESS if both strings match */ 4575 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE); 4576 } 4577 4578 4579 /* 4580 * Function: sd_chk_vers1_data 4581 * 4582 * Description: Verify the version 1 device properties provided by the 4583 * user via the configuration file 4584 * 4585 * Arguments: un - driver soft state (unit) structure 4586 * flags - integer mask indicating properties to be set 4587 * prop_list - integer list of property values 4588 * list_len - number of the elements 4589 * 4590 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 4591 * SD_FAILURE - Indicates the user provided data is invalid 4592 */ 4593 4594 static int 4595 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 4596 int list_len, char *dataname_ptr) 4597 { 4598 int i; 4599 int mask = 1; 4600 int index = 0; 4601 4602 ASSERT(un != NULL); 4603 4604 /* Check for a NULL property name and list */ 4605 if (dataname_ptr == NULL) { 4606 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4607 "sd_chk_vers1_data: NULL data property name."); 4608 return (SD_FAILURE); 4609 } 4610 if (prop_list == NULL) { 4611 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4612 "sd_chk_vers1_data: %s NULL data property list.", 4613 dataname_ptr); 4614 return (SD_FAILURE); 4615 } 4616 4617 /* Display a warning if undefined bits are set in the flags */ 4618 if (flags & ~SD_CONF_BIT_MASK) { 4619 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4620 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 4621 "Properties not set.", 4622 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 4623 return (SD_FAILURE); 4624 } 4625 4626 /* 4627 * Verify the length of the list by identifying the highest bit set 4628 * in the flags and validating that the property list has a length 4629 * up to the index of this bit. 4630 */ 4631 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4632 if (flags & mask) { 4633 index++; 4634 } 4635 mask = 1 << i; 4636 } 4637 if (list_len < (index + 2)) { 4638 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4639 "sd_chk_vers1_data: " 4640 "Data property list %s size is incorrect. " 4641 "Properties not set.", dataname_ptr); 4642 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 4643 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 4644 return (SD_FAILURE); 4645 } 4646 return (SD_SUCCESS); 4647 } 4648 4649 4650 /* 4651 * Function: sd_set_vers1_properties 4652 * 4653 * Description: Set version 1 device properties based on a property list 4654 * retrieved from the driver configuration file or static 4655 * configuration table. Version 1 properties have the format: 4656 * 4657 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 4658 * 4659 * where the prop0 value will be used to set prop0 if bit0 4660 * is set in the flags 4661 * 4662 * Arguments: un - driver soft state (unit) structure 4663 * flags - integer mask indicating properties to be set 4664 * prop_list - integer list of property values 4665 */ 4666 4667 static void 4668 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4669 { 4670 ASSERT(un != NULL); 4671 4672 /* 4673 * Set the flag to indicate cache is to be disabled. An attempt 4674 * to disable the cache via sd_cache_control() will be made 4675 * later during attach once the basic initialization is complete. 
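	 *
	 * (As with each of the version 1 properties below, a bit in
	 * "flags" selects whether the matching prop_list member is
	 * consulted; e.g. SD_CONF_BSET_THROTTLE selects
	 * prop_list->sdt_throttle.)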
4676 */ 4677 if (flags & SD_CONF_BSET_NOCACHE) { 4678 un->un_f_opt_disable_cache = TRUE; 4679 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4680 "sd_set_vers1_properties: caching disabled flag set\n"); 4681 } 4682 4683 /* CD-specific configuration parameters */ 4684 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4685 un->un_f_cfg_playmsf_bcd = TRUE; 4686 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4687 "sd_set_vers1_properties: playmsf_bcd set\n"); 4688 } 4689 if (flags & SD_CONF_BSET_READSUB_BCD) { 4690 un->un_f_cfg_readsub_bcd = TRUE; 4691 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4692 "sd_set_vers1_properties: readsub_bcd set\n"); 4693 } 4694 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4695 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4696 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4697 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4698 } 4699 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4700 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4701 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4702 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4703 } 4704 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4705 un->un_f_cfg_no_read_header = TRUE; 4706 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4707 "sd_set_vers1_properties: no_read_header set\n"); 4708 } 4709 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4710 un->un_f_cfg_read_cd_xd4 = TRUE; 4711 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4712 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4713 } 4714 4715 /* Support for devices which do not have valid/unique serial numbers */ 4716 if (flags & SD_CONF_BSET_FAB_DEVID) { 4717 un->un_f_opt_fab_devid = TRUE; 4718 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4719 "sd_set_vers1_properties: fab_devid bit set\n"); 4720 } 4721 4722 /* Support for user throttle configuration */ 4723 if (flags & SD_CONF_BSET_THROTTLE) { 4724 ASSERT(prop_list != NULL); 4725 un->un_saved_throttle = un->un_throttle = 4726 prop_list->sdt_throttle; 4727 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4728 "sd_set_vers1_properties: throttle set to %d\n", 4729 prop_list->sdt_throttle); 4730 } 4731 4732 /* Set the per disk retry count according to the conf file or table. 
*/ 4733 if (flags & SD_CONF_BSET_NRR_COUNT) { 4734 ASSERT(prop_list != NULL); 4735 if (prop_list->sdt_not_rdy_retries) { 4736 un->un_notready_retry_count = 4737 prop_list->sdt_not_rdy_retries; 4738 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4739 "sd_set_vers1_properties: not ready retry count" 4740 " set to %d\n", un->un_notready_retry_count); 4741 } 4742 } 4743 4744 /* The controller type is reported for generic disk driver ioctls */ 4745 if (flags & SD_CONF_BSET_CTYPE) { 4746 ASSERT(prop_list != NULL); 4747 switch (prop_list->sdt_ctype) { 4748 case CTYPE_CDROM: 4749 un->un_ctype = prop_list->sdt_ctype; 4750 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4751 "sd_set_vers1_properties: ctype set to " 4752 "CTYPE_CDROM\n"); 4753 break; 4754 case CTYPE_CCS: 4755 un->un_ctype = prop_list->sdt_ctype; 4756 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4757 "sd_set_vers1_properties: ctype set to " 4758 "CTYPE_CCS\n"); 4759 break; 4760 case CTYPE_ROD: /* RW optical */ 4761 un->un_ctype = prop_list->sdt_ctype; 4762 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4763 "sd_set_vers1_properties: ctype set to " 4764 "CTYPE_ROD\n"); 4765 break; 4766 default: 4767 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4768 "sd_set_vers1_properties: Could not set " 4769 "invalid ctype value (%d)", 4770 prop_list->sdt_ctype); 4771 } 4772 } 4773 4774 /* Purple failover timeout */ 4775 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4776 ASSERT(prop_list != NULL); 4777 un->un_busy_retry_count = 4778 prop_list->sdt_busy_retries; 4779 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4780 "sd_set_vers1_properties: " 4781 "busy retry count set to %d\n", 4782 un->un_busy_retry_count); 4783 } 4784 4785 /* Purple reset retry count */ 4786 if (flags & SD_CONF_BSET_RST_RETRIES) { 4787 ASSERT(prop_list != NULL); 4788 un->un_reset_retry_count = 4789 prop_list->sdt_reset_retries; 4790 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4791 "sd_set_vers1_properties: " 4792 "reset retry count set to %d\n", 4793 un->un_reset_retry_count); 4794 } 4795 4796 /* Purple reservation release timeout */ 4797 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4798 ASSERT(prop_list != NULL); 4799 un->un_reserve_release_time = 4800 prop_list->sdt_reserv_rel_time; 4801 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4802 "sd_set_vers1_properties: " 4803 "reservation release timeout set to %d\n", 4804 un->un_reserve_release_time); 4805 } 4806 4807 /* 4808 * Driver flag telling the driver to verify that no commands are pending 4809 * for a device before issuing a Test Unit Ready. This is a workaround 4810 * for a firmware bug in some Seagate eliteI drives. 4811 */ 4812 if (flags & SD_CONF_BSET_TUR_CHECK) { 4813 un->un_f_cfg_tur_check = TRUE; 4814 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4815 "sd_set_vers1_properties: tur queue check set\n"); 4816 } 4817 4818 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4819 un->un_min_throttle = prop_list->sdt_min_throttle; 4820 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4821 "sd_set_vers1_properties: min throttle set to %d\n", 4822 un->un_min_throttle); 4823 } 4824 4825 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4826 un->un_f_disksort_disabled = 4827 (prop_list->sdt_disk_sort_dis != 0) ? 4828 TRUE : FALSE; 4829 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4830 "sd_set_vers1_properties: disksort disabled " 4831 "flag set to %d\n", 4832 prop_list->sdt_disk_sort_dis); 4833 } 4834 4835 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4836 un->un_f_lun_reset_enabled = 4837 (prop_list->sdt_lun_reset_enable != 0) ? 
4838 TRUE : FALSE; 4839 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4840 "sd_set_vers1_properties: lun reset enabled " 4841 "flag set to %d\n", 4842 prop_list->sdt_lun_reset_enable); 4843 } 4844 4845 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4846 un->un_f_suppress_cache_flush = 4847 (prop_list->sdt_suppress_cache_flush != 0) ? 4848 TRUE : FALSE; 4849 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4850 "sd_set_vers1_properties: suppress_cache_flush " 4851 "flag set to %d\n", 4852 prop_list->sdt_suppress_cache_flush); 4853 } 4854 4855 if (flags & SD_CONF_BSET_PC_DISABLED) { 4856 un->un_f_power_condition_disabled = 4857 (prop_list->sdt_power_condition_dis != 0) ? 4858 TRUE : FALSE; 4859 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4860 "sd_set_vers1_properties: power_condition_disabled " 4861 "flag set to %d\n", 4862 prop_list->sdt_power_condition_dis); 4863 } 4864 4865 /* 4866 * Validate the throttle values. 4867 * If any of the numbers are invalid, set everything to defaults. 4868 */ 4869 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4870 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4871 (un->un_min_throttle > un->un_throttle)) { 4872 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4873 un->un_min_throttle = sd_min_throttle; 4874 } 4875 } 4876 4877 /* 4878 * Function: sd_is_lsi() 4879 * 4880 * Description: Check for lsi devices, step through the static device 4881 * table to match vid/pid. 4882 * 4883 * Args: un - ptr to sd_lun 4884 * 4885 * Notes: When creating new LSI property, need to add the new LSI property 4886 * to this function. 4887 */ 4888 static void 4889 sd_is_lsi(struct sd_lun *un) 4890 { 4891 char *id = NULL; 4892 int table_index; 4893 int idlen; 4894 void *prop; 4895 4896 ASSERT(un != NULL); 4897 for (table_index = 0; table_index < sd_disk_table_size; 4898 table_index++) { 4899 id = sd_disk_table[table_index].device_id; 4900 idlen = strlen(id); 4901 if (idlen == 0) { 4902 continue; 4903 } 4904 4905 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4906 prop = sd_disk_table[table_index].properties; 4907 if (prop == &lsi_properties || 4908 prop == &lsi_oem_properties || 4909 prop == &lsi_properties_scsi || 4910 prop == &symbios_properties) { 4911 un->un_f_cfg_is_lsi = TRUE; 4912 } 4913 break; 4914 } 4915 } 4916 } 4917 4918 /* 4919 * Function: sd_get_physical_geometry 4920 * 4921 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4922 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4923 * target, and use this information to initialize the physical 4924 * geometry cache specified by pgeom_p. 4925 * 4926 * MODE SENSE is an optional command, so failure in this case 4927 * does not necessarily denote an error. We want to use the 4928 * MODE SENSE commands to derive the physical geometry of the 4929 * device, but if either command fails, the logical geometry is 4930 * used as the fallback for disk label geometry in cmlb. 4931 * 4932 * This requires that un->un_blockcount and un->un_tgt_blocksize 4933 * have already been initialized for the current target and 4934 * that the current values be passed as args so that we don't 4935 * end up ever trying to use -1 as a valid value. This could 4936 * happen if either value is reset while we're not holding 4937 * the mutex. 4938 * 4939 * Arguments: un - driver soft state (unit) structure 4940 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4941 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4942 * to use the USCSI "direct" chain and bypass the normal 4943 * command waitq. 
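 *
 *		For example (values purely illustrative), if page 4
 *		reports 16 heads and page 3 reports 63 sectors/track,
 *		sectors per cylinder is 16 * 63 = 1008, and the capacity
 *		computed from the MODE SENSE data for the sanity check
 *		below is 1008 * ncyl.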
4944 * 4945 * Context: Kernel thread only (can sleep). 4946 */ 4947 4948 static int 4949 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4950 diskaddr_t capacity, int lbasize, int path_flag) 4951 { 4952 struct mode_format *page3p; 4953 struct mode_geometry *page4p; 4954 struct mode_header *headerp; 4955 int sector_size; 4956 int nsect; 4957 int nhead; 4958 int ncyl; 4959 int intrlv; 4960 int spc; 4961 diskaddr_t modesense_capacity; 4962 int rpm; 4963 int bd_len; 4964 int mode_header_length; 4965 uchar_t *p3bufp; 4966 uchar_t *p4bufp; 4967 int cdbsize; 4968 int ret = EIO; 4969 sd_ssc_t *ssc; 4970 int status; 4971 4972 ASSERT(un != NULL); 4973 4974 if (lbasize == 0) { 4975 if (ISCD(un)) { 4976 lbasize = 2048; 4977 } else { 4978 lbasize = un->un_sys_blocksize; 4979 } 4980 } 4981 pgeom_p->g_secsize = (unsigned short)lbasize; 4982 4983 /* 4984 * If the unit is a cd/dvd drive MODE SENSE page three 4985 * and MODE SENSE page four are reserved (see SBC spec 4986 * and MMC spec). To prevent soft errors just return 4987 * using the default LBA size. 4988 * 4989 * Since SATA MODE SENSE function (sata_txlt_mode_sense()) does not 4990 * implement support for mode pages 3 and 4 return here to prevent 4991 * illegal requests on SATA drives. 4992 * 4993 * These pages are also reserved in SBC-2 and later. We assume SBC-2 4994 * or later for a direct-attached block device if the SCSI version is 4995 * at least SPC-3. 4996 */ 4997 4998 if (ISCD(un) || 4999 un->un_interconnect_type == SD_INTERCONNECT_SATA || 5000 (un->un_ctype == CTYPE_CCS && SD_INQUIRY(un)->inq_ansi >= 5)) 5001 return (ret); 5002 5003 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 5004 5005 /* 5006 * Retrieve MODE SENSE page 3 - Format Device Page 5007 */ 5008 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 5009 ssc = sd_ssc_init(un); 5010 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp, 5011 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag); 5012 if (status != 0) { 5013 SD_ERROR(SD_LOG_COMMON, un, 5014 "sd_get_physical_geometry: mode sense page 3 failed\n"); 5015 goto page3_exit; 5016 } 5017 5018 /* 5019 * Determine size of Block Descriptors in order to locate the mode 5020 * page data. ATAPI devices return 0, SCSI devices should return 5021 * MODE_BLK_DESC_LENGTH. 5022 */ 5023 headerp = (struct mode_header *)p3bufp; 5024 if (un->un_f_cfg_is_atapi == TRUE) { 5025 struct mode_header_grp2 *mhp = 5026 (struct mode_header_grp2 *)headerp; 5027 mode_header_length = MODE_HEADER_LENGTH_GRP2; 5028 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 5029 } else { 5030 mode_header_length = MODE_HEADER_LENGTH; 5031 bd_len = ((struct mode_header *)headerp)->bdesc_length; 5032 } 5033 5034 if (bd_len > MODE_BLK_DESC_LENGTH) { 5035 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5036 "sd_get_physical_geometry: received unexpected bd_len " 5037 "of %d, page3\n", bd_len); 5038 status = EIO; 5039 goto page3_exit; 5040 } 5041 5042 page3p = (struct mode_format *) 5043 ((caddr_t)headerp + mode_header_length + bd_len); 5044 5045 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 5046 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5047 "sd_get_physical_geometry: mode sense pg3 code mismatch " 5048 "%d\n", page3p->mode_page.code); 5049 status = EIO; 5050 goto page3_exit; 5051 } 5052 5053 /* 5054 * Use this physical geometry data only if BOTH MODE SENSE commands 5055 * complete successfully; otherwise, revert to the logical geometry. 
5056 * So, we need to save everything in temporary variables. 5057 */ 5058 sector_size = BE_16(page3p->data_bytes_sect); 5059 5060 /* 5061 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 5062 */ 5063 if (sector_size == 0) { 5064 sector_size = un->un_sys_blocksize; 5065 } else { 5066 sector_size &= ~(un->un_sys_blocksize - 1); 5067 } 5068 5069 nsect = BE_16(page3p->sect_track); 5070 intrlv = BE_16(page3p->interleave); 5071 5072 SD_INFO(SD_LOG_COMMON, un, 5073 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 5074 SD_INFO(SD_LOG_COMMON, un, 5075 " mode page: %d; nsect: %d; sector size: %d;\n", 5076 page3p->mode_page.code, nsect, sector_size); 5077 SD_INFO(SD_LOG_COMMON, un, 5078 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 5079 BE_16(page3p->track_skew), 5080 BE_16(page3p->cylinder_skew)); 5081 5082 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 5083 5084 /* 5085 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 5086 */ 5087 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 5088 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp, 5089 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag); 5090 if (status != 0) { 5091 SD_ERROR(SD_LOG_COMMON, un, 5092 "sd_get_physical_geometry: mode sense page 4 failed\n"); 5093 goto page4_exit; 5094 } 5095 5096 /* 5097 * Determine size of Block Descriptors in order to locate the mode 5098 * page data. ATAPI devices return 0, SCSI devices should return 5099 * MODE_BLK_DESC_LENGTH. 5100 */ 5101 headerp = (struct mode_header *)p4bufp; 5102 if (un->un_f_cfg_is_atapi == TRUE) { 5103 struct mode_header_grp2 *mhp = 5104 (struct mode_header_grp2 *)headerp; 5105 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 5106 } else { 5107 bd_len = ((struct mode_header *)headerp)->bdesc_length; 5108 } 5109 5110 if (bd_len > MODE_BLK_DESC_LENGTH) { 5111 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5112 "sd_get_physical_geometry: received unexpected bd_len of " 5113 "%d, page4\n", bd_len); 5114 status = EIO; 5115 goto page4_exit; 5116 } 5117 5118 page4p = (struct mode_geometry *) 5119 ((caddr_t)headerp + mode_header_length + bd_len); 5120 5121 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 5122 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5123 "sd_get_physical_geometry: mode sense pg4 code mismatch " 5124 "%d\n", page4p->mode_page.code); 5125 status = EIO; 5126 goto page4_exit; 5127 } 5128 5129 /* 5130 * Stash the data now, after we know that both commands completed. 5131 */ 5132 5133 5134 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 5135 spc = nhead * nsect; 5136 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 5137 rpm = BE_16(page4p->rpm); 5138 5139 modesense_capacity = spc * ncyl; 5140 5141 SD_INFO(SD_LOG_COMMON, un, 5142 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 5143 SD_INFO(SD_LOG_COMMON, un, 5144 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 5145 SD_INFO(SD_LOG_COMMON, un, 5146 " computed capacity(h*s*c): %d;\n", modesense_capacity); 5147 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 5148 (void *)pgeom_p, capacity); 5149 5150 /* 5151 * Compensate if the drive's geometry is not rectangular, i.e., 5152 * the product of C * H * S returned by MODE SENSE >= that returned 5153 * by read capacity. This is an idiosyncrasy of the original x86 5154 * disk subsystem. 
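	 * For example, if the MODE SENSE C*H*S product exceeds the READ
	 * CAPACITY value by 5000 blocks at 1008 sectors per cylinder,
	 * (5000 + 1007) / 1008 = 5 surplus cylinders are recorded as
	 * alternate cylinders (g_acyl) and subtracted from g_ncyl.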
5155 */
5156 if (modesense_capacity >= capacity) {
5157 SD_INFO(SD_LOG_COMMON, un,
5158 "sd_get_physical_geometry: adjusting acyl; "
5159 "old: %d; new: %d\n", pgeom_p->g_acyl,
5160 (modesense_capacity - capacity + spc - 1) / spc);
5161 if (sector_size != 0) {
5162 /* 1243403: NEC D38x7 drives don't support sec size */
5163 pgeom_p->g_secsize = (unsigned short)sector_size;
5164 }
5165 pgeom_p->g_nsect = (unsigned short)nsect;
5166 pgeom_p->g_nhead = (unsigned short)nhead;
5167 pgeom_p->g_capacity = capacity;
5168 pgeom_p->g_acyl =
5169 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc;
5170 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl;
5171 }
5172
5173 pgeom_p->g_rpm = (unsigned short)rpm;
5174 pgeom_p->g_intrlv = (unsigned short)intrlv;
5175 ret = 0;
5176
5177 SD_INFO(SD_LOG_COMMON, un,
5178 "sd_get_physical_geometry: mode sense geometry:\n");
5179 SD_INFO(SD_LOG_COMMON, un,
5180 " nsect: %d; sector size: %d; interlv: %d\n",
5181 nsect, sector_size, intrlv);
5182 SD_INFO(SD_LOG_COMMON, un,
5183 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n",
5184 nhead, ncyl, rpm, modesense_capacity);
5185 SD_INFO(SD_LOG_COMMON, un,
5186 "sd_get_physical_geometry: (cached)\n");
5187 SD_INFO(SD_LOG_COMMON, un,
5188 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n",
5189 pgeom_p->g_ncyl, pgeom_p->g_acyl,
5190 pgeom_p->g_nhead, pgeom_p->g_nsect);
5191 SD_INFO(SD_LOG_COMMON, un,
5192 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n",
5193 pgeom_p->g_secsize, pgeom_p->g_capacity,
5194 pgeom_p->g_intrlv, pgeom_p->g_rpm);
5195 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
5196
5197 page4_exit:
5198 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH);
5199
5200 page3_exit:
5201 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH);
5202
5203 if (status != 0) {
5204 if (status == EIO) {
5205 /*
5206 * Some disks do not support MODE SENSE(6); we
5207 * should ignore this kind of error (sense key is
5208 * 0x5 - ILLEGAL REQUEST).
5209 */
5210 uint8_t *sensep;
5211 int senlen;
5212
5213 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
5214 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
5215 ssc->ssc_uscsi_cmd->uscsi_rqresid);
5216
5217 if (senlen > 0 &&
5218 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
5219 sd_ssc_assessment(ssc,
5220 SD_FMT_IGNORE_COMPROMISE);
5221 } else {
5222 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
5223 }
5224 } else {
5225 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5226 }
5227 }
5228 sd_ssc_fini(ssc);
5229 return (ret);
5230 }
5231
5232 /*
5233 * Function: sd_get_virtual_geometry
5234 *
5235 * Description: Ask the controller to tell us about the target device.
5236 *
5237 * Arguments: un - pointer to softstate
 * lgeom_p - logical geometry cache to be filled in
5238 * capacity - disk capacity in #blocks
5239 * lbasize - disk block size in bytes
5240 *
5241 * Context: Kernel thread only
5242 */
5243
5244 static int
5245 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p,
5246 diskaddr_t capacity, int lbasize)
5247 {
5248 uint_t geombuf;
5249 int spc;
5250
5251 ASSERT(un != NULL);
5252
5253 /* Set sector size, and total number of sectors */
5254 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1);
5255 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1);
5256
5257 /* Let the HBA tell us its geometry */
5258 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1);
5259
5260 /* A value of -1 indicates an undefined "geometry" property */
5261 if (geombuf == (-1)) {
5262 return (EINVAL);
5263 }
5264
5265 /* Initialize the logical geometry cache.
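 * The "geometry" capability word packs nhead in the upper 16 bits
 * and nsect in the lower 16 bits; the code below simply unpacks
 * those two fields.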
*/
5266 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff;
5267 lgeom_p->g_nsect = geombuf & 0xffff;
5268 lgeom_p->g_secsize = un->un_sys_blocksize;
5269
5270 spc = lgeom_p->g_nhead * lgeom_p->g_nsect;
5271
5272 /*
5273 * Note: The driver originally converted the capacity value from
5274 * target blocks to system blocks. However, the capacity value passed
5275 * to this routine is already in terms of system blocks (this scaling
5276 * is done when the READ CAPACITY command is issued and processed).
5277 * This 'error' may have gone undetected because the usage of g_ncyl
5278 * (which is based upon g_capacity) is very limited within the driver.
5279 */
5280 lgeom_p->g_capacity = capacity;
5281
5282 /*
5283 * Set ncyl to zero if the hba returned a zero nhead or nsect value. The
5284 * hba may return zero values if the device has been removed.
5285 */
5286 if (spc == 0) {
5287 lgeom_p->g_ncyl = 0;
5288 } else {
5289 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc;
5290 }
5291 lgeom_p->g_acyl = 0;
5292
5293 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n");
5294 return (0);
5295
5296 }
5297 /*
5298 * Function: sd_update_block_info
5299 *
5300 * Description: Update the unit's target block size and block count from
5301 * newly reported values, and refresh the capacity errstat.
5302 *
5303 * Arguments: un: unit struct.
5304 * lbasize: new target sector size
5305 * capacity: new target capacity, ie. block count
5306 *
5307 * Context: Kernel thread context
5308 */
5309
5310 static void
5311 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity)
5312 {
5313 if (lbasize != 0) {
5314 un->un_tgt_blocksize = lbasize;
5315 un->un_f_tgt_blocksize_is_valid = TRUE;
5316 if (!un->un_f_has_removable_media) {
5317 un->un_sys_blocksize = lbasize;
5318 }
5319 }
5320
5321 if (capacity != 0) {
5322 un->un_blockcount = capacity;
5323 un->un_f_blockcount_is_valid = TRUE;
5324
5325 /*
5326 * The capacity has changed so update the errstats.
5327 */
5328 if (un->un_errstats != NULL) {
5329 struct sd_errstats *stp;
5330
5331 capacity *= un->un_sys_blocksize;
5332 stp = (struct sd_errstats *)un->un_errstats->ks_data;
5333 if (stp->sd_capacity.value.ui64 < capacity)
5334 stp->sd_capacity.value.ui64 = capacity;
5335 }
5336 }
5337 }
5338
5339
5340 /*
5341 * Function: sd_register_devid
5342 *
5343 * Description: This routine will obtain the device id information from the
5344 * target, obtain the serial number, and register the device
5345 * id with the ddi framework.
5346 *
5347 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
5348 * structure; devi - the system's dev_info_t for the device
5349 * reservation_flag - indicates if a reservation conflict
5350 * occurred during attach
5351 *
5352 * Context: Kernel Thread
5353 */
5354 static void
5355 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag)
5356 {
5357 int rval = 0;
5358 uchar_t *inq80 = NULL;
5359 size_t inq80_len = MAX_INQUIRY_SIZE;
5360 size_t inq80_resid = 0;
5361 uchar_t *inq83 = NULL;
5362 size_t inq83_len = MAX_INQUIRY_SIZE;
5363 size_t inq83_resid = 0;
5364 int dlen, len;
5365 char *sn;
5366 struct sd_lun *un;
5367
5368 ASSERT(ssc != NULL);
5369 un = ssc->ssc_un;
5370 ASSERT(un != NULL);
5371 ASSERT(mutex_owned(SD_MUTEX(un)));
5372 ASSERT((SD_DEVINFO(un)) == devi);
5373
5374
5375 /*
5376 * We check the availability of the World Wide Name (0x83) and Unit
5377 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using
5378 * un_vpd_page_mask from them, we decide which way to get the WWN. If
5379 * 0x83 is available, that is the best choice.
Our next choice is
5380 * 0x80. If neither is available, we munge the devid from the device
5381 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
5382 * to fabricate a devid for non-Sun qualified disks.
5383 */
5384 if (sd_check_vpd_page_support(ssc) == 0) {
5385 /* collect page 80 data if available */
5386 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {
5387
5388 mutex_exit(SD_MUTEX(un));
5389 inq80 = kmem_zalloc(inq80_len, KM_SLEEP);
5390
5391 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len,
5392 0x01, 0x80, &inq80_resid);
5393
5394 if (rval != 0) {
5395 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5396 kmem_free(inq80, inq80_len);
5397 inq80 = NULL;
5398 inq80_len = 0;
5399 } else if (ddi_prop_exists(
5400 DDI_DEV_T_NONE, SD_DEVINFO(un),
5401 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
5402 INQUIRY_SERIAL_NO) == 0) {
5403 /*
5404 * If we don't already have a serial number
5405 * property, do a quick verify of the data
5406 * returned and define the property.
5407 */
5408 dlen = inq80_len - inq80_resid;
5409 len = (size_t)inq80[3];
5410 if ((dlen >= 4) && ((len + 4) <= dlen)) {
5411 /*
5412 * Ensure sn termination, skip leading
5413 * blanks, and create property
5414 * 'inquiry-serial-no'.
5415 */
5416 sn = (char *)&inq80[4];
5417 sn[len] = 0;
5418 while (*sn && (*sn == ' '))
5419 sn++;
5420 if (*sn) {
5421 (void) ddi_prop_update_string(
5422 DDI_DEV_T_NONE,
5423 SD_DEVINFO(un),
5424 INQUIRY_SERIAL_NO, sn);
5425 }
5426 }
5427 }
5428 mutex_enter(SD_MUTEX(un));
5429 }
5430
5431 /* collect page 83 data if available */
5432 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
5433 mutex_exit(SD_MUTEX(un));
5434 inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
5435
5436 rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len,
5437 0x01, 0x83, &inq83_resid);
5438
5439 if (rval != 0) {
5440 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5441 kmem_free(inq83, inq83_len);
5442 inq83 = NULL;
5443 inq83_len = 0;
5444 }
5445 mutex_enter(SD_MUTEX(un));
5446 }
5447 }
5448
5449 /*
5450 * If transport has already registered a devid for this target
5451 * then that takes precedence over the driver's determination
5452 * of the devid.
5453 *
5454 * NOTE: The reason this check is done here instead of at the beginning
5455 * of the function is to allow the code above to create the
5456 * 'inquiry-serial-no' property.
5457 */
5458 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) {
5459 ASSERT(un->un_devid);
5460 un->un_f_devid_transport_defined = TRUE;
5461 goto cleanup; /* use devid registered by the transport */
5462 }
5463
5464 /*
5465 * This is the case of antiquated Sun disk drives that have the
5466 * FAB_DEVID property set in the disk_table. These drives
5467 * manage their devids by storing them in the last 2 available
5468 * sectors on the drive and having them fabricated by the ddi
5469 * layer by calling ddi_devid_init and passing the DEVID_FAB flag.
5470 */
5471 if (un->un_f_opt_fab_devid == TRUE) {
5472 /*
5473 * Depending on EINVAL isn't reliable, since a reserved disk
5474 * may result in invalid geometry, so check to make sure a
5475 * reservation conflict did not occur during attach.
5476 */
5477 if ((sd_get_devid(ssc) == EINVAL) &&
5478 (reservation_flag != SD_TARGET_IS_RESERVED)) {
5479 /*
5480 * The devid is invalid AND there is no reservation
5481 * conflict. Fabricate a new devid.
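 * (sd_create_devid() below fabricates one via ddi_devid_init() with
 * the DEVID_FAB flag and persists it to the reserved sector through
 * sd_write_deviceid().)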
5482 */
5483 (void) sd_create_devid(ssc);
5484 }
5485
5486 /* Register the devid if it exists */
5487 if (un->un_devid != NULL) {
5488 (void) ddi_devid_register(SD_DEVINFO(un),
5489 un->un_devid);
5490 SD_INFO(SD_LOG_ATTACH_DETACH, un,
5491 "sd_register_devid: Devid Fabricated\n");
5492 }
5493 goto cleanup;
5494 }
5495
5496 /* encode best devid possible based on data available */
5497 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
5498 (char *)ddi_driver_name(SD_DEVINFO(un)),
5499 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
5500 inq80, inq80_len - inq80_resid, inq83, inq83_len -
5501 inq83_resid, &un->un_devid) == DDI_SUCCESS) {
5502
5503 /* devid successfully encoded, register devid */
5504 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);
5505
5506 } else {
5507 /*
5508 * Unable to encode a devid based on data available.
5509 * This is not a Sun qualified disk. Older Sun disk
5510 * drives that have the SD_FAB_DEVID property
5511 * set in the disk_table and non-Sun qualified
5512 * disks are treated in the same manner. These
5513 * drives manage their devids by storing them in
5514 * the last 2 available sectors on the drive and
5515 * having them fabricated by the ddi layer by
5516 * calling ddi_devid_init and passing the
5517 * DEVID_FAB flag.
5518 * Create a fabricated devid only if one does
5519 * not already exist.
5520 */
5521 if (sd_get_devid(ssc) == EINVAL) {
5522 (void) sd_create_devid(ssc);
5523 }
5524 un->un_f_opt_fab_devid = TRUE;
5525
5526 /* Register the devid if it exists */
5527 if (un->un_devid != NULL) {
5528 (void) ddi_devid_register(SD_DEVINFO(un),
5529 un->un_devid);
5530 SD_INFO(SD_LOG_ATTACH_DETACH, un,
5531 "sd_register_devid: devid fabricated using "
5532 "ddi framework\n");
5533 }
5534 }
5535
5536 cleanup:
5537 /* clean up resources */
5538 if (inq80 != NULL) {
5539 kmem_free(inq80, inq80_len);
5540 }
5541 if (inq83 != NULL) {
5542 kmem_free(inq83, inq83_len);
5543 }
5544 }
5545
5546
5547
5548 /*
5549 * Function: sd_get_devid
5550 *
5551 * Description: This routine will return 0 if a valid device id has been
5552 * obtained from the target and stored in the soft state. If a
5553 * valid device id has not been previously read and stored, a
5554 * read attempt will be made.
5555 *
5556 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
5557 * structure
5558 * Return Code: 0 if we successfully get the device id
5559 *
5560 * Context: Kernel Thread
5561 */
5562
5563 static int
5564 sd_get_devid(sd_ssc_t *ssc)
5565 {
5566 struct dk_devid *dkdevid;
5567 ddi_devid_t tmpid;
5568 uint_t *ip;
5569 size_t sz;
5570 diskaddr_t blk;
5571 int status;
5572 int chksum;
5573 int i;
5574 size_t buffer_size;
5575 struct sd_lun *un;
5576
5577 ASSERT(ssc != NULL);
5578 un = ssc->ssc_un;
5579 ASSERT(un != NULL);
5580 ASSERT(mutex_owned(SD_MUTEX(un)));
5581
5582 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n",
5583 un);
5584
5585 if (un->un_devid != NULL) {
5586 return (0);
5587 }
5588
5589 mutex_exit(SD_MUTEX(un));
5590 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
5591 (void *)SD_PATH_DIRECT) != 0) {
5592 mutex_enter(SD_MUTEX(un));
5593 return (EINVAL);
5594 }
5595
5596 /*
5597 * Read and verify device id, stored in the reserved cylinders at the
5598 * end of the disk. Backup label is on the odd sectors of the last
5599 * track of the last cylinder. Device id will be on the track of the
5600 * next to last cylinder.
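 * Roughly, the dk_devid block read below is laid out as: a two-byte
 * revision (dkd_rev_hi/dkd_rev_lo), the encoded devid itself, and a
 * trailing XOR checksum computed over the first DEV_BSIZE - 4 bytes
 * taken as 32-bit words; sd_write_deviceid() produces the same layout.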
5601 */
5602 mutex_enter(SD_MUTEX(un));
5603 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid));
5604 mutex_exit(SD_MUTEX(un));
5605 dkdevid = kmem_alloc(buffer_size, KM_SLEEP);
5606 status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk,
5607 SD_PATH_DIRECT);
5608
5609 if (status != 0) {
5610 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5611 goto error;
5612 }
5613
5614 /* Validate the revision */
5615 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
5616 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
5617 status = EINVAL;
5618 goto error;
5619 }
5620
5621 /* Calculate the checksum */
5622 chksum = 0;
5623 ip = (uint_t *)dkdevid;
5624 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
5625 i++) {
5626 chksum ^= ip[i];
5627 }
5628
5629 /* Compare the checksums */
5630 if (DKD_GETCHKSUM(dkdevid) != chksum) {
5631 status = EINVAL;
5632 goto error;
5633 }
5634
5635 /* Validate the device id */
5636 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
5637 status = EINVAL;
5638 goto error;
5639 }
5640
5641 /*
5642 * Store the device id in the driver soft state
5643 */
5644 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
5645 tmpid = kmem_alloc(sz, KM_SLEEP);
5646
5647 mutex_enter(SD_MUTEX(un));
5648
5649 un->un_devid = tmpid;
5650 bcopy(&dkdevid->dkd_devid, un->un_devid, sz);
5651
5652 kmem_free(dkdevid, buffer_size);
5653
5654 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un);
5655
5656 return (status);
5657 error:
5658 mutex_enter(SD_MUTEX(un));
5659 kmem_free(dkdevid, buffer_size);
5660 return (status);
5661 }
5662
5663
5664 /*
5665 * Function: sd_create_devid
5666 *
5667 * Description: This routine will fabricate the device id and write it
5668 * to the disk.
5669 *
5670 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
5671 * structure
5672 * Return Code: the fabricated device id, or NULL on failure
5673 *
5674 * Context: Kernel Thread
5675 */
5676
5677 static ddi_devid_t
5678 sd_create_devid(sd_ssc_t *ssc)
5679 {
5680 struct sd_lun *un;
5681
5682 ASSERT(ssc != NULL);
5683 un = ssc->ssc_un;
5684 ASSERT(un != NULL);
5685
5686 /* Fabricate the devid */
5687 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
5688 == DDI_FAILURE) {
5689 return (NULL);
5690 }
5691
5692 /* Write the devid to disk */
5693 if (sd_write_deviceid(ssc) != 0) {
5694 ddi_devid_free(un->un_devid);
5695 un->un_devid = NULL;
5696 }
5697
5698 return (un->un_devid);
5699 }
5700
5701
5702 /*
5703 * Function: sd_write_deviceid
5704 *
5705 * Description: This routine will write the device id to the disk
5706 * reserved sector.
5707 *
5708 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
5709 * structure
5710 * Return Code: -1 if the devid block could not be located; otherwise,
5711 * the value returned by sd_send_scsi_WRITE
5712 *
5713 * Context: Kernel Thread
5714 */
5715
5716 static int
5717 sd_write_deviceid(sd_ssc_t *ssc)
5718 {
5719 struct dk_devid *dkdevid;
5720 uchar_t *buf;
5721 diskaddr_t blk;
5722 uint_t *ip, chksum;
5723 int status;
5724 int i;
5725 struct sd_lun *un;
5726
5727 ASSERT(ssc != NULL);
5728 un = ssc->ssc_un;
5729 ASSERT(un != NULL);
5730 ASSERT(mutex_owned(SD_MUTEX(un)));
5731
5732 mutex_exit(SD_MUTEX(un));
5733 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
5734 (void *)SD_PATH_DIRECT) != 0) {
5735 mutex_enter(SD_MUTEX(un));
5736 return (-1);
5737 }
5738
5739
5740 /* Allocate the buffer */
5741 buf = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);
5742 dkdevid = (struct dk_devid *)buf;
5743
5744 /* Fill in the revision */
5745 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
5746 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;
5747
5748 /* Copy in the device id */
5749 mutex_enter(SD_MUTEX(un));
5750 bcopy(un->un_devid, &dkdevid->dkd_devid,
5751 ddi_devid_sizeof(un->un_devid));
5752 mutex_exit(SD_MUTEX(un));
5753
5754 /* Calculate the checksum */
5755 chksum = 0;
5756 ip = (uint_t *)dkdevid;
5757 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
5758 i++) {
5759 chksum ^= ip[i];
5760 }
5761
5762 /* Fill-in checksum */
5763 DKD_FORMCHKSUM(chksum, dkdevid);
5764
5765 /* Write the reserved sector */
5766 status = sd_send_scsi_WRITE(ssc, buf, un->un_sys_blocksize, blk,
5767 SD_PATH_DIRECT);
5768 if (status != 0)
5769 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5770
5771 kmem_free(buf, un->un_sys_blocksize);
5772
5773 mutex_enter(SD_MUTEX(un));
5774 return (status);
5775 }
5776
5777
5778 /*
5779 * Function: sd_check_vpd_page_support
5780 *
5781 * Description: This routine sends an inquiry command with the EVPD bit set and
5782 * a page code of 0x00 to the device. It is used to determine which
5783 * vital product pages are available to find the devid. We are
5784 * looking for pages 0x83, 0x80, or 0xB1. If we return -1, the
5785 * device does not support VPD pages.
5786 *
5787 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
5788 * structure
5789 * Return Code: 0 - success
5790 * -1 - VPD pages not supported or the command failed
5791 *
5792 * Context: This routine can sleep.
5793 */
5794
5795 static int
5796 sd_check_vpd_page_support(sd_ssc_t *ssc)
5797 {
5798 uchar_t *page_list = NULL;
5799 uchar_t page_length = 0xff; /* Use max possible length */
5800 uchar_t evpd = 0x01; /* Set the EVPD bit */
5801 uchar_t page_code = 0x00; /* Supported VPD Pages */
5802 int rval = 0;
5803 int counter;
5804 struct sd_lun *un;
5805
5806 ASSERT(ssc != NULL);
5807 un = ssc->ssc_un;
5808 ASSERT(un != NULL);
5809 ASSERT(mutex_owned(SD_MUTEX(un)));
5810
5811 mutex_exit(SD_MUTEX(un));
5812
5813 /*
5814 * We'll set the page length to the maximum to save figuring it out
5815 * with an additional call.
5816 */
5817 page_list = kmem_zalloc(page_length, KM_SLEEP);
5818
5819 rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd,
5820 page_code, NULL);
5821
5822 if (rval != 0)
5823 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5824
5825 mutex_enter(SD_MUTEX(un));
5826
5827 /*
5828 * Now we must validate that the device accepted the command, as some
5829 * drives do not support it. If the drive does support it, we will
5830 * return 0, and the supported pages will be in un_vpd_page_mask. If
5831 * not, we return -1.
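 * The page 0x00 payload is the standard VPD header (page code at
 * byte 1, page length at byte 3) followed by one supported page code
 * per byte in ascending order; that is why the scan below starts at
 * byte 4 and runs until the advertised length is exhausted.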
5832 */
5833 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) {
5834 /* Loop over the supported-pages list, noting pages we use */
5835 counter = 4; /* Supported pages start at byte 4, with 0x00 */
5836
5837 /*
5838 * Pages are returned in ascending order, and 0x83 is what we
5839 * are hoping for.
5840 */
5841 while ((page_list[counter] <= 0xB1) &&
5842 (counter <= (page_list[VPD_PAGE_LENGTH] +
5843 VPD_HEAD_OFFSET))) {
5844 /*
5845 * page_list[VPD_PAGE_LENGTH] (byte 3) counts the page
5846 * codes that follow the 4-byte header, hence the
5847 * VPD_HEAD_OFFSET adjustment in the loop bound above.
5848 */
5849 switch (page_list[counter]) {
5850 case 0x00:
5851 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG;
5852 break;
5853 case 0x80:
5854 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG;
5855 break;
5856 case 0x81:
5857 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG;
5858 break;
5859 case 0x82:
5860 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG;
5861 break;
5862 case 0x83:
5863 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG;
5864 break;
5865 case 0x86:
5866 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG;
5867 break;
5868 case 0xB1:
5869 un->un_vpd_page_mask |= SD_VPD_DEV_CHARACTER_PG;
5870 break;
5871 }
5872 counter++;
5873 }
5874
5875 } else {
5876 rval = -1;
5877
5878 SD_INFO(SD_LOG_ATTACH_DETACH, un,
5879 "sd_check_vpd_page_support: This drive does not implement "
5880 "VPD pages.\n");
5881 }
5882
5883 kmem_free(page_list, page_length);
5884
5885 return (rval);
5886 }
5887
5888
5889 /*
5890 * Function: sd_setup_pm
5891 *
5892 * Description: Initialize Power Management on the device
5893 *
5894 * Context: Kernel Thread
5895 */
5896
5897 static void
5898 sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi)
5899 {
5900 uint_t log_page_size;
5901 uchar_t *log_page_data;
5902 int rval = 0;
5903 struct sd_lun *un;
5904
5905 ASSERT(ssc != NULL);
5906 un = ssc->ssc_un;
5907 ASSERT(un != NULL);
5908
5909 /*
5910 * Since we are called from attach, holding a mutex for
5911 * un is unnecessary. Because some of the routines called
5912 * from here require SD_MUTEX to not be held, assert this
5913 * right up front.
5914 */
5915 ASSERT(!mutex_owned(SD_MUTEX(un)));
5916 /*
5917 * Since the sd device does not have the 'reg' property,
5918 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
5919 * The following code is to tell cpr that this device
5920 * DOES need to be suspended and resumed.
5921 */
5922 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
5923 "pm-hardware-state", "needs-suspend-resume");
5924
5925 /*
5926 * This complies with the new power management framework
5927 * for certain desktop machines. Create the pm_components
5928 * property as a string array property.
5929 * If un_f_pm_supported is TRUE, the HBA to which the disk
5930 * is attached has set the "pm-capable" property and the
5931 * value of this property is greater than 0.
5932 */
5933 if (un->un_f_pm_supported) {
5934 /*
5935 * Not all devices have a motor, so try it first.
5936 * Some devices may return ILLEGAL REQUEST; some
5937 * will hang.
5938 * The following START_STOP_UNIT is used to check whether
5939 * the target device has a motor.
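 * The probe order below is: the power-condition form of
 * START STOP UNIT first (when un_f_power_condition_supported), then
 * the plain start/stop form; if both fail, start/stop is marked
 * unsupported but the pm properties are still created.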
5940 */
5941 un->un_f_start_stop_supported = TRUE;
5942
5943 if (un->un_f_power_condition_supported) {
5944 rval = sd_send_scsi_START_STOP_UNIT(ssc,
5945 SD_POWER_CONDITION, SD_TARGET_ACTIVE,
5946 SD_PATH_DIRECT);
5947 if (rval != 0) {
5948 un->un_f_power_condition_supported = FALSE;
5949 }
5950 }
5951 if (!un->un_f_power_condition_supported) {
5952 rval = sd_send_scsi_START_STOP_UNIT(ssc,
5953 SD_START_STOP, SD_TARGET_START, SD_PATH_DIRECT);
5954 }
5955 if (rval != 0) {
5956 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5957 un->un_f_start_stop_supported = FALSE;
5958 }
5959
5960 /*
5961 * Create the pm properties anyway; otherwise the parent
5962 * can't go to sleep.
5963 */
5964 un->un_f_pm_is_enabled = TRUE;
5965 (void) sd_create_pm_components(devi, un);
5966
5967 /*
5968 * If it claims that log sense is supported, check it out.
5969 */
5970 if (un->un_f_log_sense_supported) {
5971 rval = sd_log_page_supported(ssc,
5972 START_STOP_CYCLE_PAGE);
5973 if (rval == 1) {
5974 /* Page found, use it. */
5975 un->un_start_stop_cycle_page =
5976 START_STOP_CYCLE_PAGE;
5977 } else {
5978 /*
5979 * Page not found or log sense is not
5980 * supported.
5981 * Notice we do not check the old style
5982 * START_STOP_CYCLE_VU_PAGE because this
5983 * code path does not apply to old disks.
5984 */
5985 un->un_f_log_sense_supported = FALSE;
5986 un->un_f_pm_log_sense_smart = FALSE;
5987 }
5988 }
5989
5990 return;
5991 }
5992
5993 /*
5994 * For a disk whose attached HBA has not set the "pm-capable"
5995 * property, check whether it supports power management.
5996 */
5997 if (!un->un_f_log_sense_supported) {
5998 un->un_power_level = SD_SPINDLE_ON;
5999 un->un_f_pm_is_enabled = FALSE;
6000 return;
6001 }
6002
6003 rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE);
6004
6005 #ifdef SDDEBUG
6006 if (sd_force_pm_supported) {
6007 /* Force a successful result */
6008 rval = 1;
6009 }
6010 #endif
6011
6012 /*
6013 * If the start-stop cycle counter log page is not supported
6014 * or if the pm-capable property is set to be false (0),
6015 * then we should not create the pm_components property.
6016 */
6017 if (rval == -1) {
6018 /*
6019 * Error.
6020 * Reading log sense failed, most likely this is
6021 * an older drive that does not support log sense.
6022 * If this fails auto-pm is not supported.
6023 */
6024 un->un_power_level = SD_SPINDLE_ON;
6025 un->un_f_pm_is_enabled = FALSE;
6026
6027 } else if (rval == 0) {
6028 /*
6029 * Page not found.
6030 * The start stop cycle counter is implemented as page
6031 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For
6032 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE).
6033 */
6034 if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) {
6035 /*
6036 * Page found, use this one.
6037 */
6038 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
6039 un->un_f_pm_is_enabled = TRUE;
6040 } else {
6041 /*
6042 * Error or page not found.
6043 * auto-pm is not supported for this device.
6044 */
6045 un->un_power_level = SD_SPINDLE_ON;
6046 un->un_f_pm_is_enabled = FALSE;
6047 }
6048 } else {
6049 /*
6050 * Page found, use it.
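 * To summarize the probe: 0xE (START_STOP_CYCLE_PAGE) is preferred,
 * 0x31 (START_STOP_CYCLE_VU_PAGE) is the vendor-unique fallback for
 * older disks, and auto-pm stays disabled when neither page is
 * implemented.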
6051 */
6052 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
6053 un->un_f_pm_is_enabled = TRUE;
6054 }
6055
6056
6057 if (un->un_f_pm_is_enabled == TRUE) {
6058 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
6059 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
6060
6061 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
6062 log_page_size, un->un_start_stop_cycle_page,
6063 0x01, 0, SD_PATH_DIRECT);
6064
6065 if (rval != 0) {
6066 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6067 }
6068
6069 #ifdef SDDEBUG
6070 if (sd_force_pm_supported) {
6071 /* Force a successful result */
6072 rval = 0;
6073 }
6074 #endif
6075
6076 /*
6077 * If the LOG SENSE for the start/stop cycle counter page
6078 * succeeds, then power management is supported and we can
6079 * enable auto-pm.
6080 */
6081 if (rval == 0) {
6082 (void) sd_create_pm_components(devi, un);
6083 } else {
6084 un->un_power_level = SD_SPINDLE_ON;
6085 un->un_f_pm_is_enabled = FALSE;
6086 }
6087
6088 kmem_free(log_page_data, log_page_size);
6089 }
6090 }
6091
6092
6093 /*
6094 * Function: sd_create_pm_components
6095 *
6096 * Description: Initialize PM property.
6097 *
6098 * Context: Kernel thread context
6099 */
6100
6101 static void
6102 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
6103 {
6104 ASSERT(!mutex_owned(SD_MUTEX(un)));
6105
6106 if (un->un_f_power_condition_supported) {
6107 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
6108 "pm-components", sd_pwr_pc.pm_comp, 5)
6109 != DDI_PROP_SUCCESS) {
6110 un->un_power_level = SD_SPINDLE_ACTIVE;
6111 un->un_f_pm_is_enabled = FALSE;
6112 return;
6113 }
6114 } else {
6115 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
6116 "pm-components", sd_pwr_ss.pm_comp, 3)
6117 != DDI_PROP_SUCCESS) {
6118 un->un_power_level = SD_SPINDLE_ON;
6119 un->un_f_pm_is_enabled = FALSE;
6120 return;
6121 }
6122 }
6123 /*
6124 * When components are initially created they are idle;
6125 * power up any non-removables.
6126 * Note: the return value of pm_raise_power can't be used
6127 * for determining if PM should be enabled for this device.
6128 * Even if you check the return values and remove this
6129 * property created above, the PM framework will not honor the
6130 * change after the first call to pm_raise_power. Hence,
6131 * removal of that property does not help if pm_raise_power
6132 * fails. In the case of removable media, the start/stop
6133 * will fail if the media is not present.
6134 */
6135 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0,
6136 SD_PM_STATE_ACTIVE(un)) == DDI_SUCCESS)) {
6137 mutex_enter(SD_MUTEX(un));
6138 un->un_power_level = SD_PM_STATE_ACTIVE(un);
6139 mutex_enter(&un->un_pm_mutex);
6140 /* Set to on and not busy. */
6141 un->un_pm_count = 0;
6142 } else {
6143 mutex_enter(SD_MUTEX(un));
6144 un->un_power_level = SD_PM_STATE_STOPPED(un);
6145 mutex_enter(&un->un_pm_mutex);
6146 /* Set to off. */
6147 un->un_pm_count = -1;
6148 }
6149 mutex_exit(&un->un_pm_mutex);
6150 mutex_exit(SD_MUTEX(un));
6151 }
6152
6153
6154 /*
6155 * Function: sd_ddi_suspend
6156 *
6157 * Description: Performs system power-down operations. This includes
6158 * setting the drive state to indicate it is suspended so
6159 * that no new commands will be accepted. Also, wait for
6160 * all commands that are in transport or queued to a timer
6161 * for retry to complete. All timeout threads are cancelled.
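 * The wait for outstanding commands is bounded by
 * sd_wait_cmds_complete seconds; if commands are still in transport
 * when that time expires, the suspend fails and the previous state
 * is restored.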
6162 *
6163 * Return Code: DDI_FAILURE or DDI_SUCCESS
6164 *
6165 * Context: Kernel thread context
6166 */
6167
6168 static int
6169 sd_ddi_suspend(dev_info_t *devi)
6170 {
6171 struct sd_lun *un;
6172 clock_t wait_cmds_complete;
6173
6174 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
6175 if (un == NULL) {
6176 return (DDI_FAILURE);
6177 }
6178
6179 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");
6180
6181 mutex_enter(SD_MUTEX(un));
6182
6183 /* Return success if the device is already suspended. */
6184 if (un->un_state == SD_STATE_SUSPENDED) {
6185 mutex_exit(SD_MUTEX(un));
6186 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6187 "device already suspended, exiting\n");
6188 return (DDI_SUCCESS);
6189 }
6190
6191 /* Return failure if the device is being used by HA */
6192 if (un->un_resvd_status &
6193 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
6194 mutex_exit(SD_MUTEX(un));
6195 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6196 "device in use by HA, exiting\n");
6197 return (DDI_FAILURE);
6198 }
6199
6200 /*
6201 * Return failure if the device is in a resource wait
6202 * or power changing state.
6203 */
6204 if ((un->un_state == SD_STATE_RWAIT) ||
6205 (un->un_state == SD_STATE_PM_CHANGING)) {
6206 mutex_exit(SD_MUTEX(un));
6207 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6208 "device in resource wait state, exiting\n");
6209 return (DDI_FAILURE);
6210 }
6211
6212
6213 un->un_save_state = un->un_last_state;
6214 New_state(un, SD_STATE_SUSPENDED);
6215
6216 /*
6217 * Wait for all commands that are in transport or queued to a timer
6218 * for retry to complete.
6219 *
6220 * While waiting, no new commands will be accepted or sent because of
6221 * the new state we set above.
6222 *
6223 * Wait till current operation has completed. If we are in the resource
6224 * wait state (with an intr outstanding) then we need to wait till the
6225 * intr completes and starts the next cmd. We want to wait for
6226 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND.
6227 */
6228 wait_cmds_complete = ddi_get_lbolt() +
6229 (sd_wait_cmds_complete * drv_usectohz(1000000));
6230
6231 while (un->un_ncmds_in_transport != 0) {
6232 /*
6233 * Fail if commands do not finish in the specified time.
6234 */
6235 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
6236 wait_cmds_complete) == -1) {
6237 /*
6238 * Undo the state changes made above. Everything
6239 * must go back to its original value.
6240 */
6241 Restore_state(un);
6242 un->un_last_state = un->un_save_state;
6243 /* Wake up any threads that might be waiting.
*/
6244 cv_broadcast(&un->un_suspend_cv);
6245 mutex_exit(SD_MUTEX(un));
6246 SD_ERROR(SD_LOG_IO_PM, un,
6247 "sd_ddi_suspend: failed due to outstanding cmds\n");
6248 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n");
6249 return (DDI_FAILURE);
6250 }
6251 }
6252
6253 /*
6254 * Cancel SCSI watch thread and timeouts, if any are active
6255 */
6256
6257 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
6258 opaque_t temp_token = un->un_swr_token;
6259 mutex_exit(SD_MUTEX(un));
6260 scsi_watch_suspend(temp_token);
6261 mutex_enter(SD_MUTEX(un));
6262 }
6263
6264 if (un->un_reset_throttle_timeid != NULL) {
6265 timeout_id_t temp_id = un->un_reset_throttle_timeid;
6266 un->un_reset_throttle_timeid = NULL;
6267 mutex_exit(SD_MUTEX(un));
6268 (void) untimeout(temp_id);
6269 mutex_enter(SD_MUTEX(un));
6270 }
6271
6272 if (un->un_dcvb_timeid != NULL) {
6273 timeout_id_t temp_id = un->un_dcvb_timeid;
6274 un->un_dcvb_timeid = NULL;
6275 mutex_exit(SD_MUTEX(un));
6276 (void) untimeout(temp_id);
6277 mutex_enter(SD_MUTEX(un));
6278 }
6279
6280 mutex_enter(&un->un_pm_mutex);
6281 if (un->un_pm_timeid != NULL) {
6282 timeout_id_t temp_id = un->un_pm_timeid;
6283 un->un_pm_timeid = NULL;
6284 mutex_exit(&un->un_pm_mutex);
6285 mutex_exit(SD_MUTEX(un));
6286 (void) untimeout(temp_id);
6287 mutex_enter(SD_MUTEX(un));
6288 } else {
6289 mutex_exit(&un->un_pm_mutex);
6290 }
6291
6292 if (un->un_rmw_msg_timeid != NULL) {
6293 timeout_id_t temp_id = un->un_rmw_msg_timeid;
6294 un->un_rmw_msg_timeid = NULL;
6295 mutex_exit(SD_MUTEX(un));
6296 (void) untimeout(temp_id);
6297 mutex_enter(SD_MUTEX(un));
6298 }
6299
6300 if (un->un_retry_timeid != NULL) {
6301 timeout_id_t temp_id = un->un_retry_timeid;
6302 un->un_retry_timeid = NULL;
6303 mutex_exit(SD_MUTEX(un));
6304 (void) untimeout(temp_id);
6305 mutex_enter(SD_MUTEX(un));
6306
6307 if (un->un_retry_bp != NULL) {
6308 un->un_retry_bp->av_forw = un->un_waitq_headp;
6309 un->un_waitq_headp = un->un_retry_bp;
6310 if (un->un_waitq_tailp == NULL) {
6311 un->un_waitq_tailp = un->un_retry_bp;
6312 }
6313 un->un_retry_bp = NULL;
6314 un->un_retry_statp = NULL;
6315 }
6316 }
6317
6318 if (un->un_direct_priority_timeid != NULL) {
6319 timeout_id_t temp_id = un->un_direct_priority_timeid;
6320 un->un_direct_priority_timeid = NULL;
6321 mutex_exit(SD_MUTEX(un));
6322 (void) untimeout(temp_id);
6323 mutex_enter(SD_MUTEX(un));
6324 }
6325
6326 if (un->un_f_is_fibre == TRUE) {
6327 /*
6328 * Remove callbacks for insert and remove events
6329 */
6330 if (un->un_insert_event != NULL) {
6331 mutex_exit(SD_MUTEX(un));
6332 (void) ddi_remove_event_handler(un->un_insert_cb_id);
6333 mutex_enter(SD_MUTEX(un));
6334 un->un_insert_event = NULL;
6335 }
6336
6337 if (un->un_remove_event != NULL) {
6338 mutex_exit(SD_MUTEX(un));
6339 (void) ddi_remove_event_handler(un->un_remove_cb_id);
6340 mutex_enter(SD_MUTEX(un));
6341 un->un_remove_event = NULL;
6342 }
6343 }
6344
6345 mutex_exit(SD_MUTEX(un));
6346
6347 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");
6348
6349 return (DDI_SUCCESS);
6350 }
6351
6352
6353 /*
6354 * Function: sd_ddi_resume
6355 *
6356 * Description: Performs system power-up operations.
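 * This is the inverse of sd_ddi_suspend(): the saved state and
 * throttle are restored (throttle before pm_raise_power, see the
 * note below), the SCSI watch thread is resumed, and any queued
 * commands are dispatched.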
6357 *
6358 * Return Code: DDI_SUCCESS
6359 * DDI_FAILURE
6360 *
6361 * Context: Kernel thread context
6362 */
6363
6364 static int
6365 sd_ddi_resume(dev_info_t *devi)
6366 {
6367 struct sd_lun *un;
6368
6369 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
6370 if (un == NULL) {
6371 return (DDI_FAILURE);
6372 }
6373
6374 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");
6375
6376 mutex_enter(SD_MUTEX(un));
6377 Restore_state(un);
6378
6379 /*
6380 * Restore the state which was saved, to give
6381 * the right state in un_last_state
6382 */
6383 un->un_last_state = un->un_save_state;
6384 /*
6385 * Note: throttle comes back at full.
6386 * Also note: this MUST be done before calling pm_raise_power
6387 * otherwise the system can get hung in biowait. The scenario where
6388 * this'll happen is under cpr suspend. Writing of the system
6389 * state goes through sddump, which writes 0 to un_throttle. If
6390 * writing the system state then fails, for example if the partition
6391 * is too small, then cpr attempts a resume. If throttle isn't restored
6392 * from the saved value until after calling pm_raise_power then
6393 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs
6394 * in biowait.
6395 */
6396 un->un_throttle = un->un_saved_throttle;
6397
6398 /*
6399 * The chance of failure is very rare, as the only command issued in
6400 * the power entry point is a START command when transitioning from
6401 * 0->1 or unknown->1. Put the device in the SPINDLE ON state
6402 * irrespective of the state at which suspend was done. Ignore the
6403 * return value, as the resume should not fail. In the case of
6404 * removable media the media need not be inserted, so there is a
6405 * chance that raise power will fail with media not present.
6406 */
6407 if (un->un_f_attach_spinup) {
6408 mutex_exit(SD_MUTEX(un));
6409 (void) pm_raise_power(SD_DEVINFO(un), 0,
6410 SD_PM_STATE_ACTIVE(un));
6411 mutex_enter(SD_MUTEX(un));
6412 }
6413
6414 /*
6415 * Don't broadcast to the suspend cv and therefore possibly
6416 * start I/O until after power has been restored.
6417 */
6418 cv_broadcast(&un->un_suspend_cv);
6419 cv_broadcast(&un->un_state_cv);
6420
6421 /* restart thread */
6422 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
6423 scsi_watch_resume(un->un_swr_token);
6424 }
6425
6426 #if (defined(__fibre))
6427 if (un->un_f_is_fibre == TRUE) {
6428 /*
6429 * Add callbacks for insert and remove events
6430 */
6431 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
6432 sd_init_event_callbacks(un);
6433 }
6434 }
6435 #endif
6436
6437 /*
6438 * Transport any pending commands to the target.
6439 *
6440 * If this is a low-activity device, commands in the queue will have to
6441 * wait until new commands come in, which may take a while. Also, we
6442 * specifically don't check un_ncmds_in_transport because we know that
6443 * there really are no commands in progress after the unit was
6444 * suspended and we could have reached the throttle level, been
6445 * suspended, and have no new commands coming in for a while. Highly
6446 * unlikely, but so is the low-activity disk scenario.
6447 */
6448 ddi_xbuf_dispatch(un->un_xbuf_attr);
6449
6450 sd_start_cmds(un, NULL);
6451 mutex_exit(SD_MUTEX(un));
6452
6453 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");
6454
6455 return (DDI_SUCCESS);
6456 }
6457
6458
6459 /*
6460 * Function: sd_pm_state_change
6461 *
6462 * Description: Change the driver power state.
6463 * Someone else is required to actually change the driver
6464 * power level.
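 * That is, this routine only updates the driver's bookkeeping
 * (un_power_level and un_pm_count); the START STOP UNIT command
 * itself is issued from sdpower().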
6465 * 6466 * Arguments: un - driver soft state (unit) structure 6467 * level - the power level that is changed to 6468 * flag - to decide how to change the power state 6469 * 6470 * Return Code: DDI_SUCCESS 6471 * 6472 * Context: Kernel thread context 6473 */ 6474 static int 6475 sd_pm_state_change(struct sd_lun *un, int level, int flag) 6476 { 6477 ASSERT(un != NULL); 6478 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: entry\n"); 6479 6480 ASSERT(!mutex_owned(SD_MUTEX(un))); 6481 mutex_enter(SD_MUTEX(un)); 6482 6483 if (flag == SD_PM_STATE_ROLLBACK || SD_PM_IS_IO_CAPABLE(un, level)) { 6484 un->un_power_level = level; 6485 ASSERT(!mutex_owned(&un->un_pm_mutex)); 6486 mutex_enter(&un->un_pm_mutex); 6487 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 6488 un->un_pm_count++; 6489 ASSERT(un->un_pm_count == 0); 6490 } 6491 mutex_exit(&un->un_pm_mutex); 6492 } else { 6493 /* 6494 * Exit if power management is not enabled for this device, 6495 * or if the device is being used by HA. 6496 */ 6497 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 6498 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 6499 mutex_exit(SD_MUTEX(un)); 6500 SD_TRACE(SD_LOG_POWER, un, 6501 "sd_pm_state_change: exiting\n"); 6502 return (DDI_FAILURE); 6503 } 6504 6505 SD_INFO(SD_LOG_POWER, un, "sd_pm_state_change: " 6506 "un_ncmds_in_driver=%ld\n", un->un_ncmds_in_driver); 6507 6508 /* 6509 * See if the device is not busy, ie.: 6510 * - we have no commands in the driver for this device 6511 * - not waiting for resources 6512 */ 6513 if ((un->un_ncmds_in_driver == 0) && 6514 (un->un_state != SD_STATE_RWAIT)) { 6515 /* 6516 * The device is not busy, so it is OK to go to low 6517 * power state. Indicate low power, but rely on someone 6518 * else to actually change it. 6519 */ 6520 mutex_enter(&un->un_pm_mutex); 6521 un->un_pm_count = -1; 6522 mutex_exit(&un->un_pm_mutex); 6523 un->un_power_level = level; 6524 } 6525 } 6526 6527 mutex_exit(SD_MUTEX(un)); 6528 6529 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: exit\n"); 6530 6531 return (DDI_SUCCESS); 6532 } 6533 6534 6535 /* 6536 * Function: sd_pm_idletimeout_handler 6537 * 6538 * Description: A timer routine that's active only while a device is busy. 6539 * The purpose is to extend slightly the pm framework's busy 6540 * view of the device to prevent busy/idle thrashing for 6541 * back-to-back commands. Do this by comparing the current time 6542 * to the time at which the last command completed and when the 6543 * difference is greater than sd_pm_idletime, call 6544 * pm_idle_component. In addition to indicating idle to the pm 6545 * framework, update the chain type to again use the internal pm 6546 * layers of the driver. 6547 * 6548 * Arguments: arg - driver soft state (unit) structure 6549 * 6550 * Context: Executes in a timeout(9F) thread context 6551 */ 6552 6553 static void 6554 sd_pm_idletimeout_handler(void *arg) 6555 { 6556 const hrtime_t idletime = sd_pm_idletime * NANOSEC; 6557 struct sd_lun *un = arg; 6558 6559 mutex_enter(&sd_detach_mutex); 6560 if (un->un_detach_count != 0) { 6561 /* Abort if the instance is detaching */ 6562 mutex_exit(&sd_detach_mutex); 6563 return; 6564 } 6565 mutex_exit(&sd_detach_mutex); 6566 6567 /* 6568 * Grab both mutexes, in the proper order, since we're accessing 6569 * both PM and softstate variables. 
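 * The order here (SD_MUTEX first, then un_pm_mutex) matches the
 * rest of the driver, e.g. sd_ddi_suspend() and
 * sd_create_pm_components(); taking them in the opposite order
 * could deadlock.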
6570 */
6571 mutex_enter(SD_MUTEX(un));
6572 mutex_enter(&un->un_pm_mutex);
6573 if (((gethrtime() - un->un_pm_idle_time) > idletime) &&
6574 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
6575 /*
6576 * Update the chain types.
6577 * This takes effect on the next new command received.
6578 */
6579 if (un->un_f_non_devbsize_supported) {
6580 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
6581 } else {
6582 un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
6583 }
6584 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
6585
6586 SD_TRACE(SD_LOG_IO_PM, un,
6587 "sd_pm_idletimeout_handler: idling device\n");
6588 (void) pm_idle_component(SD_DEVINFO(un), 0);
6589 un->un_pm_idle_timeid = NULL;
6590 } else {
6591 un->un_pm_idle_timeid =
6592 timeout(sd_pm_idletimeout_handler, un,
6593 (drv_usectohz((clock_t)300000))); /* 300 ms. */
6594 }
6595 mutex_exit(&un->un_pm_mutex);
6596 mutex_exit(SD_MUTEX(un));
6597 }
6598
6599
6600 /*
6601 * Function: sd_pm_timeout_handler
6602 *
6603 * Description: Callback to tell framework we are idle.
6604 *
6605 * Context: timeout(9f) thread context.
6606 */
6607
6608 static void
6609 sd_pm_timeout_handler(void *arg)
6610 {
6611 struct sd_lun *un = arg;
6612
6613 (void) pm_idle_component(SD_DEVINFO(un), 0);
6614 mutex_enter(&un->un_pm_mutex);
6615 un->un_pm_timeid = NULL;
6616 mutex_exit(&un->un_pm_mutex);
6617 }
6618
6619
6620 /*
6621 * Function: sdpower
6622 *
6623 * Description: PM entry point.
6624 *
6625 * Return Code: DDI_SUCCESS
6626 * DDI_FAILURE
6627 *
6628 * Context: Kernel thread context
6629 */
6630
6631 static int
6632 sdpower(dev_info_t *devi, int component, int level)
6633 {
6634 struct sd_lun *un;
6635 int instance;
6636 int rval = DDI_SUCCESS;
6637 uint_t i, log_page_size, maxcycles, ncycles;
6638 uchar_t *log_page_data;
6639 int log_sense_page;
6640 int medium_present;
6641 time_t intvlp;
6642 struct pm_trans_data sd_pm_tran_data;
6643 uchar_t save_state = SD_STATE_NORMAL;
6644 int sval;
6645 uchar_t state_before_pm;
6646 int got_semaphore_here;
6647 sd_ssc_t *ssc;
6648 int last_power_level = SD_SPINDLE_UNINIT;
6649
6650 instance = ddi_get_instance(devi);
6651
6652 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
6653 !SD_PM_IS_LEVEL_VALID(un, level) || component != 0) {
6654 return (DDI_FAILURE);
6655 }
6656
6657 ssc = sd_ssc_init(un);
6658
6659 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);
6660
6661 /*
6662 * Must synchronize power down with close.
6663 * Attempt to decrement/acquire the open/close semaphore,
6664 * but do NOT wait on it. If it's not greater than zero,
6665 * ie. it can't be decremented without waiting, then
6666 * someone else, either open or close, already has it
6667 * and the try returns 0. Use that knowledge here to determine
6668 * if it's OK to change the device power level.
6669 * Also, only increment it on exit if it was decremented, ie. gotten,
6670 * here.
6671 */
6672 got_semaphore_here = sema_tryp(&un->un_semoclose);
6673
6674 mutex_enter(SD_MUTEX(un));
6675
6676 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
6677 un->un_ncmds_in_driver);
6678
6679 /*
6680 * If un_ncmds_in_driver is non-zero, commands are already being
6681 * processed in the driver; if the semaphore was not gotten here,
6682 * an open or close is being processed. In either case a concurrent
6683 * request to go to a lower power level that can't perform I/O
6684 * cannot be honored, therefore we need to
6685 * return failure.
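 * (Failing here is expected to be harmless; the pm framework can
 * simply attempt the transition again later, once the device has
 * gone idle.)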
6686 */
6687 if ((!SD_PM_IS_IO_CAPABLE(un, level)) &&
6688 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
6689 mutex_exit(SD_MUTEX(un));
6690
6691 if (got_semaphore_here != 0) {
6692 sema_v(&un->un_semoclose);
6693 }
6694 SD_TRACE(SD_LOG_IO_PM, un,
6695 "sdpower: exit, device has queued cmds.\n");
6696
6697 goto sdpower_failed;
6698 }
6699
6700 /*
6701 * If the device is OFFLINE, the disk is completely dead; in our
6702 * case we would have to power the disk on or off by sending commands,
6703 * which would fail anyway, so return here.
6704 *
6705 * Power changes to a device that's OFFLINE or SUSPENDED
6706 * are not allowed.
6707 */
6708 if ((un->un_state == SD_STATE_OFFLINE) ||
6709 (un->un_state == SD_STATE_SUSPENDED)) {
6710 mutex_exit(SD_MUTEX(un));
6711
6712 if (got_semaphore_here != 0) {
6713 sema_v(&un->un_semoclose);
6714 }
6715 SD_TRACE(SD_LOG_IO_PM, un,
6716 "sdpower: exit, device is off-line.\n");
6717
6718 goto sdpower_failed;
6719 }
6720
6721 /*
6722 * Change the device's state to indicate its power level
6723 * is being changed. Do this to prevent a power off in the
6724 * middle of commands, which is especially bad on devices
6725 * that are really powered off instead of just spun down.
6726 */
6727 state_before_pm = un->un_state;
6728 un->un_state = SD_STATE_PM_CHANGING;
6729
6730 mutex_exit(SD_MUTEX(un));
6731
6732 /*
6733 * If the log sense command is not supported, bypass the
6734 * following checking; otherwise, check the log sense
6735 * information for this device.
6736 */
6737 if (SD_PM_STOP_MOTOR_NEEDED(un, level) &&
6738 un->un_f_log_sense_supported) {
6739 /*
6740 * Get the log sense information to understand whether the
6741 * power cycle counts have gone beyond the threshold.
6742 */
6743 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
6744 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
6745
6746 mutex_enter(SD_MUTEX(un));
6747 log_sense_page = un->un_start_stop_cycle_page;
6748 mutex_exit(SD_MUTEX(un));
6749
6750 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
6751 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
6752
6753 if (rval != 0) {
6754 if (rval == EIO)
6755 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
6756 else
6757 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6758 }
6759
6760 #ifdef SDDEBUG
6761 if (sd_force_pm_supported) {
6762 /* Force a successful result */
6763 rval = 0;
6764 }
6765 #endif
6766 if (rval != 0) {
6767 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
6768 "Log Sense Failed\n");
6769
6770 kmem_free(log_page_data, log_page_size);
6771 /* Cannot support power management on those drives */
6772
6773 if (got_semaphore_here != 0) {
6774 sema_v(&un->un_semoclose);
6775 }
6776 /*
6777 * On exit put the state back to its original value
6778 * and broadcast to anyone waiting for the power
6779 * change completion.
6780 */
6781 mutex_enter(SD_MUTEX(un));
6782 un->un_state = state_before_pm;
6783 cv_broadcast(&un->un_suspend_cv);
6784 mutex_exit(SD_MUTEX(un));
6785 SD_TRACE(SD_LOG_IO_PM, un,
6786 "sdpower: exit, Log Sense Failed.\n");
6787
6788 goto sdpower_failed;
6789 }
6790
6791 /*
6792 * From the page data - Convert the essential information to
6793 * pm_trans_data
6794 */
6795 maxcycles =
6796 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
6797 (log_page_data[0x1E] << 8) | log_page_data[0x1F];
6798
6799 ncycles =
6800 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
6801 (log_page_data[0x26] << 8) | log_page_data[0x27];
6802
6803 if (un->un_f_pm_log_sense_smart) {
6804 sd_pm_tran_data.un.smart_count.allowed = maxcycles;
6805 sd_pm_tran_data.un.smart_count.consumed = ncycles;
6806 sd_pm_tran_data.un.smart_count.flag = 0;
6807 sd_pm_tran_data.format = DC_SMART_FORMAT;
6808 } else {
6809 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;
6810 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;
6811 for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
6812 sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
6813 log_page_data[8+i];
6814 }
6815 sd_pm_tran_data.un.scsi_cycles.flag = 0;
6816 sd_pm_tran_data.format = DC_SCSI_FORMAT;
6817 }
6818
6819 kmem_free(log_page_data, log_page_size);
6820
6821 /*
6822 * Call pm_trans_check routine to get the Ok from
6823 * the global policy
6824 */
6825 rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
6826 #ifdef SDDEBUG
6827 if (sd_force_pm_supported) {
6828 /* Force a successful result */
6829 rval = 1;
6830 }
6831 #endif
6832 switch (rval) {
6833 case 0:
6834 /*
6835 * Not OK to power cycle, or an error occurred in the
6836 * parameters passed. pm_trans_check() has returned, in
6837 * intvlp, the advised time to wait before considering a
6838 * power cycle again; we are supposed to pretend we are
6839 * busy so that the pm framework will never call our
6840 * power entry point. Because of that, install a timeout
6841 * handler and wait for the recommended time to elapse
6842 * so that power management can be effective again.
6843 *
6844 * To effect this behavior, call pm_busy_component to
6845 * indicate to the framework this device is busy.
6846 * By not adjusting un_pm_count, the rest of PM in
6847 * the driver functions normally, independent of this;
6848 * but because the framework is told the device
6849 * is busy, it won't attempt powering down until it gets
6850 * a matching idle. The timeout handler sends this.
6851 * Note: sd_pm_entry can't be called here to do this
6852 * because sdpower may have been called as a result
6853 * of a call to pm_raise_power from within sd_pm_entry.
6854 *
6855 * If a timeout handler is already active then
6856 * don't install another.
6857 */
6858 mutex_enter(&un->un_pm_mutex);
6859 if (un->un_pm_timeid == NULL) {
6860 un->un_pm_timeid =
6861 timeout(sd_pm_timeout_handler,
6862 un, intvlp * drv_usectohz(1000000));
6863 mutex_exit(&un->un_pm_mutex);
6864 (void) pm_busy_component(SD_DEVINFO(un), 0);
6865 } else {
6866 mutex_exit(&un->un_pm_mutex);
6867 }
6868 if (got_semaphore_here != 0) {
6869 sema_v(&un->un_semoclose);
6870 }
6871 /*
6872 * On exit put the state back to its original value
6873 * and broadcast to anyone waiting for the power
6874 * change completion.
6875 */
6876 mutex_enter(SD_MUTEX(un));
6877 un->un_state = state_before_pm;
6878 cv_broadcast(&un->un_suspend_cv);
6879 mutex_exit(SD_MUTEX(un));
6880
6881 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
6882 "trans check Failed, not ok to power cycle.\n");
6883
6884 goto sdpower_failed;
6885 case -1:
6886 if (got_semaphore_here != 0) {
6887 sema_v(&un->un_semoclose);
6888 }
6889 /*
6890 * On exit put the state back to its original value
6891 * and broadcast to anyone waiting for the power
6892 * change completion.
6893 */
6894 mutex_enter(SD_MUTEX(un));
6895 un->un_state = state_before_pm;
6896 cv_broadcast(&un->un_suspend_cv);
6897 mutex_exit(SD_MUTEX(un));
6898 SD_TRACE(SD_LOG_IO_PM, un,
6899 "sdpower: exit, trans check command Failed.\n");
6900
6901 goto sdpower_failed;
6902 }
6903 }
6904
6905 if (!SD_PM_IS_IO_CAPABLE(un, level)) {
6906 /*
6907 * Save the last state... if the STOP FAILS we need it
6908 * for restoring
6909 */
6910 mutex_enter(SD_MUTEX(un));
6911 save_state = un->un_last_state;
6912 last_power_level = un->un_power_level;
6913 /*
6914 * There must not be any cmds getting processed
6915 * in the driver when we get here. Power to the
6916 * device is potentially going off.
6917 */
6918 ASSERT(un->un_ncmds_in_driver == 0);
6919 mutex_exit(SD_MUTEX(un));
6920
6921 /*
6922 * For now, PM suspend the device completely before the
6923 * spindle is turned off.
6924 */
6925 if ((rval = sd_pm_state_change(un, level, SD_PM_STATE_CHANGE))
6926 == DDI_FAILURE) {
6927 if (got_semaphore_here != 0) {
6928 sema_v(&un->un_semoclose);
6929 }
6930 /*
6931 * On exit put the state back to its original value
6932 * and broadcast to anyone waiting for the power
6933 * change completion.
6934 */
6935 mutex_enter(SD_MUTEX(un));
6936 un->un_state = state_before_pm;
6937 un->un_power_level = last_power_level;
6938 cv_broadcast(&un->un_suspend_cv);
6939 mutex_exit(SD_MUTEX(un));
6940 SD_TRACE(SD_LOG_IO_PM, un,
6941 "sdpower: exit, PM suspend Failed.\n");
6942
6943 goto sdpower_failed;
6944 }
6945 }
6946
6947 /*
6948 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
6949 * close, or strategy. Dump no longer uses this routine; it uses its
6950 * own code so it can be done in polled mode.
6951 */
6952
6953 medium_present = TRUE;
6954
6955 /*
6956 * When powering up, issue a TUR in case the device is at unit
6957 * attention. Don't do retries. Bypass the PM layer, otherwise
6958 * a deadlock on un_pm_busy_cv will occur.
6959 */
6960 if (SD_PM_IS_IO_CAPABLE(un, level)) {
6961 sval = sd_send_scsi_TEST_UNIT_READY(ssc,
6962 SD_DONT_RETRY_TUR | SD_BYPASS_PM);
6963 if (sval != 0)
6964 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6965 }
6966
6967 if (un->un_f_power_condition_supported) {
6968 char *pm_condition_name[] = {"STOPPED", "STANDBY",
6969 "IDLE", "ACTIVE"};
6970 SD_TRACE(SD_LOG_IO_PM, un,
6971 "sdpower: sending \'%s\' power condition",
6972 pm_condition_name[level]);
6973 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION,
6974 sd_pl2pc[level], SD_PATH_DIRECT);
6975 } else {
6976 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
6977 ((level == SD_SPINDLE_ON) ? "START" : "STOP"));
6978 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
6979 ((level == SD_SPINDLE_ON) ? SD_TARGET_START :
6980 SD_TARGET_STOP), SD_PATH_DIRECT);
6981 }
6982 if (sval != 0) {
6983 if (sval == EIO)
6984 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
6985 else
6986 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6987 }
6988
6989 /* Command failed, check for media present.
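 * Only removable-media devices can fail with ENXIO to mean "no
 * medium present"; for fixed disks medium_present stays TRUE, so
 * the failure is treated as real by the checks below.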
*/
6990 if ((sval == ENXIO) && un->un_f_has_removable_media) {
6991 medium_present = FALSE;
6992 }
6993
6994 /*
6995 * The conditions of interest here are:
6996 * if a spindle off with media present fails,
6997 * then restore the state and return an error.
6998 * else if a spindle on fails,
6999 * then return an error (there's no state to restore).
7000 * In all other cases we setup for the new state
7001 * and return success.
7002 */
7003 if (!SD_PM_IS_IO_CAPABLE(un, level)) {
7004 if ((medium_present == TRUE) && (sval != 0)) {
7005 /* The stop command from above failed */
7006 rval = DDI_FAILURE;
7007 /*
7008 * The stop command failed, and we have media
7009 * present. Put the power level back by calling
7010 * sd_pm_state_change() with SD_PM_STATE_ROLLBACK
7011 * and set the state back to its previous value.
7012 */
7013 (void) sd_pm_state_change(un, last_power_level,
7014 SD_PM_STATE_ROLLBACK);
7015 mutex_enter(SD_MUTEX(un));
7016 un->un_last_state = save_state;
7017 mutex_exit(SD_MUTEX(un));
7018 } else if (un->un_f_monitor_media_state) {
7019 /*
7020 * The stop command from above succeeded.
7021 * Terminate the watch thread when removable media
7022 * devices go into a low power state. This is as per
7023 * the requirements of the pm framework; otherwise
7024 * commands would be generated for the device (through
7025 * the watch thread), even when it is in a low power state.
7026 */
7027 mutex_enter(SD_MUTEX(un));
7028 un->un_f_watcht_stopped = FALSE;
7029 if (un->un_swr_token != NULL) {
7030 opaque_t temp_token = un->un_swr_token;
7031 un->un_f_watcht_stopped = TRUE;
7032 un->un_swr_token = NULL;
7033 mutex_exit(SD_MUTEX(un));
7034 (void) scsi_watch_request_terminate(temp_token,
7035 SCSI_WATCH_TERMINATE_ALL_WAIT);
7036 } else {
7037 mutex_exit(SD_MUTEX(un));
7038 }
7039 }
7040 } else {
7041 /*
7042 * The level requested is I/O capable.
7043 * Legacy behavior: return success on a failed spinup
7044 * if there is no media in the drive.
7045 * Do this by looking at medium_present here.
7046 */
7047 if ((sval != 0) && medium_present) {
7048 /* The start command from above failed */
7049 rval = DDI_FAILURE;
7050 } else {
7051 /*
7052 * The start command from above succeeded.
7053 * PM resume the device now that we have
7054 * started the disk.
7055 */
7056 (void) sd_pm_state_change(un, level,
7057 SD_PM_STATE_CHANGE);
7058
7059 /*
7060 * Resume the watch thread since it was suspended
7061 * when the device went into low power mode.
7062 */
7063 if (un->un_f_monitor_media_state) {
7064 mutex_enter(SD_MUTEX(un));
7065 if (un->un_f_watcht_stopped == TRUE) {
7066 opaque_t temp_token;
7067
7068 un->un_f_watcht_stopped = FALSE;
7069 mutex_exit(SD_MUTEX(un));
7070 temp_token =
7071 sd_watch_request_submit(un);
7072 mutex_enter(SD_MUTEX(un));
7073 un->un_swr_token = temp_token;
7074 }
7075 mutex_exit(SD_MUTEX(un));
7076 }
7077 }
7078 }
7079
7080 if (got_semaphore_here != 0) {
7081 sema_v(&un->un_semoclose);
7082 }
7083 /*
7084 * On exit put the state back to its original value
7085 * and broadcast to anyone waiting for the power
7086 * change completion.
7087 */
7088 mutex_enter(SD_MUTEX(un));
7089 un->un_state = state_before_pm;
7090 cv_broadcast(&un->un_suspend_cv);
7091 mutex_exit(SD_MUTEX(un));
7092
7093 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);
7094
7095 sd_ssc_fini(ssc);
7096 return (rval);
7097
7098 sdpower_failed:
7099
7100 sd_ssc_fini(ssc);
7101 return (DDI_FAILURE);
7102 }
7103
7104
7105
7106 /*
7107 * Function: sdattach
7108 *
7109 * Description: Driver's attach(9e) entry point function.
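 * DDI_ATTACH is handled by sd_unit_attach() and DDI_RESUME by
 * sd_ddi_resume(); any other cmd fails with DDI_FAILURE.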
7110 * 7111 * Arguments: devi - opaque device info handle 7112 * cmd - attach type 7113 * 7114 * Return Code: DDI_SUCCESS 7115 * DDI_FAILURE 7116 * 7117 * Context: Kernel thread context 7118 */ 7119 7120 static int 7121 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 7122 { 7123 switch (cmd) { 7124 case DDI_ATTACH: 7125 return (sd_unit_attach(devi)); 7126 case DDI_RESUME: 7127 return (sd_ddi_resume(devi)); 7128 default: 7129 break; 7130 } 7131 return (DDI_FAILURE); 7132 } 7133 7134 7135 /* 7136 * Function: sddetach 7137 * 7138 * Description: Driver's detach(9E) entry point function. 7139 * 7140 * Arguments: devi - opaque device info handle 7141 * cmd - detach type 7142 * 7143 * Return Code: DDI_SUCCESS 7144 * DDI_FAILURE 7145 * 7146 * Context: Kernel thread context 7147 */ 7148 7149 static int 7150 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 7151 { 7152 switch (cmd) { 7153 case DDI_DETACH: 7154 return (sd_unit_detach(devi)); 7155 case DDI_SUSPEND: 7156 return (sd_ddi_suspend(devi)); 7157 default: 7158 break; 7159 } 7160 return (DDI_FAILURE); 7161 } 7162 7163 7164 /* 7165 * Function: sd_sync_with_callback 7166 * 7167 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 7168 * state while the callback routine is active. 7169 * 7170 * Arguments: un: softstate structure for the instance 7171 * 7172 * Context: Kernel thread context 7173 */ 7174 7175 static void 7176 sd_sync_with_callback(struct sd_lun *un) 7177 { 7178 ASSERT(un != NULL); 7179 7180 mutex_enter(SD_MUTEX(un)); 7181 7182 ASSERT(un->un_in_callback >= 0); 7183 7184 while (un->un_in_callback > 0) { 7185 mutex_exit(SD_MUTEX(un)); 7186 delay(2); 7187 mutex_enter(SD_MUTEX(un)); 7188 } 7189 7190 mutex_exit(SD_MUTEX(un)); 7191 } 7192 7193 /* 7194 * Function: sd_unit_attach 7195 * 7196 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 7197 * the soft state structure for the device and performs 7198 * all necessary structure and device initializations. 7199 * 7200 * Arguments: devi: the system's dev_info_t for the device. 7201 * 7202 * Return Code: DDI_SUCCESS if attach is successful. 7203 * DDI_FAILURE if any part of the attach fails. 7204 * 7205 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 7206 * Kernel thread context only. Can sleep. 7207 */ 7208 7209 static int 7210 sd_unit_attach(dev_info_t *devi) 7211 { 7212 struct scsi_device *devp; 7213 struct sd_lun *un; 7214 char *variantp; 7215 char name_str[48]; 7216 int reservation_flag = SD_TARGET_IS_UNRESERVED; 7217 int instance; 7218 int rval; 7219 int wc_enabled; 7220 int wc_changeable; 7221 int tgt; 7222 uint64_t capacity; 7223 uint_t lbasize = 0; 7224 dev_info_t *pdip = ddi_get_parent(devi); 7225 int offbyone = 0; 7226 int geom_label_valid = 0; 7227 sd_ssc_t *ssc; 7228 int status; 7229 struct sd_fm_internal *sfip = NULL; 7230 int max_xfer_size; 7231 7232 /* 7233 * Retrieve the target driver's private data area. This was set 7234 * up by the HBA. 7235 */ 7236 devp = ddi_get_driver_private(devi); 7237 7238 /* 7239 * Retrieve the target ID of the device. 7240 */ 7241 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7242 SCSI_ADDR_PROP_TARGET, -1); 7243 7244 /* 7245 * Since we have no idea what state things were left in by the last 7246 * user of the device, set up some 'default' settings, ie. turn 'em 7247 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 7248 * Do this before the scsi_probe, which sends an inquiry. 7249 * This is a fix for bug (4430280). 
 * Of special importance is wide-xfer. The drive could have been left
 * in wide transfer mode by the last driver to communicate with it
 * (this includes us). If that's the case, and if the following is not
 * set up properly or we don't re-negotiate with the drive prior to
 * transferring data to/from the drive, it causes bus parity errors,
 * data overruns, and unexpected interrupts. This first occurred when
 * the fix for bug (4378686) was made.
 */
	(void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1);
	(void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
	(void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1);

	/*
	 * Currently, scsi_ifsetcap sets the tagged-qing capability for all
	 * LUNs on a target. Setting it per lun instance actually sets the
	 * capability of this target, which affects those luns already
	 * attached on the same target. So during attach, we can only disable
	 * this capability when no other lun has been attached on this
	 * target. By doing this, we assume a target has the same tagged-qing
	 * capability for every lun. The condition can be removed when the
	 * HBA is changed to support per-lun based tagged-qing capability.
	 */
	if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
		(void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1);
	}

	/*
	 * Use scsi_probe() to issue an INQUIRY command to the device.
	 * This call will allocate and fill in the scsi_inquiry structure
	 * and point the sd_inq member of the scsi_device structure to it.
	 * If the attach succeeds, then this memory will not be de-allocated
	 * (via scsi_unprobe()) until the instance is detached.
	 */
	if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
		goto probe_failed;
	}

	/*
	 * Check the device type as specified in the inquiry data and
	 * claim it if it is of a type that we support.
	 */
	switch (devp->sd_inq->inq_dtype) {
	case DTYPE_DIRECT:
		break;
	case DTYPE_RODIRECT:
		break;
	case DTYPE_OPTICAL:
		break;
	case DTYPE_NOTPRESENT:
	default:
		/* Unsupported device type; fail the attach. */
		goto probe_failed;
	}

	/*
	 * Allocate the soft state structure for this unit.
	 *
	 * We rely upon this memory being set to all zeroes by
	 * ddi_soft_state_zalloc().  We assume that any member of the
	 * soft state structure that is not explicitly initialized by
	 * this routine will have a value of zero.
	 */
	instance = ddi_get_instance(devp->sd_dev);
#ifndef XPV_HVM_DRIVER
	if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) {
		goto probe_failed;
	}
#endif /* !XPV_HVM_DRIVER */

	/*
	 * Retrieve a pointer to the newly-allocated soft state.
	 *
	 * This should NEVER fail if the ddi_soft_state_zalloc() call above
	 * was successful, unless something has gone horribly wrong and the
	 * ddi's soft state internals are corrupt (in which case it is
	 * probably better to halt here than just fail the attach...)
	 */
	if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
		panic("sd_unit_attach: NULL soft state on instance:0x%x",
		    instance);
		/*NOTREACHED*/
	}

	/*
	 * Link the back ptr of the driver soft state to the scsi_device
	 * struct for this lun.
	 * Save a pointer to the softstate in the driver-private area of
	 * the scsi_device struct.
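	 * As an illustrative sketch (not code from this function), later
	 * entry points can recover the same soft state either through the
	 * soft state framework or through this back-pointer:
	 *
	 *	struct sd_lun	*un;
	 *
	 *	un = ddi_get_soft_state(sd_state, instance);
	 *	un = (struct sd_lun *)devp->sd_private;
	 *
	 * Once the two assignments below are made, both must name the
	 * same structure.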
	 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until
	 * we first set un->un_sd below.
	 */
	un->un_sd = devp;
	devp->sd_private = (opaque_t)un;

	/*
	 * The following must be after devp is stored in the soft state struct.
	 */
#ifdef SDDEBUG
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "%s_unit_attach: un:0x%p instance:%d\n",
	    ddi_driver_name(devi), un, instance);
#endif

	/*
	 * Set up the device type and node type (for the minor nodes).
	 * By default we assume that the device can at least support the
	 * Common Command Set. Call it a CD-ROM if it reports itself
	 * as a RODIRECT device.
	 */
	switch (devp->sd_inq->inq_dtype) {
	case DTYPE_RODIRECT:
		un->un_node_type = DDI_NT_CD_CHAN;
		un->un_ctype = CTYPE_CDROM;
		break;
	case DTYPE_OPTICAL:
		un->un_node_type = DDI_NT_BLOCK_CHAN;
		un->un_ctype = CTYPE_ROD;
		break;
	default:
		un->un_node_type = DDI_NT_BLOCK_CHAN;
		un->un_ctype = CTYPE_CCS;
		break;
	}

	/*
	 * Try to read the interconnect type from the HBA.
	 *
	 * Note: This driver is currently compiled as two binaries, a parallel
	 * scsi version (sd) and a fibre channel version (ssd). All functional
	 * differences are determined at compile time. In the future a single
	 * binary will be provided and the interconnect type will be used to
	 * differentiate between fibre and parallel scsi behaviors. At that
	 * time it will be necessary for all fibre channel HBAs to support
	 * this property.
	 *
	 * Set un_f_is_fibre to TRUE (default fibre).
	 */
	un->un_f_is_fibre = TRUE;
	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
	case INTERCONNECT_SSA:
		un->un_interconnect_type = SD_INTERCONNECT_SSA;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
		break;
	case INTERCONNECT_PARALLEL:
		un->un_f_is_fibre = FALSE;
		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
		break;
	case INTERCONNECT_SAS:
		un->un_f_is_fibre = FALSE;
		un->un_interconnect_type = SD_INTERCONNECT_SAS;
		un->un_node_type = DDI_NT_BLOCK_SAS;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un);
		break;
	case INTERCONNECT_SATA:
		un->un_f_is_fibre = FALSE;
		un->un_interconnect_type = SD_INTERCONNECT_SATA;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
		break;
	case INTERCONNECT_FIBRE:
		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
		break;
	case INTERCONNECT_FABRIC:
		un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
		un->un_node_type = DDI_NT_BLOCK_FABRIC;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
		break;
	default:
#ifdef SD_DEFAULT_INTERCONNECT_TYPE
		/*
		 * The HBA does not support the "interconnect-type" property
		 * (or did not provide a recognized type).
		 *
		 * Note: This will be obsoleted when a single fibre channel
		 * and parallel scsi driver is delivered.
		 * In the meantime the interconnect type will be set to the
		 * platform default. If that type is not parallel SCSI, it
		 * means that we should be assuming "ssd" semantics. However,
		 * here this also means that the FC HBA is not supporting the
		 * "interconnect-type" property like we expect it to, so log
		 * this occurrence.
		 */
		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
		if (!SD_IS_PARALLEL_SCSI(un)) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Assuming "
			    "INTERCONNECT_FIBRE\n", un);
		} else {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Assuming "
			    "INTERCONNECT_PARALLEL\n", un);
			un->un_f_is_fibre = FALSE;
		}
#else
		/*
		 * Note: This source will be implemented when a single fibre
		 * channel and parallel scsi driver is delivered. The default
		 * will be to assume that if a device does not support the
		 * "interconnect-type" property it is a parallel SCSI HBA and
		 * we will set the interconnect type for parallel scsi.
		 */
		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
		un->un_f_is_fibre = FALSE;
#endif
		break;
	}

	if (un->un_f_is_fibre == TRUE) {
		if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
		    SCSI_VERSION_3) {
			switch (un->un_interconnect_type) {
			case SD_INTERCONNECT_FIBRE:
			case SD_INTERCONNECT_SSA:
				un->un_node_type = DDI_NT_BLOCK_WWN;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Initialize the Request Sense command for the target
	 */
	if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
		goto alloc_rqs_failed;
	}

	/*
	 * Set un_retry_count to SD_RETRY_COUNT; this is OK for SPARC
	 * with separate binaries for sd and ssd.
	 *
	 * x86 has one binary, and un_retry_count is set based on the
	 * connection type. The hardcoded values will go away when SPARC
	 * uses one binary for sd and ssd. These hardcoded values need to
	 * match SD_RETRY_COUNT in sddef.h.
	 * The value used is based on the interconnect type:
	 * fibre = 3, parallel = 5.
	 */
#if defined(__i386) || defined(__amd64)
	un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
#else
	un->un_retry_count = SD_RETRY_COUNT;
#endif

	/*
	 * Set the per disk retry count to the default number of retries
	 * for disks and CDROMs. This value can be overridden by the
	 * disk property list or an entry in sd.conf.
	 */
	un->un_notready_retry_count =
	    ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
	    : DISK_NOT_READY_RETRY_COUNT(un);

	/*
	 * Set the busy retry count to the default value of un_retry_count.
	 * This can be overridden by entries in sd.conf or the device
	 * config table.
	 */
	un->un_busy_retry_count = un->un_retry_count;

	/*
	 * Init the reset threshold for retries. This number determines
	 * how many retries must be performed before a reset can be issued
	 * (for certain error conditions). This can be overridden by entries
	 * in sd.conf or the device config table.
	 */
	un->un_reset_retry_count = (un->un_retry_count / 2);

	/*
	 * Set the victim_retry_count to the default un_retry_count
	 */
	un->un_victim_retry_count = (2 * un->un_retry_count);

	/*
	 * Set the reservation release timeout to the default value of
	 * 5 seconds. This can be overridden by entries in ssd.conf or the
	 * device config table.
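	 * As a purely hypothetical illustration of that override mechanism
	 * (the tunable names and values here are examples, not a statement
	 * of exactly which tokens this driver accepts for this field), an
	 * sd-config-list entry in sd.conf has the general shape:
	 *
	 *	sd-config-list = "VENDOR  PRODUCT",
	 *	    "retries-busy:6, throttle-max:32";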
7534 */ 7535 un->un_reserve_release_time = 5; 7536 7537 /* 7538 * Set up the default maximum transfer size. Note that this may 7539 * get updated later in the attach, when setting up default wide 7540 * operations for disks. 7541 */ 7542 #if defined(__i386) || defined(__amd64) 7543 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 7544 un->un_partial_dma_supported = 1; 7545 #else 7546 un->un_max_xfer_size = (uint_t)maxphys; 7547 #endif 7548 7549 /* 7550 * Get "allow bus device reset" property (defaults to "enabled" if 7551 * the property was not defined). This is to disable bus resets for 7552 * certain kinds of error recovery. Note: In the future when a run-time 7553 * fibre check is available the soft state flag should default to 7554 * enabled. 7555 */ 7556 if (un->un_f_is_fibre == TRUE) { 7557 un->un_f_allow_bus_device_reset = TRUE; 7558 } else { 7559 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7560 "allow-bus-device-reset", 1) != 0) { 7561 un->un_f_allow_bus_device_reset = TRUE; 7562 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7563 "sd_unit_attach: un:0x%p Bus device reset " 7564 "enabled\n", un); 7565 } else { 7566 un->un_f_allow_bus_device_reset = FALSE; 7567 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7568 "sd_unit_attach: un:0x%p Bus device reset " 7569 "disabled\n", un); 7570 } 7571 } 7572 7573 /* 7574 * Check if this is an ATAPI device. ATAPI devices use Group 1 7575 * Read/Write commands and Group 2 Mode Sense/Select commands. 7576 * 7577 * Note: The "obsolete" way of doing this is to check for the "atapi" 7578 * property. The new "variant" property with a value of "atapi" has been 7579 * introduced so that future 'variants' of standard SCSI behavior (like 7580 * atapi) could be specified by the underlying HBA drivers by supplying 7581 * a new value for the "variant" property, instead of having to define a 7582 * new property. 7583 */ 7584 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7585 un->un_f_cfg_is_atapi = TRUE; 7586 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7587 "sd_unit_attach: un:0x%p Atapi device\n", un); 7588 } 7589 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7590 &variantp) == DDI_PROP_SUCCESS) { 7591 if (strcmp(variantp, "atapi") == 0) { 7592 un->un_f_cfg_is_atapi = TRUE; 7593 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7594 "sd_unit_attach: un:0x%p Atapi device\n", un); 7595 } 7596 ddi_prop_free(variantp); 7597 } 7598 7599 un->un_cmd_timeout = SD_IO_TIME; 7600 7601 un->un_busy_timeout = SD_BSY_TIMEOUT; 7602 7603 /* Info on current states, statuses, etc. (Updated frequently) */ 7604 un->un_state = SD_STATE_NORMAL; 7605 un->un_last_state = SD_STATE_NORMAL; 7606 7607 /* Control & status info for command throttling */ 7608 un->un_throttle = sd_max_throttle; 7609 un->un_saved_throttle = sd_max_throttle; 7610 un->un_min_throttle = sd_min_throttle; 7611 7612 if (un->un_f_is_fibre == TRUE) { 7613 un->un_f_use_adaptive_throttle = TRUE; 7614 } else { 7615 un->un_f_use_adaptive_throttle = FALSE; 7616 } 7617 7618 /* Removable media support. */ 7619 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7620 un->un_mediastate = DKIO_NONE; 7621 un->un_specified_mediastate = DKIO_NONE; 7622 7623 /* CVs for suspend/resume (PM or DR) */ 7624 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7625 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7626 7627 /* Power management support. 
*/ 7628 un->un_power_level = SD_SPINDLE_UNINIT; 7629 7630 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 7631 un->un_f_wcc_inprog = 0; 7632 7633 /* 7634 * The open/close semaphore is used to serialize threads executing 7635 * in the driver's open & close entry point routines for a given 7636 * instance. 7637 */ 7638 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7639 7640 /* 7641 * The conf file entry and softstate variable is a forceful override, 7642 * meaning a non-zero value must be entered to change the default. 7643 */ 7644 un->un_f_disksort_disabled = FALSE; 7645 un->un_f_rmw_type = SD_RMW_TYPE_DEFAULT; 7646 un->un_f_enable_rmw = FALSE; 7647 7648 /* 7649 * GET EVENT STATUS NOTIFICATION media polling enabled by default, but 7650 * can be overridden via [s]sd-config-list "mmc-gesn-polling" property. 7651 */ 7652 un->un_f_mmc_gesn_polling = TRUE; 7653 7654 /* 7655 * physical sector size defaults to DEV_BSIZE currently. We can 7656 * override this value via the driver configuration file so we must 7657 * set it before calling sd_read_unit_properties(). 7658 */ 7659 un->un_phy_blocksize = DEV_BSIZE; 7660 7661 /* 7662 * Retrieve the properties from the static driver table or the driver 7663 * configuration file (.conf) for this unit and update the soft state 7664 * for the device as needed for the indicated properties. 7665 * Note: the property configuration needs to occur here as some of the 7666 * following routines may have dependencies on soft state flags set 7667 * as part of the driver property configuration. 7668 */ 7669 sd_read_unit_properties(un); 7670 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7671 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 7672 7673 /* 7674 * Only if a device has "hotpluggable" property, it is 7675 * treated as hotpluggable device. Otherwise, it is 7676 * regarded as non-hotpluggable one. 7677 */ 7678 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 7679 -1) != -1) { 7680 un->un_f_is_hotpluggable = TRUE; 7681 } 7682 7683 /* 7684 * set unit's attributes(flags) according to "hotpluggable" and 7685 * RMB bit in INQUIRY data. 7686 */ 7687 sd_set_unit_attributes(un, devi); 7688 7689 /* 7690 * By default, we mark the capacity, lbasize, and geometry 7691 * as invalid. Only if we successfully read a valid capacity 7692 * will we update the un_blockcount and un_tgt_blocksize with the 7693 * valid values (the geometry will be validated later). 7694 */ 7695 un->un_f_blockcount_is_valid = FALSE; 7696 un->un_f_tgt_blocksize_is_valid = FALSE; 7697 7698 /* 7699 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7700 * otherwise. 7701 */ 7702 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7703 un->un_blockcount = 0; 7704 7705 /* 7706 * Set up the per-instance info needed to determine the correct 7707 * CDBs and other info for issuing commands to the target. 7708 */ 7709 sd_init_cdb_limits(un); 7710 7711 /* 7712 * Set up the IO chains to use, based upon the target type. 
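	 * (Roughly: targets whose block size may differ from DEV_BSIZE get
	 * the RMMEDIA buf chain, all others the DISK chain; if power
	 * management later turns out to be disabled for the device, these
	 * are swapped for their *_NO_PM variants further down.)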
7713 */ 7714 if (un->un_f_non_devbsize_supported) { 7715 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7716 } else { 7717 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7718 } 7719 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7720 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7721 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7722 7723 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7724 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7725 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7726 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7727 7728 7729 if (ISCD(un)) { 7730 un->un_additional_codes = sd_additional_codes; 7731 } else { 7732 un->un_additional_codes = NULL; 7733 } 7734 7735 /* 7736 * Create the kstats here so they can be available for attach-time 7737 * routines that send commands to the unit (either polled or via 7738 * sd_send_scsi_cmd). 7739 * 7740 * Note: This is a critical sequence that needs to be maintained: 7741 * 1) Instantiate the kstats here, before any routines using the 7742 * iopath (i.e. sd_send_scsi_cmd). 7743 * 2) Instantiate and initialize the partition stats 7744 * (sd_set_pstats). 7745 * 3) Initialize the error stats (sd_set_errstats), following 7746 * sd_validate_geometry(),sd_register_devid(), 7747 * and sd_cache_control(). 7748 */ 7749 7750 un->un_stats = kstat_create(sd_label, instance, 7751 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7752 if (un->un_stats != NULL) { 7753 un->un_stats->ks_lock = SD_MUTEX(un); 7754 kstat_install(un->un_stats); 7755 } 7756 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7757 "sd_unit_attach: un:0x%p un_stats created\n", un); 7758 7759 sd_create_errstats(un, instance); 7760 if (un->un_errstats == NULL) { 7761 goto create_errstats_failed; 7762 } 7763 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7764 "sd_unit_attach: un:0x%p errstats created\n", un); 7765 7766 /* 7767 * The following if/else code was relocated here from below as part 7768 * of the fix for bug (4430280). However with the default setup added 7769 * on entry to this routine, it's no longer absolutely necessary for 7770 * this to be before the call to sd_spin_up_unit. 7771 */ 7772 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7773 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7774 (devp->sd_inq->inq_ansi == 5)) && 7775 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7776 7777 /* 7778 * If tagged queueing is supported by the target 7779 * and by the host adapter then we will enable it 7780 */ 7781 un->un_tagflags = 0; 7782 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7783 (un->un_f_arq_enabled == TRUE)) { 7784 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7785 1, 1) == 1) { 7786 un->un_tagflags = FLAG_STAG; 7787 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7788 "sd_unit_attach: un:0x%p tag queueing " 7789 "enabled\n", un); 7790 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7791 "untagged-qing", 0) == 1) { 7792 un->un_f_opt_queueing = TRUE; 7793 un->un_saved_throttle = un->un_throttle = 7794 min(un->un_throttle, 3); 7795 } else { 7796 un->un_f_opt_queueing = FALSE; 7797 un->un_saved_throttle = un->un_throttle = 1; 7798 } 7799 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7800 == 1) && (un->un_f_arq_enabled == TRUE)) { 7801 /* The Host Adapter supports internal queueing. 
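			 * (That is, the "untagged-qing" capability is set
			 * and auto request sense is enabled; the throttle is
			 * clamped to 3 below, matching the untagged case in
			 * the RDF_SCSI2 branch above.)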
			 */
			un->un_f_opt_queueing = TRUE;
			un->un_saved_throttle = un->un_throttle =
			    min(un->un_throttle, 3);
		} else {
			un->un_f_opt_queueing = FALSE;
			un->un_saved_throttle = un->un_throttle = 1;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p no tag queueing\n", un);
		}

		/*
		 * Enable large transfers for SATA/SAS drives
		 */
		if (SD_IS_SERIAL(un)) {
			un->un_max_xfer_size =
			    ddi_getprop(DDI_DEV_T_ANY, devi, 0,
			    sd_max_xfer_size, SD_MAX_XFER_SIZE);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p max transfer "
			    "size=0x%x\n", un, un->un_max_xfer_size);
		}

		/* Set up or tear down default wide operations for disks */

		/*
		 * Note: Legacy: it may be possible for both
		 * "sd_max_xfer_size" and "ssd_max_xfer_size" to exist
		 * simultaneously on the same system and be set to different
		 * values. In the future this code may need to be updated
		 * when the ssd module is obsoleted and removed from the
		 * system. (4299588)
		 */
		if (SD_IS_PARALLEL_SCSI(un) &&
		    (devp->sd_inq->inq_rdf == RDF_SCSI2) &&
		    (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) {
			if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
			    1, 1) == 1) {
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p Wide Transfer "
				    "enabled\n", un);
			}

			/*
			 * If tagged queuing has also been enabled, then
			 * enable large xfers
			 */
			if (un->un_saved_throttle == sd_max_throttle) {
				un->un_max_xfer_size =
				    ddi_getprop(DDI_DEV_T_ANY, devi, 0,
				    sd_max_xfer_size, SD_MAX_XFER_SIZE);
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p max transfer "
				    "size=0x%x\n", un, un->un_max_xfer_size);
			}
		} else {
			if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
			    0, 1) == 1) {
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p "
				    "Wide Transfer disabled\n", un);
			}
		}
	} else {
		un->un_tagflags = FLAG_STAG;
		un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY,
		    devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE);
	}

	/*
	 * If this target supports LUN reset, try to enable it.
	 */
	if (un->un_f_lun_reset_enabled) {
		if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
			    "un:0x%p lun_reset capability set\n", un);
		} else {
			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
			    "un:0x%p lun-reset capability not set\n", un);
		}
	}

	/*
	 * Adjust the maximum transfer size. This is to fix
	 * the problem of partial DMA support on SPARC. Some
	 * HBA drivers, like aac, have a very small dma_attr_maxxfer
	 * size, which requires partial DMA support on SPARC.
	 * In the future the SPARC pci nexus driver may solve
	 * the problem instead of this fix.
	 */
	max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1);
	if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) {
		/* We need DMA partial even on sparc to ensure sddump() works */
		un->un_max_xfer_size = max_xfer_size;
		if (un->un_partial_dma_supported == 0)
			un->un_partial_dma_supported = 1;
	}
	if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS, "buf_break", 0) == 1) {
		if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr,
		    un->un_max_xfer_size) == 1) {
			un->un_buf_breakup_supported = 1;
			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
			    "un:0x%p Buf breakup enabled\n", un);
		}
	}

	/*
	 * Set PKT_DMA_PARTIAL flag.
	 */
	if (un->un_partial_dma_supported == 1) {
		un->un_pkt_flags = PKT_DMA_PARTIAL;
	} else {
		un->un_pkt_flags = 0;
	}

	/* Initialize sd_ssc_t for internal uscsi commands */
	ssc = sd_ssc_init(un);
	scsi_fm_init(devp);

	/*
	 * Allocate memory for SCSI FMA resources.
	 */
	un->un_fm_private =
	    kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP);
	sfip = (struct sd_fm_internal *)un->un_fm_private;
	sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd;
	sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo;
	sfip->fm_ssc.ssc_un = un;

	if (ISCD(un) ||
	    un->un_f_has_removable_media ||
	    devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) {
		/*
		 * We don't touch CD-ROMs or DDI_FM_NOT_CAPABLE devices;
		 * their logging is unchanged.
		 */
		sfip->fm_log_level = SD_FM_LOG_NSUP;
	} else {
		/*
		 * If we get here, this is a non-CDROM, FM-capable device,
		 * and it will not keep the old scsi_log behavior as before
		 * in /var/adm/messages. Instead, the "fm-scsi-log" property
		 * controls whether the FM telemetry will be logged in
		 * /var/adm/messages.
		 */
		int fm_scsi_log;
		fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0);

		if (fm_scsi_log)
			sfip->fm_log_level = SD_FM_LOG_EREPORT;
		else
			sfip->fm_log_level = SD_FM_LOG_SILENT;
	}

	/*
	 * At this point in the attach, we have enough info in the
	 * soft state to be able to issue commands to the target.
	 *
	 * All command paths used below MUST issue their commands as
	 * SD_PATH_DIRECT. This is important as intermediate layers
	 * are not all initialized yet (such as PM).
	 */

	/*
	 * Send a TEST UNIT READY command to the device. This should clear
	 * any outstanding UNIT ATTENTION that may be present.
	 *
	 * Note: Don't check for success, just track if there is a
	 * reservation; this is a throw-away command to clear any unit
	 * attentions.
	 *
	 * Note: This MUST be the first command issued to the target during
	 * attach to ensure power on UNIT ATTENTIONs are cleared.
	 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays
	 * associated with attempts at spinning up a device with no media.
	 */
	status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
	if (status != 0) {
		if (status == EACCES)
			reservation_flag = SD_TARGET_IS_RESERVED;
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	/*
	 * If the device is NOT a removable media device, attempt to spin
	 * it up (using the START_STOP_UNIT command) and read its capacity
	 * (using the READ CAPACITY command).
Note, however, that either 7989 * of these could fail and in some cases we would continue with 7990 * the attach despite the failure (see below). 7991 */ 7992 if (un->un_f_descr_format_supported) { 7993 7994 switch (sd_spin_up_unit(ssc)) { 7995 case 0: 7996 /* 7997 * Spin-up was successful; now try to read the 7998 * capacity. If successful then save the results 7999 * and mark the capacity & lbasize as valid. 8000 */ 8001 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8002 "sd_unit_attach: un:0x%p spin-up successful\n", un); 8003 8004 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 8005 &lbasize, SD_PATH_DIRECT); 8006 8007 switch (status) { 8008 case 0: { 8009 if (capacity > DK_MAX_BLOCKS) { 8010 #ifdef _LP64 8011 if ((capacity + 1) > 8012 SD_GROUP1_MAX_ADDRESS) { 8013 /* 8014 * Enable descriptor format 8015 * sense data so that we can 8016 * get 64 bit sense data 8017 * fields. 8018 */ 8019 sd_enable_descr_sense(ssc); 8020 } 8021 #else 8022 /* 32-bit kernels can't handle this */ 8023 scsi_log(SD_DEVINFO(un), 8024 sd_label, CE_WARN, 8025 "disk has %llu blocks, which " 8026 "is too large for a 32-bit " 8027 "kernel", capacity); 8028 8029 #if defined(__i386) || defined(__amd64) 8030 /* 8031 * 1TB disk was treated as (1T - 512)B 8032 * in the past, so that it might have 8033 * valid VTOC and solaris partitions, 8034 * we have to allow it to continue to 8035 * work. 8036 */ 8037 if (capacity -1 > DK_MAX_BLOCKS) 8038 #endif 8039 goto spinup_failed; 8040 #endif 8041 } 8042 8043 /* 8044 * Here it's not necessary to check the case: 8045 * the capacity of the device is bigger than 8046 * what the max hba cdb can support. Because 8047 * sd_send_scsi_READ_CAPACITY will retrieve 8048 * the capacity by sending USCSI command, which 8049 * is constrained by the max hba cdb. Actually, 8050 * sd_send_scsi_READ_CAPACITY will return 8051 * EINVAL when using bigger cdb than required 8052 * cdb length. Will handle this case in 8053 * "case EINVAL". 8054 */ 8055 8056 /* 8057 * The following relies on 8058 * sd_send_scsi_READ_CAPACITY never 8059 * returning 0 for capacity and/or lbasize. 8060 */ 8061 sd_update_block_info(un, lbasize, capacity); 8062 8063 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8064 "sd_unit_attach: un:0x%p capacity = %ld " 8065 "blocks; lbasize= %ld.\n", un, 8066 un->un_blockcount, un->un_tgt_blocksize); 8067 8068 break; 8069 } 8070 case EINVAL: 8071 /* 8072 * In the case where the max-cdb-length property 8073 * is smaller than the required CDB length for 8074 * a SCSI device, a target driver can fail to 8075 * attach to that device. 8076 */ 8077 scsi_log(SD_DEVINFO(un), 8078 sd_label, CE_WARN, 8079 "disk capacity is too large " 8080 "for current cdb length"); 8081 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8082 8083 goto spinup_failed; 8084 case EACCES: 8085 /* 8086 * Should never get here if the spin-up 8087 * succeeded, but code it in anyway. 8088 * From here, just continue with the attach... 8089 */ 8090 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8091 "sd_unit_attach: un:0x%p " 8092 "sd_send_scsi_READ_CAPACITY " 8093 "returned reservation conflict\n", un); 8094 reservation_flag = SD_TARGET_IS_RESERVED; 8095 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8096 break; 8097 default: 8098 /* 8099 * Likewise, should never get here if the 8100 * spin-up succeeded. Just continue with 8101 * the attach... 
8102 */ 8103 if (status == EIO) 8104 sd_ssc_assessment(ssc, 8105 SD_FMT_STATUS_CHECK); 8106 else 8107 sd_ssc_assessment(ssc, 8108 SD_FMT_IGNORE); 8109 break; 8110 } 8111 break; 8112 case EACCES: 8113 /* 8114 * Device is reserved by another host. In this case 8115 * we could not spin it up or read the capacity, but 8116 * we continue with the attach anyway. 8117 */ 8118 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8119 "sd_unit_attach: un:0x%p spin-up reservation " 8120 "conflict.\n", un); 8121 reservation_flag = SD_TARGET_IS_RESERVED; 8122 break; 8123 default: 8124 /* Fail the attach if the spin-up failed. */ 8125 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8126 "sd_unit_attach: un:0x%p spin-up failed.", un); 8127 goto spinup_failed; 8128 } 8129 8130 } 8131 8132 /* 8133 * Check to see if this is a MMC drive 8134 */ 8135 if (ISCD(un)) { 8136 sd_set_mmc_caps(ssc); 8137 } 8138 8139 /* 8140 * Add a zero-length attribute to tell the world we support 8141 * kernel ioctls (for layered drivers) 8142 */ 8143 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8144 DDI_KERNEL_IOCTL, NULL, 0); 8145 8146 /* 8147 * Add a boolean property to tell the world we support 8148 * the B_FAILFAST flag (for layered drivers) 8149 */ 8150 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8151 "ddi-failfast-supported", NULL, 0); 8152 8153 /* 8154 * Initialize power management 8155 */ 8156 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 8157 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 8158 sd_setup_pm(ssc, devi); 8159 if (un->un_f_pm_is_enabled == FALSE) { 8160 /* 8161 * For performance, point to a jump table that does 8162 * not include pm. 8163 * The direct and priority chains don't change with PM. 8164 * 8165 * Note: this is currently done based on individual device 8166 * capabilities. When an interface for determining system 8167 * power enabled state becomes available, or when additional 8168 * layers are added to the command chain, these values will 8169 * have to be re-evaluated for correctness. 8170 */ 8171 if (un->un_f_non_devbsize_supported) { 8172 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 8173 } else { 8174 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 8175 } 8176 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8177 } 8178 8179 /* 8180 * This property is set to 0 by HA software to avoid retries 8181 * on a reserved disk. (The preferred property name is 8182 * "retry-on-reservation-conflict") (1189689) 8183 * 8184 * Note: The use of a global here can have unintended consequences. A 8185 * per instance variable is preferable to match the capabilities of 8186 * different underlying hba's (4402600) 8187 */ 8188 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 8189 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 8190 sd_retry_on_reservation_conflict); 8191 if (sd_retry_on_reservation_conflict != 0) { 8192 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 8193 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 8194 sd_retry_on_reservation_conflict); 8195 } 8196 8197 /* Set up options for QFULL handling. 
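	 * As a hypothetical illustration (the property values here are
	 * examples only), a driver.conf fragment such as
	 *
	 *	qfull-retries=3;
	 *	qfull-retry-interval=100;
	 *
	 * would be picked up by the ddi_getprop() calls below and handed
	 * to the HBA through scsi_ifsetcap(9F).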
*/ 8198 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8199 "qfull-retries", -1)) != -1) { 8200 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 8201 rval, 1); 8202 } 8203 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8204 "qfull-retry-interval", -1)) != -1) { 8205 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 8206 rval, 1); 8207 } 8208 8209 /* 8210 * This just prints a message that announces the existence of the 8211 * device. The message is always printed in the system logfile, but 8212 * only appears on the console if the system is booted with the 8213 * -v (verbose) argument. 8214 */ 8215 ddi_report_dev(devi); 8216 8217 un->un_mediastate = DKIO_NONE; 8218 8219 /* 8220 * Check Block Device Characteristics VPD. 8221 */ 8222 sd_check_bdc_vpd(ssc); 8223 8224 /* 8225 * Check whether the drive is in emulation mode. 8226 */ 8227 sd_check_emulation_mode(ssc); 8228 8229 cmlb_alloc_handle(&un->un_cmlbhandle); 8230 8231 #if defined(__i386) || defined(__amd64) 8232 /* 8233 * On x86, compensate for off-by-1 legacy error 8234 */ 8235 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 8236 (lbasize == un->un_sys_blocksize)) 8237 offbyone = CMLB_OFF_BY_ONE; 8238 #endif 8239 8240 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 8241 VOID2BOOLEAN(un->un_f_has_removable_media != 0), 8242 VOID2BOOLEAN(un->un_f_is_hotpluggable != 0), 8243 un->un_node_type, offbyone, un->un_cmlbhandle, 8244 (void *)SD_PATH_DIRECT) != 0) { 8245 goto cmlb_attach_failed; 8246 } 8247 8248 8249 /* 8250 * Read and validate the device's geometry (ie, disk label) 8251 * A new unformatted drive will not have a valid geometry, but 8252 * the driver needs to successfully attach to this device so 8253 * the drive can be formatted via ioctls. 8254 */ 8255 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 8256 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 8257 8258 mutex_enter(SD_MUTEX(un)); 8259 8260 /* 8261 * Read and initialize the devid for the unit. 8262 */ 8263 if (un->un_f_devid_supported) { 8264 sd_register_devid(ssc, devi, reservation_flag); 8265 } 8266 mutex_exit(SD_MUTEX(un)); 8267 8268 #if (defined(__fibre)) 8269 /* 8270 * Register callbacks for fibre only. You can't do this solely 8271 * on the basis of the devid_type because this is hba specific. 8272 * We need to query our hba capabilities to find out whether to 8273 * register or not. 8274 */ 8275 if (un->un_f_is_fibre) { 8276 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 8277 sd_init_event_callbacks(un); 8278 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8279 "sd_unit_attach: un:0x%p event callbacks inserted", 8280 un); 8281 } 8282 } 8283 #endif 8284 8285 if (un->un_f_opt_disable_cache == TRUE) { 8286 /* 8287 * Disable both read cache and write cache. This is 8288 * the historic behavior of the keywords in the config file. 8289 */ 8290 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 8291 0) { 8292 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8293 "sd_unit_attach: un:0x%p Could not disable " 8294 "caching", un); 8295 goto devid_failed; 8296 } 8297 } 8298 8299 /* 8300 * Check the value of the WCE bit and if it's allowed to be changed, 8301 * set un_f_write_cache_enabled and un_f_cache_mode_changeable 8302 * accordingly. 
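	 * For reference, WCE is bit 2 of byte 2 of the Caching mode page
	 * (page code 0x08), so a minimal sketch of the extraction the
	 * helper routine performs is:
	 *
	 *	wce = (caching_page[2] >> 2) & 1;
	 *
	 * (Illustrative only; sd_get_write_cache_enabled() issues the
	 * actual MODE SENSE and does the parsing.)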
	 */
	(void) sd_get_write_cache_enabled(ssc, &wc_enabled);
	sd_get_write_cache_changeable(ssc, &wc_changeable);
	mutex_enter(SD_MUTEX(un));
	un->un_f_write_cache_enabled = (wc_enabled != 0);
	un->un_f_cache_mode_changeable = (wc_changeable != 0);
	mutex_exit(SD_MUTEX(un));

	if ((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR &&
	    un->un_tgt_blocksize != DEV_BSIZE) ||
	    un->un_f_enable_rmw) {
		if (!(un->un_wm_cache)) {
			(void) snprintf(name_str, sizeof (name_str),
			    "%s%d_cache",
			    ddi_driver_name(SD_DEVINFO(un)),
			    ddi_get_instance(SD_DEVINFO(un)));
			un->un_wm_cache = kmem_cache_create(
			    name_str, sizeof (struct sd_w_map),
			    8, sd_wm_cache_constructor,
			    sd_wm_cache_destructor, NULL,
			    (void *)un, NULL, 0);
			if (!(un->un_wm_cache)) {
				goto wm_cache_failed;
			}
		}
	}

	/*
	 * Check the value of the NV_SUP bit and set
	 * un_f_suppress_cache_flush accordingly.
	 */
	sd_get_nv_sup(ssc);

	/*
	 * Find out what type of reservation this disk supports.
	 */
	status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL);

	switch (status) {
	case 0:
		/*
		 * SCSI-3 reservations are supported.
		 */
		un->un_reservation_type = SD_SCSI3_RESERVATION;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un);
		break;
	case ENOTSUP:
		/*
		 * The PERSISTENT RESERVE IN command would not be recognized
		 * by a SCSI-2 device, so assume the reservation type is
		 * SCSI-2.
		 */
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un);
		un->un_reservation_type = SD_SCSI2_RESERVATION;

		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		break;
	default:
		/*
		 * Default to SCSI-3 reservations.
		 */
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p default SCSI3 reservations\n",
		    un);
		un->un_reservation_type = SD_SCSI3_RESERVATION;

		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		break;
	}

	/*
	 * Set the pstat and error stat values here, so data obtained during
	 * the previous attach-time routines is available.
	 *
	 * Note: This is a critical sequence that needs to be maintained:
	 *	1) Instantiate the kstats before any routines using the
	 *	   iopath (i.e. sd_send_scsi_cmd).
	 *	2) Initialize the error stats (sd_set_errstats) and partition
	 *	   stats (sd_set_pstats) here, following
	 *	   cmlb_validate_geometry(), sd_register_devid(), and
	 *	   sd_cache_control().
	 */

	if (un->un_f_pkstats_enabled && geom_label_valid) {
		sd_set_pstats(un);
		SD_TRACE(SD_LOG_IO_PARTITION, un,
		    "sd_unit_attach: un:0x%p pstats created and set\n", un);
	}

	sd_set_errstats(un);
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p errstats set\n", un);


	/*
	 * After successfully attaching an instance, we record the information
	 * of how many luns have been attached on the corresponding target and
	 * controller for parallel SCSI. This information is used when sd tries
	 * to set the tagged queuing capability in the HBA.
8402 */ 8403 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8404 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 8405 } 8406 8407 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8408 "sd_unit_attach: un:0x%p exit success\n", un); 8409 8410 /* Uninitialize sd_ssc_t pointer */ 8411 sd_ssc_fini(ssc); 8412 8413 return (DDI_SUCCESS); 8414 8415 /* 8416 * An error occurred during the attach; clean up & return failure. 8417 */ 8418 wm_cache_failed: 8419 devid_failed: 8420 ddi_remove_minor_node(devi, NULL); 8421 8422 cmlb_attach_failed: 8423 /* 8424 * Cleanup from the scsi_ifsetcap() calls (437868) 8425 */ 8426 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8427 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8428 8429 /* 8430 * Refer to the comments of setting tagged-qing in the beginning of 8431 * sd_unit_attach. We can only disable tagged queuing when there is 8432 * no lun attached on the target. 8433 */ 8434 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 8435 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8436 } 8437 8438 if (un->un_f_is_fibre == FALSE) { 8439 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8440 } 8441 8442 spinup_failed: 8443 8444 /* Uninitialize sd_ssc_t pointer */ 8445 sd_ssc_fini(ssc); 8446 8447 mutex_enter(SD_MUTEX(un)); 8448 8449 /* Deallocate SCSI FMA memory spaces */ 8450 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8451 8452 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */ 8453 if (un->un_direct_priority_timeid != NULL) { 8454 timeout_id_t temp_id = un->un_direct_priority_timeid; 8455 un->un_direct_priority_timeid = NULL; 8456 mutex_exit(SD_MUTEX(un)); 8457 (void) untimeout(temp_id); 8458 mutex_enter(SD_MUTEX(un)); 8459 } 8460 8461 /* Cancel any pending start/stop timeouts */ 8462 if (un->un_startstop_timeid != NULL) { 8463 timeout_id_t temp_id = un->un_startstop_timeid; 8464 un->un_startstop_timeid = NULL; 8465 mutex_exit(SD_MUTEX(un)); 8466 (void) untimeout(temp_id); 8467 mutex_enter(SD_MUTEX(un)); 8468 } 8469 8470 /* Cancel any pending reset-throttle timeouts */ 8471 if (un->un_reset_throttle_timeid != NULL) { 8472 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8473 un->un_reset_throttle_timeid = NULL; 8474 mutex_exit(SD_MUTEX(un)); 8475 (void) untimeout(temp_id); 8476 mutex_enter(SD_MUTEX(un)); 8477 } 8478 8479 /* Cancel rmw warning message timeouts */ 8480 if (un->un_rmw_msg_timeid != NULL) { 8481 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8482 un->un_rmw_msg_timeid = NULL; 8483 mutex_exit(SD_MUTEX(un)); 8484 (void) untimeout(temp_id); 8485 mutex_enter(SD_MUTEX(un)); 8486 } 8487 8488 /* Cancel any pending retry timeouts */ 8489 if (un->un_retry_timeid != NULL) { 8490 timeout_id_t temp_id = un->un_retry_timeid; 8491 un->un_retry_timeid = NULL; 8492 mutex_exit(SD_MUTEX(un)); 8493 (void) untimeout(temp_id); 8494 mutex_enter(SD_MUTEX(un)); 8495 } 8496 8497 /* Cancel any pending delayed cv broadcast timeouts */ 8498 if (un->un_dcvb_timeid != NULL) { 8499 timeout_id_t temp_id = un->un_dcvb_timeid; 8500 un->un_dcvb_timeid = NULL; 8501 mutex_exit(SD_MUTEX(un)); 8502 (void) untimeout(temp_id); 8503 mutex_enter(SD_MUTEX(un)); 8504 } 8505 8506 mutex_exit(SD_MUTEX(un)); 8507 8508 /* There should not be any in-progress I/O so ASSERT this check */ 8509 ASSERT(un->un_ncmds_in_transport == 0); 8510 ASSERT(un->un_ncmds_in_driver == 0); 8511 8512 /* Do not free the softstate if the callback routine is active */ 8513 sd_sync_with_callback(un); 8514 8515 /* 8516 * Partition 
stats apparently are not used with removables. These would 8517 * not have been created during attach, so no need to clean them up... 8518 */ 8519 if (un->un_errstats != NULL) { 8520 kstat_delete(un->un_errstats); 8521 un->un_errstats = NULL; 8522 } 8523 8524 create_errstats_failed: 8525 8526 if (un->un_stats != NULL) { 8527 kstat_delete(un->un_stats); 8528 un->un_stats = NULL; 8529 } 8530 8531 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8532 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8533 8534 ddi_prop_remove_all(devi); 8535 sema_destroy(&un->un_semoclose); 8536 cv_destroy(&un->un_state_cv); 8537 8538 sd_free_rqs(un); 8539 8540 alloc_rqs_failed: 8541 8542 devp->sd_private = NULL; 8543 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8544 8545 /* 8546 * Note: the man pages are unclear as to whether or not doing a 8547 * ddi_soft_state_free(sd_state, instance) is the right way to 8548 * clean up after the ddi_soft_state_zalloc() if the subsequent 8549 * ddi_get_soft_state() fails. The implication seems to be 8550 * that the get_soft_state cannot fail if the zalloc succeeds. 8551 */ 8552 #ifndef XPV_HVM_DRIVER 8553 ddi_soft_state_free(sd_state, instance); 8554 #endif /* !XPV_HVM_DRIVER */ 8555 8556 probe_failed: 8557 scsi_unprobe(devp); 8558 8559 return (DDI_FAILURE); 8560 } 8561 8562 8563 /* 8564 * Function: sd_unit_detach 8565 * 8566 * Description: Performs DDI_DETACH processing for sddetach(). 8567 * 8568 * Return Code: DDI_SUCCESS 8569 * DDI_FAILURE 8570 * 8571 * Context: Kernel thread context 8572 */ 8573 8574 static int 8575 sd_unit_detach(dev_info_t *devi) 8576 { 8577 struct scsi_device *devp; 8578 struct sd_lun *un; 8579 int i; 8580 int tgt; 8581 dev_t dev; 8582 dev_info_t *pdip = ddi_get_parent(devi); 8583 #ifndef XPV_HVM_DRIVER 8584 int instance = ddi_get_instance(devi); 8585 #endif /* !XPV_HVM_DRIVER */ 8586 8587 mutex_enter(&sd_detach_mutex); 8588 8589 /* 8590 * Fail the detach for any of the following: 8591 * - Unable to get the sd_lun struct for the instance 8592 * - A layered driver has an outstanding open on the instance 8593 * - Another thread is already detaching this instance 8594 * - Another thread is currently performing an open 8595 */ 8596 devp = ddi_get_driver_private(devi); 8597 if ((devp == NULL) || 8598 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8599 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8600 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8601 mutex_exit(&sd_detach_mutex); 8602 return (DDI_FAILURE); 8603 } 8604 8605 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8606 8607 /* 8608 * Mark this instance as currently in a detach, to inhibit any 8609 * opens from a layered driver. 8610 */ 8611 un->un_detach_count++; 8612 mutex_exit(&sd_detach_mutex); 8613 8614 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 8615 SCSI_ADDR_PROP_TARGET, -1); 8616 8617 dev = sd_make_device(SD_DEVINFO(un)); 8618 8619 #ifndef lint 8620 _NOTE(COMPETING_THREADS_NOW); 8621 #endif 8622 8623 mutex_enter(SD_MUTEX(un)); 8624 8625 /* 8626 * Fail the detach if there are any outstanding layered 8627 * opens on this device. 8628 */ 8629 for (i = 0; i < NDKMAP; i++) { 8630 if (un->un_ocmap.lyropen[i] != 0) { 8631 goto err_notclosed; 8632 } 8633 } 8634 8635 /* 8636 * Verify there are NO outstanding commands issued to this device. 8637 * ie, un_ncmds_in_transport == 0. 8638 * It's possible to have outstanding commands through the physio 8639 * code path, even though everything's closed. 
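	 * (The test below treats a pending retry or direct-priority timeout,
	 * or an instance sitting in SD_STATE_RWAIT, as outstanding work as
	 * well.)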
8640 */ 8641 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8642 (un->un_direct_priority_timeid != NULL) || 8643 (un->un_state == SD_STATE_RWAIT)) { 8644 mutex_exit(SD_MUTEX(un)); 8645 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8646 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8647 goto err_stillbusy; 8648 } 8649 8650 /* 8651 * If we have the device reserved, release the reservation. 8652 */ 8653 if ((un->un_resvd_status & SD_RESERVE) && 8654 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8655 mutex_exit(SD_MUTEX(un)); 8656 /* 8657 * Note: sd_reserve_release sends a command to the device 8658 * via the sd_ioctlcmd() path, and can sleep. 8659 */ 8660 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8661 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8662 "sd_dr_detach: Cannot release reservation \n"); 8663 } 8664 } else { 8665 mutex_exit(SD_MUTEX(un)); 8666 } 8667 8668 /* 8669 * Untimeout any reserve recover, throttle reset, restart unit 8670 * and delayed broadcast timeout threads. Protect the timeout pointer 8671 * from getting nulled by their callback functions. 8672 */ 8673 mutex_enter(SD_MUTEX(un)); 8674 if (un->un_resvd_timeid != NULL) { 8675 timeout_id_t temp_id = un->un_resvd_timeid; 8676 un->un_resvd_timeid = NULL; 8677 mutex_exit(SD_MUTEX(un)); 8678 (void) untimeout(temp_id); 8679 mutex_enter(SD_MUTEX(un)); 8680 } 8681 8682 if (un->un_reset_throttle_timeid != NULL) { 8683 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8684 un->un_reset_throttle_timeid = NULL; 8685 mutex_exit(SD_MUTEX(un)); 8686 (void) untimeout(temp_id); 8687 mutex_enter(SD_MUTEX(un)); 8688 } 8689 8690 if (un->un_startstop_timeid != NULL) { 8691 timeout_id_t temp_id = un->un_startstop_timeid; 8692 un->un_startstop_timeid = NULL; 8693 mutex_exit(SD_MUTEX(un)); 8694 (void) untimeout(temp_id); 8695 mutex_enter(SD_MUTEX(un)); 8696 } 8697 8698 if (un->un_rmw_msg_timeid != NULL) { 8699 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8700 un->un_rmw_msg_timeid = NULL; 8701 mutex_exit(SD_MUTEX(un)); 8702 (void) untimeout(temp_id); 8703 mutex_enter(SD_MUTEX(un)); 8704 } 8705 8706 if (un->un_dcvb_timeid != NULL) { 8707 timeout_id_t temp_id = un->un_dcvb_timeid; 8708 un->un_dcvb_timeid = NULL; 8709 mutex_exit(SD_MUTEX(un)); 8710 (void) untimeout(temp_id); 8711 } else { 8712 mutex_exit(SD_MUTEX(un)); 8713 } 8714 8715 /* Remove any pending reservation reclaim requests for this device */ 8716 sd_rmv_resv_reclaim_req(dev); 8717 8718 mutex_enter(SD_MUTEX(un)); 8719 8720 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8721 if (un->un_direct_priority_timeid != NULL) { 8722 timeout_id_t temp_id = un->un_direct_priority_timeid; 8723 un->un_direct_priority_timeid = NULL; 8724 mutex_exit(SD_MUTEX(un)); 8725 (void) untimeout(temp_id); 8726 mutex_enter(SD_MUTEX(un)); 8727 } 8728 8729 /* Cancel any active multi-host disk watch thread requests */ 8730 if (un->un_mhd_token != NULL) { 8731 mutex_exit(SD_MUTEX(un)); 8732 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8733 if (scsi_watch_request_terminate(un->un_mhd_token, 8734 SCSI_WATCH_TERMINATE_NOWAIT)) { 8735 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8736 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8737 /* 8738 * Note: We are returning here after having removed 8739 * some driver timeouts above. This is consistent with 8740 * the legacy implementation but perhaps the watch 8741 * terminate call should be made with the wait flag set. 
8742 */ 8743 goto err_stillbusy; 8744 } 8745 mutex_enter(SD_MUTEX(un)); 8746 un->un_mhd_token = NULL; 8747 } 8748 8749 if (un->un_swr_token != NULL) { 8750 mutex_exit(SD_MUTEX(un)); 8751 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8752 if (scsi_watch_request_terminate(un->un_swr_token, 8753 SCSI_WATCH_TERMINATE_NOWAIT)) { 8754 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8755 "sd_dr_detach: Cannot cancel swr watch request\n"); 8756 /* 8757 * Note: We are returning here after having removed 8758 * some driver timeouts above. This is consistent with 8759 * the legacy implementation but perhaps the watch 8760 * terminate call should be made with the wait flag set. 8761 */ 8762 goto err_stillbusy; 8763 } 8764 mutex_enter(SD_MUTEX(un)); 8765 un->un_swr_token = NULL; 8766 } 8767 8768 mutex_exit(SD_MUTEX(un)); 8769 8770 /* 8771 * Clear any scsi_reset_notifies. We clear the reset notifies 8772 * if we have not registered one. 8773 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 8774 */ 8775 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 8776 sd_mhd_reset_notify_cb, (caddr_t)un); 8777 8778 /* 8779 * protect the timeout pointers from getting nulled by 8780 * their callback functions during the cancellation process. 8781 * In such a scenario untimeout can be invoked with a null value. 8782 */ 8783 _NOTE(NO_COMPETING_THREADS_NOW); 8784 8785 mutex_enter(&un->un_pm_mutex); 8786 if (un->un_pm_idle_timeid != NULL) { 8787 timeout_id_t temp_id = un->un_pm_idle_timeid; 8788 un->un_pm_idle_timeid = NULL; 8789 mutex_exit(&un->un_pm_mutex); 8790 8791 /* 8792 * Timeout is active; cancel it. 8793 * Note that it'll never be active on a device 8794 * that does not support PM therefore we don't 8795 * have to check before calling pm_idle_component. 8796 */ 8797 (void) untimeout(temp_id); 8798 (void) pm_idle_component(SD_DEVINFO(un), 0); 8799 mutex_enter(&un->un_pm_mutex); 8800 } 8801 8802 /* 8803 * Check whether there is already a timeout scheduled for power 8804 * management. If yes then don't lower the power here, that's. 8805 * the timeout handler's job. 8806 */ 8807 if (un->un_pm_timeid != NULL) { 8808 timeout_id_t temp_id = un->un_pm_timeid; 8809 un->un_pm_timeid = NULL; 8810 mutex_exit(&un->un_pm_mutex); 8811 /* 8812 * Timeout is active; cancel it. 8813 * Note that it'll never be active on a device 8814 * that does not support PM therefore we don't 8815 * have to check before calling pm_idle_component. 8816 */ 8817 (void) untimeout(temp_id); 8818 (void) pm_idle_component(SD_DEVINFO(un), 0); 8819 8820 } else { 8821 mutex_exit(&un->un_pm_mutex); 8822 if ((un->un_f_pm_is_enabled == TRUE) && 8823 (pm_lower_power(SD_DEVINFO(un), 0, SD_PM_STATE_STOPPED(un)) 8824 != DDI_SUCCESS)) { 8825 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8826 "sd_dr_detach: Lower power request failed, ignoring.\n"); 8827 /* 8828 * Fix for bug: 4297749, item # 13 8829 * The above test now includes a check to see if PM is 8830 * supported by this device before call 8831 * pm_lower_power(). 8832 * Note, the following is not dead code. The call to 8833 * pm_lower_power above will generate a call back into 8834 * our sdpower routine which might result in a timeout 8835 * handler getting activated. Therefore the following 8836 * code is valid and necessary. 
8837 */
8838 mutex_enter(&un->un_pm_mutex);
8839 if (un->un_pm_timeid != NULL) {
8840 timeout_id_t temp_id = un->un_pm_timeid;
8841 un->un_pm_timeid = NULL;
8842 mutex_exit(&un->un_pm_mutex);
8843 (void) untimeout(temp_id);
8844 (void) pm_idle_component(SD_DEVINFO(un), 0);
8845 } else {
8846 mutex_exit(&un->un_pm_mutex);
8847 }
8848 }
8849 }
8850
8851 /*
8852 * Cleanup from the scsi_ifsetcap() calls (437868)
8853 * Relocated here from above to be after the call to
8854 * pm_lower_power, which was getting errors.
8855 */
8856 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
8857 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
8858
8859 /*
8860 * Currently, tagged queuing is supported per target by the HBA.
8861 * Setting this per lun instance actually sets the capability of this
8862 * target in HBA, which affects those luns already attached on the
8863 * same target. So during detach, we can only disable this capability
8864 * when this is the only lun left on this target. By doing
8865 * this, we assume a target has the same tagged queuing capability
8866 * for every lun. The condition can be removed when HBA is changed to
8867 * support per lun based tagged queuing capability.
8868 */
8869 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) {
8870 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
8871 }
8872
8873 if (un->un_f_is_fibre == FALSE) {
8874 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
8875 }
8876
8877 /*
8878 * Remove any event callbacks, fibre only
8879 */
8880 if (un->un_f_is_fibre == TRUE) {
8881 if ((un->un_insert_event != NULL) &&
8882 (ddi_remove_event_handler(un->un_insert_cb_id) !=
8883 DDI_SUCCESS)) {
8884 /*
8885 * Note: We are returning here after having done
8886 * substantial cleanup above. This is consistent
8887 * with the legacy implementation but this may not
8888 * be the right thing to do.
8889 */
8890 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8891 "sd_dr_detach: Cannot cancel insert event\n");
8892 goto err_remove_event;
8893 }
8894 un->un_insert_event = NULL;
8895
8896 if ((un->un_remove_event != NULL) &&
8897 (ddi_remove_event_handler(un->un_remove_cb_id) !=
8898 DDI_SUCCESS)) {
8899 /*
8900 * Note: We are returning here after having done
8901 * substantial cleanup above. This is consistent
8902 * with the legacy implementation but this may not
8903 * be the right thing to do.
8904 */
8905 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8906 "sd_dr_detach: Cannot cancel remove event\n");
8907 goto err_remove_event;
8908 }
8909 un->un_remove_event = NULL;
8910 }
8911
8912 /* Do not free the softstate if the callback routine is active */
8913 sd_sync_with_callback(un);
8914
8915 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
8916 cmlb_free_handle(&un->un_cmlbhandle);
8917
8918 /*
8919 * Hold the detach mutex here, to make sure that no other threads ever
8920 * can access a (partially) freed soft state structure.
8921 */
8922 mutex_enter(&sd_detach_mutex);
8923
8924 /*
8925 * Clean up the soft state struct.
8926 * Cleanup is done in reverse order of allocs/inits.
8927 * At this point there should be no competing threads anymore.
8928 */
8929
8930 scsi_fm_fini(devp);
8931
8932 /*
8933 * Deallocate memory for SCSI FMA.
8934 */
8935 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));
8936
8937 /*
8938 * Unregister and free device id if it was not registered
8939 * by the transport.
8940 */ 8941 if (un->un_f_devid_transport_defined == FALSE) 8942 ddi_devid_unregister(devi); 8943 8944 /* 8945 * free the devid structure if allocated before (by ddi_devid_init() 8946 * or ddi_devid_get()). 8947 */ 8948 if (un->un_devid) { 8949 ddi_devid_free(un->un_devid); 8950 un->un_devid = NULL; 8951 } 8952 8953 /* 8954 * Destroy wmap cache if it exists. 8955 */ 8956 if (un->un_wm_cache != NULL) { 8957 kmem_cache_destroy(un->un_wm_cache); 8958 un->un_wm_cache = NULL; 8959 } 8960 8961 /* 8962 * kstat cleanup is done in detach for all device types (4363169). 8963 * We do not want to fail detach if the device kstats are not deleted 8964 * since there is a confusion about the devo_refcnt for the device. 8965 * We just delete the kstats and let detach complete successfully. 8966 */ 8967 if (un->un_stats != NULL) { 8968 kstat_delete(un->un_stats); 8969 un->un_stats = NULL; 8970 } 8971 if (un->un_errstats != NULL) { 8972 kstat_delete(un->un_errstats); 8973 un->un_errstats = NULL; 8974 } 8975 8976 /* Remove partition stats */ 8977 if (un->un_f_pkstats_enabled) { 8978 for (i = 0; i < NSDMAP; i++) { 8979 if (un->un_pstats[i] != NULL) { 8980 kstat_delete(un->un_pstats[i]); 8981 un->un_pstats[i] = NULL; 8982 } 8983 } 8984 } 8985 8986 /* Remove xbuf registration */ 8987 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8988 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8989 8990 /* Remove driver properties */ 8991 ddi_prop_remove_all(devi); 8992 8993 mutex_destroy(&un->un_pm_mutex); 8994 cv_destroy(&un->un_pm_busy_cv); 8995 8996 cv_destroy(&un->un_wcc_cv); 8997 8998 /* Open/close semaphore */ 8999 sema_destroy(&un->un_semoclose); 9000 9001 /* Removable media condvar. */ 9002 cv_destroy(&un->un_state_cv); 9003 9004 /* Suspend/resume condvar. */ 9005 cv_destroy(&un->un_suspend_cv); 9006 cv_destroy(&un->un_disk_busy_cv); 9007 9008 sd_free_rqs(un); 9009 9010 /* Free up soft state */ 9011 devp->sd_private = NULL; 9012 9013 bzero(un, sizeof (struct sd_lun)); 9014 #ifndef XPV_HVM_DRIVER 9015 ddi_soft_state_free(sd_state, instance); 9016 #endif /* !XPV_HVM_DRIVER */ 9017 9018 mutex_exit(&sd_detach_mutex); 9019 9020 /* This frees up the INQUIRY data associated with the device. */ 9021 scsi_unprobe(devp); 9022 9023 /* 9024 * After successfully detaching an instance, we update the information 9025 * of how many luns have been attached in the relative target and 9026 * controller for parallel SCSI. This information is used when sd tries 9027 * to set the tagged queuing capability in HBA. 9028 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 9029 * check if the device is parallel SCSI. However, we don't need to 9030 * check here because we've already checked during attach. No device 9031 * that is not parallel SCSI is in the chain. 9032 */ 9033 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 9034 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 9035 } 9036 9037 return (DDI_SUCCESS); 9038 9039 err_notclosed: 9040 mutex_exit(SD_MUTEX(un)); 9041 9042 err_stillbusy: 9043 _NOTE(NO_COMPETING_THREADS_NOW); 9044 9045 err_remove_event: 9046 mutex_enter(&sd_detach_mutex); 9047 un->un_detach_count--; 9048 mutex_exit(&sd_detach_mutex); 9049 9050 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 9051 return (DDI_FAILURE); 9052 } 9053 9054 9055 /* 9056 * Function: sd_create_errstats 9057 * 9058 * Description: This routine instantiates the device error stats. 
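*
* For example, instance 0 of "sd" gets kstat module "sderr",
* name "sd0,err", class "device_error" (per the snprintf and
* kstat_create calls below); these are the counters reported
* by iostat -E.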
9059 * 9060 * Note: During attach the stats are instantiated first so they are 9061 * available for attach-time routines that utilize the driver 9062 * iopath to send commands to the device. The stats are initialized 9063 * separately so data obtained during some attach-time routines is 9064 * available. (4362483) 9065 * 9066 * Arguments: un - driver soft state (unit) structure 9067 * instance - driver instance 9068 * 9069 * Context: Kernel thread context 9070 */ 9071 9072 static void 9073 sd_create_errstats(struct sd_lun *un, int instance) 9074 { 9075 struct sd_errstats *stp; 9076 char kstatmodule_err[KSTAT_STRLEN]; 9077 char kstatname[KSTAT_STRLEN]; 9078 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 9079 9080 ASSERT(un != NULL); 9081 9082 if (un->un_errstats != NULL) { 9083 return; 9084 } 9085 9086 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 9087 "%serr", sd_label); 9088 (void) snprintf(kstatname, sizeof (kstatname), 9089 "%s%d,err", sd_label, instance); 9090 9091 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 9092 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 9093 9094 if (un->un_errstats == NULL) { 9095 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 9096 "sd_create_errstats: Failed kstat_create\n"); 9097 return; 9098 } 9099 9100 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9101 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 9102 KSTAT_DATA_UINT32); 9103 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 9104 KSTAT_DATA_UINT32); 9105 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 9106 KSTAT_DATA_UINT32); 9107 kstat_named_init(&stp->sd_vid, "Vendor", 9108 KSTAT_DATA_CHAR); 9109 kstat_named_init(&stp->sd_pid, "Product", 9110 KSTAT_DATA_CHAR); 9111 kstat_named_init(&stp->sd_revision, "Revision", 9112 KSTAT_DATA_CHAR); 9113 kstat_named_init(&stp->sd_serial, "Serial No", 9114 KSTAT_DATA_CHAR); 9115 kstat_named_init(&stp->sd_capacity, "Size", 9116 KSTAT_DATA_ULONGLONG); 9117 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 9118 KSTAT_DATA_UINT32); 9119 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 9120 KSTAT_DATA_UINT32); 9121 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 9122 KSTAT_DATA_UINT32); 9123 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 9124 KSTAT_DATA_UINT32); 9125 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 9126 KSTAT_DATA_UINT32); 9127 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 9128 KSTAT_DATA_UINT32); 9129 9130 un->un_errstats->ks_private = un; 9131 un->un_errstats->ks_update = nulldev; 9132 9133 kstat_install(un->un_errstats); 9134 } 9135 9136 9137 /* 9138 * Function: sd_set_errstats 9139 * 9140 * Description: This routine sets the value of the vendor id, product id, 9141 * revision, serial number, and capacity device error stats. 9142 * 9143 * Note: During attach the stats are instantiated first so they are 9144 * available for attach-time routines that utilize the driver 9145 * iopath to send commands to the device. The stats are initialized 9146 * separately so data obtained during some attach-time routines is 9147 * available. 
(4362483)
9148 *
9149 * Arguments: un - driver soft state (unit) structure
9150 *
9151 * Context: Kernel thread context
9152 */
9153
9154 static void
9155 sd_set_errstats(struct sd_lun *un)
9156 {
9157 struct sd_errstats *stp;
9158 char *sn;
9159
9160 ASSERT(un != NULL);
9161 ASSERT(un->un_errstats != NULL);
9162 stp = (struct sd_errstats *)un->un_errstats->ks_data;
9163 ASSERT(stp != NULL);
9164 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
9165 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
9166 (void) strncpy(stp->sd_revision.value.c,
9167 un->un_sd->sd_inq->inq_revision, 4);
9168
9169 /*
9170 * All the errstats are persistent across detach/attach,
9171 * so reset all the errstats here in case of the hot
9172 * replacement of disk drives, except for unchanged
9173 * Sun qualified drives.
9174 */
9175 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
9176 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
9177 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
9178 stp->sd_softerrs.value.ui32 = 0;
9179 stp->sd_harderrs.value.ui32 = 0;
9180 stp->sd_transerrs.value.ui32 = 0;
9181 stp->sd_rq_media_err.value.ui32 = 0;
9182 stp->sd_rq_ntrdy_err.value.ui32 = 0;
9183 stp->sd_rq_nodev_err.value.ui32 = 0;
9184 stp->sd_rq_recov_err.value.ui32 = 0;
9185 stp->sd_rq_illrq_err.value.ui32 = 0;
9186 stp->sd_rq_pfa_err.value.ui32 = 0;
9187 }
9188
9189 /*
9190 * Set the "Serial No" kstat for Sun qualified drives (indicated by
9191 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid)
9192 * (4376302))
9193 */
9194 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
9195 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
9196 sizeof (SD_INQUIRY(un)->inq_serial));
9197 } else {
9198 /*
9199 * Set the "Serial No" kstat for non-Sun qualified drives
9200 */
9201 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, SD_DEVINFO(un),
9202 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9203 INQUIRY_SERIAL_NO, &sn) == DDI_SUCCESS) {
9204 (void) strlcpy(stp->sd_serial.value.c, sn,
9205 sizeof (stp->sd_serial.value.c));
9206 ddi_prop_free(sn);
9207 }
9208 }
9209
9210 if (un->un_f_blockcount_is_valid != TRUE) {
9211 /*
9212 * Set capacity error stat to 0 for no media. This ensures
9213 * a valid capacity is displayed in response to 'iostat -E'
9214 * when no media is present in the device.
9215 */
9216 stp->sd_capacity.value.ui64 = 0;
9217 } else {
9218 /*
9219 * Multiply un_blockcount by un->un_sys_blocksize to get
9220 * capacity.
9221 *
9222 * Note: for non-512 blocksize devices "un_blockcount" has been
9223 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
9224 * (un_tgt_blocksize / un->un_sys_blocksize).
9225 */
9226 stp->sd_capacity.value.ui64 = (uint64_t)
9227 ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
9228 }
9229 }
9230
9231
9232 /*
9233 * Function: sd_set_pstats
9234 *
9235 * Description: This routine instantiates and initializes the partition
9236 * stats for each partition with more than zero blocks.
9237 * (4363169)
9238 *
9239 * Arguments: un - driver soft state (unit) structure
9240 *
9241 * Context: Kernel thread context
9242 */
9243
9244 static void
9245 sd_set_pstats(struct sd_lun *un)
9246 {
9247 char kstatname[KSTAT_STRLEN];
9248 int instance;
9249 int i;
9250 diskaddr_t nblks = 0;
9251 char *partname = NULL;
9252
9253 ASSERT(un != NULL);
9254
9255 instance = ddi_get_instance(SD_DEVINFO(un));
9256
9257 /* Note:x86: is this a VTOC8/VTOC16 difference?
*/
9258 for (i = 0; i < NSDMAP; i++) {
9259
9260 if (cmlb_partinfo(un->un_cmlbhandle, i,
9261 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
9262 continue;
9263 mutex_enter(SD_MUTEX(un));
9264
9265 if ((un->un_pstats[i] == NULL) &&
9266 (nblks != 0)) {
9267
9268 (void) snprintf(kstatname, sizeof (kstatname),
9269 "%s%d,%s", sd_label, instance,
9270 partname);
9271
9272 un->un_pstats[i] = kstat_create(sd_label,
9273 instance, kstatname, "partition", KSTAT_TYPE_IO,
9274 1, KSTAT_FLAG_PERSISTENT);
9275 if (un->un_pstats[i] != NULL) {
9276 un->un_pstats[i]->ks_lock = SD_MUTEX(un);
9277 kstat_install(un->un_pstats[i]);
9278 }
9279 }
9280 mutex_exit(SD_MUTEX(un));
9281 }
9282 }
9283
9284
9285 #if (defined(__fibre))
9286 /*
9287 * Function: sd_init_event_callbacks
9288 *
9289 * Description: This routine initializes the insertion and removal event
9290 * callbacks. (fibre only)
9291 *
9292 * Arguments: un - driver soft state (unit) structure
9293 *
9294 * Context: Kernel thread context
9295 */
9296
9297 static void
9298 sd_init_event_callbacks(struct sd_lun *un)
9299 {
9300 ASSERT(un != NULL);
9301
9302 if ((un->un_insert_event == NULL) &&
9303 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
9304 &un->un_insert_event) == DDI_SUCCESS)) {
9305 /*
9306 * Add the callback for an insertion event
9307 */
9308 (void) ddi_add_event_handler(SD_DEVINFO(un),
9309 un->un_insert_event, sd_event_callback, (void *)un,
9310 &(un->un_insert_cb_id));
9311 }
9312
9313 if ((un->un_remove_event == NULL) &&
9314 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
9315 &un->un_remove_event) == DDI_SUCCESS)) {
9316 /*
9317 * Add the callback for a removal event
9318 */
9319 (void) ddi_add_event_handler(SD_DEVINFO(un),
9320 un->un_remove_event, sd_event_callback, (void *)un,
9321 &(un->un_remove_cb_id));
9322 }
9323 }
9324
9325
9326 /*
9327 * Function: sd_event_callback
9328 *
9329 * Description: This routine handles insert/remove events (photon). The
9330 * state is changed to OFFLINE which can be used to suppress
9331 * error msgs. (fibre only)
9332 *
9333 * Arguments: un - driver soft state (unit) structure
9334 *
9335 * Context: Callout thread context
9336 */
9337 /* ARGSUSED */
9338 static void
9339 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
9340 void *bus_impldata)
9341 {
9342 struct sd_lun *un = (struct sd_lun *)arg;
9343
9344 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
9345 if (event == un->un_insert_event) {
9346 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
9347 mutex_enter(SD_MUTEX(un));
9348 if (un->un_state == SD_STATE_OFFLINE) {
9349 if (un->un_last_state != SD_STATE_SUSPENDED) {
9350 un->un_state = un->un_last_state;
9351 } else {
9352 /*
9353 * We have gone through SUSPEND/RESUME while
9354 * we were offline. Restore the last state
9355 */
9356 un->un_state = un->un_save_state;
9357 }
9358 }
9359 mutex_exit(SD_MUTEX(un));
9360
9361 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
9362 } else if (event == un->un_remove_event) {
9363 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
9364 mutex_enter(SD_MUTEX(un));
9365 /*
9366 * We need to handle an event callback that occurs during
9367 * the suspend operation, since we don't prevent it.
9368 */ 9369 if (un->un_state != SD_STATE_OFFLINE) { 9370 if (un->un_state != SD_STATE_SUSPENDED) { 9371 New_state(un, SD_STATE_OFFLINE); 9372 } else { 9373 un->un_last_state = SD_STATE_OFFLINE; 9374 } 9375 } 9376 mutex_exit(SD_MUTEX(un)); 9377 } else { 9378 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 9379 "!Unknown event\n"); 9380 } 9381 9382 } 9383 #endif 9384 9385 /* 9386 * Values related to caching mode page depending on whether the unit is ATAPI. 9387 */ 9388 #define SDC_CDB_GROUP(un) ((un->un_f_cfg_is_atapi == TRUE) ? \ 9389 CDB_GROUP1 : CDB_GROUP0) 9390 #define SDC_HDRLEN(un) ((un->un_f_cfg_is_atapi == TRUE) ? \ 9391 MODE_HEADER_LENGTH_GRP2 : MODE_HEADER_LENGTH) 9392 /* 9393 * Use mode_cache_scsi3 to ensure we get all of the mode sense data, otherwise 9394 * the mode select will fail (mode_cache_scsi3 is a superset of mode_caching). 9395 */ 9396 #define SDC_BUFLEN(un) (SDC_HDRLEN(un) + MODE_BLK_DESC_LENGTH + \ 9397 sizeof (struct mode_cache_scsi3)) 9398 9399 static int 9400 sd_get_caching_mode_page(sd_ssc_t *ssc, uchar_t page_control, uchar_t **header, 9401 int *bdlen) 9402 { 9403 struct sd_lun *un = ssc->ssc_un; 9404 struct mode_caching *mode_caching_page; 9405 size_t buflen = SDC_BUFLEN(un); 9406 int hdrlen = SDC_HDRLEN(un); 9407 int rval; 9408 9409 /* 9410 * Do a test unit ready, otherwise a mode sense may not work if this 9411 * is the first command sent to the device after boot. 9412 */ 9413 if (sd_send_scsi_TEST_UNIT_READY(ssc, 0) != 0) 9414 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9415 9416 /* 9417 * Allocate memory for the retrieved mode page and its headers. Set 9418 * a pointer to the page itself. 9419 */ 9420 *header = kmem_zalloc(buflen, KM_SLEEP); 9421 9422 /* Get the information from the device */ 9423 rval = sd_send_scsi_MODE_SENSE(ssc, SDC_CDB_GROUP(un), *header, buflen, 9424 page_control | MODEPAGE_CACHING, SD_PATH_DIRECT); 9425 if (rval != 0) { 9426 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, "%s: Mode Sense Failed\n", 9427 __func__); 9428 goto mode_sense_failed; 9429 } 9430 9431 /* 9432 * Determine size of Block Descriptors in order to locate 9433 * the mode page data. ATAPI devices return 0, SCSI devices 9434 * should return MODE_BLK_DESC_LENGTH. 9435 */ 9436 if (un->un_f_cfg_is_atapi == TRUE) { 9437 struct mode_header_grp2 *mhp = 9438 (struct mode_header_grp2 *)(*header); 9439 *bdlen = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9440 } else { 9441 *bdlen = ((struct mode_header *)(*header))->bdesc_length; 9442 } 9443 9444 if (*bdlen > MODE_BLK_DESC_LENGTH) { 9445 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0, 9446 "%s: Mode Sense returned invalid block descriptor length\n", 9447 __func__); 9448 rval = EIO; 9449 goto mode_sense_failed; 9450 } 9451 9452 mode_caching_page = (struct mode_caching *)(*header + hdrlen + *bdlen); 9453 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9454 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 9455 "%s: Mode Sense caching page code mismatch %d\n", 9456 __func__, mode_caching_page->mode_page.code); 9457 rval = EIO; 9458 } 9459 9460 mode_sense_failed: 9461 if (rval != 0) { 9462 kmem_free(*header, buflen); 9463 *header = NULL; 9464 *bdlen = 0; 9465 } 9466 return (rval); 9467 } 9468 9469 /* 9470 * Function: sd_cache_control() 9471 * 9472 * Description: This routine is the driver entry point for setting 9473 * read and write caching by modifying the WCE (write cache 9474 * enable) and RCD (read cache disable) bits of mode 9475 * page 8 (MODEPAGE_CACHING). 
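*
* For example (illustrative only), enabling the read cache while
* disabling the write cache is requested as:
*
*     rval = sd_cache_control(ssc, SD_CACHE_ENABLE, SD_CACHE_DISABLE);
*
* which clears both the RCD bit and the WCE bit in the caching page.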
9476 * 9477 * Arguments: ssc - ssc contains pointer to driver soft state 9478 * (unit) structure for this target. 9479 * rcd_flag - flag for controlling the read cache 9480 * wce_flag - flag for controlling the write cache 9481 * 9482 * Return Code: EIO 9483 * code returned by sd_send_scsi_MODE_SENSE and 9484 * sd_send_scsi_MODE_SELECT 9485 * 9486 * Context: Kernel Thread 9487 */ 9488 9489 static int 9490 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag) 9491 { 9492 struct sd_lun *un = ssc->ssc_un; 9493 struct mode_caching *mode_caching_page; 9494 uchar_t *header; 9495 size_t buflen = SDC_BUFLEN(un); 9496 int hdrlen = SDC_HDRLEN(un); 9497 int bdlen; 9498 int rval; 9499 9500 rval = sd_get_caching_mode_page(ssc, MODEPAGE_CURRENT, &header, &bdlen); 9501 switch (rval) { 9502 case 0: 9503 /* Check the relevant bits on successful mode sense */ 9504 mode_caching_page = (struct mode_caching *)(header + hdrlen + 9505 bdlen); 9506 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 9507 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 9508 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 9509 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 9510 size_t sbuflen; 9511 uchar_t save_pg; 9512 9513 /* 9514 * Construct select buffer length based on the 9515 * length of the sense data returned. 9516 */ 9517 sbuflen = hdrlen + bdlen + sizeof (struct mode_page) + 9518 (int)mode_caching_page->mode_page.length; 9519 9520 /* Set the caching bits as requested */ 9521 if (rcd_flag == SD_CACHE_ENABLE) 9522 mode_caching_page->rcd = 0; 9523 else if (rcd_flag == SD_CACHE_DISABLE) 9524 mode_caching_page->rcd = 1; 9525 9526 if (wce_flag == SD_CACHE_ENABLE) 9527 mode_caching_page->wce = 1; 9528 else if (wce_flag == SD_CACHE_DISABLE) 9529 mode_caching_page->wce = 0; 9530 9531 /* 9532 * Save the page if the mode sense says the 9533 * drive supports it. 9534 */ 9535 save_pg = mode_caching_page->mode_page.ps ? 9536 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 9537 9538 /* Clear reserved bits before mode select */ 9539 mode_caching_page->mode_page.ps = 0; 9540 9541 /* 9542 * Clear out mode header for mode select. 9543 * The rest of the retrieved page will be reused. 9544 */ 9545 bzero(header, hdrlen); 9546 9547 if (un->un_f_cfg_is_atapi == TRUE) { 9548 struct mode_header_grp2 *mhp = 9549 (struct mode_header_grp2 *)header; 9550 mhp->bdesc_length_hi = bdlen >> 8; 9551 mhp->bdesc_length_lo = (uchar_t)bdlen & 0xff; 9552 } else { 9553 ((struct mode_header *)header)->bdesc_length = 9554 bdlen; 9555 } 9556 9557 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9558 9559 /* Issue mode select to change the cache settings */ 9560 rval = sd_send_scsi_MODE_SELECT(ssc, SDC_CDB_GROUP(un), 9561 header, sbuflen, save_pg, SD_PATH_DIRECT); 9562 } 9563 kmem_free(header, buflen); 9564 break; 9565 case EIO: 9566 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9567 break; 9568 default: 9569 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9570 break; 9571 } 9572 9573 return (rval); 9574 } 9575 9576 9577 /* 9578 * Function: sd_get_write_cache_enabled() 9579 * 9580 * Description: This routine is the driver entry point for determining if write 9581 * caching is enabled. It examines the WCE (write cache enable) 9582 * bits of mode page 8 (MODEPAGE_CACHING) with Page Control field 9583 * bits set to MODEPAGE_CURRENT. 9584 * 9585 * Arguments: ssc - ssc contains pointer to driver soft state 9586 * (unit) structure for this target. 
9587 * is_enabled - pointer to int where write cache enabled state 9588 * is returned (non-zero -> write cache enabled) 9589 * 9590 * Return Code: EIO 9591 * code returned by sd_send_scsi_MODE_SENSE 9592 * 9593 * Context: Kernel Thread 9594 * 9595 * NOTE: If ioctl is added to disable write cache, this sequence should 9596 * be followed so that no locking is required for accesses to 9597 * un->un_f_write_cache_enabled: 9598 * do mode select to clear wce 9599 * do synchronize cache to flush cache 9600 * set un->un_f_write_cache_enabled = FALSE 9601 * 9602 * Conversely, an ioctl to enable the write cache should be done 9603 * in this order: 9604 * set un->un_f_write_cache_enabled = TRUE 9605 * do mode select to set wce 9606 */ 9607 9608 static int 9609 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled) 9610 { 9611 struct sd_lun *un = ssc->ssc_un; 9612 struct mode_caching *mode_caching_page; 9613 uchar_t *header; 9614 size_t buflen = SDC_BUFLEN(un); 9615 int hdrlen = SDC_HDRLEN(un); 9616 int bdlen; 9617 int rval; 9618 9619 /* In case of error, flag as enabled */ 9620 *is_enabled = TRUE; 9621 9622 rval = sd_get_caching_mode_page(ssc, MODEPAGE_CURRENT, &header, &bdlen); 9623 switch (rval) { 9624 case 0: 9625 mode_caching_page = (struct mode_caching *)(header + hdrlen + 9626 bdlen); 9627 *is_enabled = mode_caching_page->wce; 9628 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 9629 kmem_free(header, buflen); 9630 break; 9631 case EIO: { 9632 /* 9633 * Some disks do not support Mode Sense(6), we 9634 * should ignore this kind of error (sense key is 9635 * 0x5 - illegal request). 9636 */ 9637 uint8_t *sensep; 9638 int senlen; 9639 9640 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 9641 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 9642 ssc->ssc_uscsi_cmd->uscsi_rqresid); 9643 9644 if (senlen > 0 && 9645 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 9646 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 9647 } else { 9648 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9649 } 9650 break; 9651 } 9652 default: 9653 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9654 break; 9655 } 9656 9657 return (rval); 9658 } 9659 9660 /* 9661 * Function: sd_get_write_cache_changeable() 9662 * 9663 * Description: This routine is the driver entry point for determining if write 9664 * caching is changeable. It examines the WCE (write cache enable) 9665 * bits of mode page 8 (MODEPAGE_CACHING) with Page Control field 9666 * bits set to MODEPAGE_CHANGEABLE. 9667 * 9668 * Arguments: ssc - ssc contains pointer to driver soft state 9669 * (unit) structure for this target. 
9670 * is_changeable - pointer to int where write cache changeable 9671 * state is returned (non-zero -> write cache 9672 * changeable) 9673 * 9674 * Context: Kernel Thread 9675 */ 9676 9677 static void 9678 sd_get_write_cache_changeable(sd_ssc_t *ssc, int *is_changeable) 9679 { 9680 struct sd_lun *un = ssc->ssc_un; 9681 struct mode_caching *mode_caching_page; 9682 uchar_t *header; 9683 size_t buflen = SDC_BUFLEN(un); 9684 int hdrlen = SDC_HDRLEN(un); 9685 int bdlen; 9686 int rval; 9687 9688 /* In case of error, flag as enabled */ 9689 *is_changeable = TRUE; 9690 9691 rval = sd_get_caching_mode_page(ssc, MODEPAGE_CHANGEABLE, &header, 9692 &bdlen); 9693 switch (rval) { 9694 case 0: 9695 mode_caching_page = (struct mode_caching *)(header + hdrlen + 9696 bdlen); 9697 *is_changeable = mode_caching_page->wce; 9698 kmem_free(header, buflen); 9699 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 9700 break; 9701 case EIO: 9702 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9703 break; 9704 default: 9705 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9706 break; 9707 } 9708 } 9709 9710 /* 9711 * Function: sd_get_nv_sup() 9712 * 9713 * Description: This routine is the driver entry point for 9714 * determining whether non-volatile cache is supported. This 9715 * determination process works as follows: 9716 * 9717 * 1. sd first queries sd.conf on whether 9718 * suppress_cache_flush bit is set for this device. 9719 * 9720 * 2. if not there, then queries the internal disk table. 9721 * 9722 * 3. if either sd.conf or internal disk table specifies 9723 * cache flush be suppressed, we don't bother checking 9724 * NV_SUP bit. 9725 * 9726 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 9727 * the optional INQUIRY VPD page 0x86. If the device 9728 * supports VPD page 0x86, sd examines the NV_SUP 9729 * (non-volatile cache support) bit in the INQUIRY VPD page 9730 * 0x86: 9731 * o If NV_SUP bit is set, sd assumes the device has a 9732 * non-volatile cache and set the 9733 * un_f_sync_nv_supported to TRUE. 9734 * o Otherwise cache is not non-volatile, 9735 * un_f_sync_nv_supported is set to FALSE. 9736 * 9737 * Arguments: un - driver soft state (unit) structure 9738 * 9739 * Return Code: 9740 * 9741 * Context: Kernel Thread 9742 */ 9743 9744 static void 9745 sd_get_nv_sup(sd_ssc_t *ssc) 9746 { 9747 int rval = 0; 9748 uchar_t *inq86 = NULL; 9749 size_t inq86_len = MAX_INQUIRY_SIZE; 9750 size_t inq86_resid = 0; 9751 struct dk_callback *dkc; 9752 struct sd_lun *un; 9753 9754 ASSERT(ssc != NULL); 9755 un = ssc->ssc_un; 9756 ASSERT(un != NULL); 9757 9758 mutex_enter(SD_MUTEX(un)); 9759 9760 /* 9761 * Be conservative on the device's support of 9762 * SYNC_NV bit: un_f_sync_nv_supported is 9763 * initialized to be false. 9764 */ 9765 un->un_f_sync_nv_supported = FALSE; 9766 9767 /* 9768 * If either sd.conf or internal disk table 9769 * specifies cache flush be suppressed, then 9770 * we don't bother checking NV_SUP bit. 
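*
* One way un_f_suppress_cache_flush gets set is via a sd.conf
* sd-config-list entry, e.g. (the exact tunable spelling is
* defined by the sd-config-list parser; shown here as an
* assumption):
*
*     sd-config-list = "VENDOR  PRODUCT", "cache-nonvolatile:true";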
9771 */
9772 if (un->un_f_suppress_cache_flush == TRUE) {
9773 mutex_exit(SD_MUTEX(un));
9774 return;
9775 }
9776
9777 if (sd_check_vpd_page_support(ssc) == 0 &&
9778 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
9779 mutex_exit(SD_MUTEX(un));
9780 /* collect page 86 data if available */
9781 inq86 = kmem_zalloc(inq86_len, KM_SLEEP);
9782
9783 rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len,
9784 0x01, 0x86, &inq86_resid);
9785
9786 if (rval == 0 && (inq86_len - inq86_resid > 6)) {
9787 SD_TRACE(SD_LOG_COMMON, un,
9788 "sd_get_nv_sup: \
9789 successfully get VPD page: %x \
9790 PAGE LENGTH: %x BYTE 6: %x\n",
9791 inq86[1], inq86[3], inq86[6]);
9792
9793 mutex_enter(SD_MUTEX(un));
9794 /*
9795 * check the value of NV_SUP bit: only if the device
9796 * reports NV_SUP bit to be 1, the
9797 * un_f_sync_nv_supported bit will be set to true.
9798 */
9799 if (inq86[6] & SD_VPD_NV_SUP) {
9800 un->un_f_sync_nv_supported = TRUE;
9801 }
9802 mutex_exit(SD_MUTEX(un));
9803 } else if (rval != 0) {
9804 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9805 }
9806
9807 kmem_free(inq86, inq86_len);
9808 } else {
9809 mutex_exit(SD_MUTEX(un));
9810 }
9811
9812 /*
9813 * Send a SYNC CACHE command to check whether
9814 * SYNC_NV bit is supported. This command should have
9815 * un_f_sync_nv_supported set to the correct value.
9816 */
9817 mutex_enter(SD_MUTEX(un));
9818 if (un->un_f_sync_nv_supported) {
9819 mutex_exit(SD_MUTEX(un));
9820 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
9821 dkc->dkc_flag = FLUSH_VOLATILE;
9822 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
9823
9824 /*
9825 * Send a TEST UNIT READY command to the device. This should
9826 * clear any outstanding UNIT ATTENTION that may be present.
9827 */
9828 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
9829 if (rval != 0)
9830 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9831
9832 kmem_free(dkc, sizeof (struct dk_callback));
9833 } else {
9834 mutex_exit(SD_MUTEX(un));
9835 }
9836
9837 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \
9838 un_f_suppress_cache_flush is set to %d\n",
9839 un->un_f_suppress_cache_flush);
9840 }
9841
9842 /*
9843 * Function: sd_make_device
9844 *
9845 * Description: Utility routine to return the Solaris device number from
9846 * the data in the device's dev_info structure.
9847 *
9848 * Return Code: The Solaris device number
9849 *
9850 * Context: Any
9851 */
9852
9853 static dev_t
9854 sd_make_device(dev_info_t *devi)
9855 {
9856 return (makedevice(ddi_driver_major(devi),
9857 ddi_get_instance(devi) << SDUNIT_SHIFT));
9858 }
9859
9860
9861 /*
9862 * Function: sd_pm_entry
9863 *
9864 * Description: Called at the start of a new command to manage power
9865 * and busy status of a device. This includes determining whether
9866 * the current power state of the device is sufficient for
9867 * performing the command or whether it must be changed.
9868 * The PM framework is notified appropriately.
9869 * Only with a return status of DDI_SUCCESS will the
9870 * component be busy to the framework.
9871 *
9872 * All callers of sd_pm_entry must check the return status
9873 * and only call sd_pm_exit if it was DDI_SUCCESS. A status
9874 * of DDI_FAILURE indicates the device failed to power up.
9875 * In this case un_pm_count has been adjusted so the result
9876 * on exit is still powered down, ie. count is less than 0.
9877 * Calling sd_pm_exit with this count value hits an ASSERT.
9878 *
9879 * Return Code: DDI_SUCCESS or DDI_FAILURE
9880 *
9881 * Context: Kernel thread context.
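*
* A sketch of the expected pairing at a call site (illustrative
* only):
*
*     if (sd_pm_entry(un) == DDI_SUCCESS) {
*         (issue the command)
*         sd_pm_exit(un);
*     }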
9882 */
9883
9884 static int
9885 sd_pm_entry(struct sd_lun *un)
9886 {
9887 int return_status = DDI_SUCCESS;
9888
9889 ASSERT(!mutex_owned(SD_MUTEX(un)));
9890 ASSERT(!mutex_owned(&un->un_pm_mutex));
9891
9892 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");
9893
9894 if (un->un_f_pm_is_enabled == FALSE) {
9895 SD_TRACE(SD_LOG_IO_PM, un,
9896 "sd_pm_entry: exiting, PM not enabled\n");
9897 return (return_status);
9898 }
9899
9900 /*
9901 * Just increment a counter if PM is enabled. On the transition from
9902 * 0 ==> 1, mark the device as busy. The iodone side will decrement
9903 * the count with each IO and mark the device as idle when the count
9904 * hits 0.
9905 *
9906 * If the count is less than 0 the device is powered down. If a powered
9907 * down device is successfully powered up then the count must be
9908 * incremented to reflect the power up. Note that it'll get incremented
9909 * a second time to become busy.
9910 *
9911 * Because the following has the potential to change the device state
9912 * and must release the un_pm_mutex to do so, only one thread can be
9913 * allowed through at a time.
9914 */
9915
9916 mutex_enter(&un->un_pm_mutex);
9917 while (un->un_pm_busy == TRUE) {
9918 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
9919 }
9920 un->un_pm_busy = TRUE;
9921
9922 if (un->un_pm_count < 1) {
9923
9924 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");
9925
9926 /*
9927 * Indicate we are now busy so the framework won't attempt to
9928 * power down the device. This call will only fail if either
9929 * we passed a bad component number or the device has no
9930 * components. Neither of these should ever happen.
9931 */
9932 mutex_exit(&un->un_pm_mutex);
9933 return_status = pm_busy_component(SD_DEVINFO(un), 0);
9934 ASSERT(return_status == DDI_SUCCESS);
9935
9936 mutex_enter(&un->un_pm_mutex);
9937
9938 if (un->un_pm_count < 0) {
9939 mutex_exit(&un->un_pm_mutex);
9940
9941 SD_TRACE(SD_LOG_IO_PM, un,
9942 "sd_pm_entry: power up component\n");
9943
9944 /*
9945 * pm_raise_power will cause sdpower to be called
9946 * which brings the device power level to the
9947 * desired state. If successful, un_pm_count and
9948 * un_power_level will be updated appropriately.
9949 */
9950 return_status = pm_raise_power(SD_DEVINFO(un), 0,
9951 SD_PM_STATE_ACTIVE(un));
9952
9953 mutex_enter(&un->un_pm_mutex);
9954
9955 if (return_status != DDI_SUCCESS) {
9956 /*
9957 * Power up failed.
9958 * Idle the device and adjust the count
9959 * so the result on exit is that we're
9960 * still powered down, ie. count is less than 0.
9961 */
9962 SD_TRACE(SD_LOG_IO_PM, un,
9963 "sd_pm_entry: power up failed,"
9964 " idle the component\n");
9965
9966 (void) pm_idle_component(SD_DEVINFO(un), 0);
9967 un->un_pm_count--;
9968 } else {
9969 /*
9970 * Device is powered up, verify the
9971 * count is non-negative.
9972 * This is debug only.
9973 */
9974 ASSERT(un->un_pm_count == 0);
9975 }
9976 }
9977
9978 if (return_status == DDI_SUCCESS) {
9979 /*
9980 * For performance, now that the device has been tagged
9981 * as busy, and it's known to be powered up, update the
9982 * chain types to use jump tables that do not include
9983 * pm. This significantly lowers the overhead and
9984 * therefore improves performance.
9985 */ 9986 9987 mutex_exit(&un->un_pm_mutex); 9988 mutex_enter(SD_MUTEX(un)); 9989 SD_TRACE(SD_LOG_IO_PM, un, 9990 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9991 un->un_uscsi_chain_type); 9992 9993 if (un->un_f_non_devbsize_supported) { 9994 un->un_buf_chain_type = 9995 SD_CHAIN_INFO_RMMEDIA_NO_PM; 9996 } else { 9997 un->un_buf_chain_type = 9998 SD_CHAIN_INFO_DISK_NO_PM; 9999 } 10000 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 10001 10002 SD_TRACE(SD_LOG_IO_PM, un, 10003 " changed uscsi_chain_type to %d\n", 10004 un->un_uscsi_chain_type); 10005 mutex_exit(SD_MUTEX(un)); 10006 mutex_enter(&un->un_pm_mutex); 10007 10008 if (un->un_pm_idle_timeid == NULL) { 10009 /* 300 ms. */ 10010 un->un_pm_idle_timeid = 10011 timeout(sd_pm_idletimeout_handler, un, 10012 (drv_usectohz((clock_t)300000))); 10013 /* 10014 * Include an extra call to busy which keeps the 10015 * device busy with-respect-to the PM layer 10016 * until the timer fires, at which time it'll 10017 * get the extra idle call. 10018 */ 10019 (void) pm_busy_component(SD_DEVINFO(un), 0); 10020 } 10021 } 10022 } 10023 un->un_pm_busy = FALSE; 10024 /* Next... */ 10025 cv_signal(&un->un_pm_busy_cv); 10026 10027 un->un_pm_count++; 10028 10029 SD_TRACE(SD_LOG_IO_PM, un, 10030 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 10031 10032 mutex_exit(&un->un_pm_mutex); 10033 10034 return (return_status); 10035 } 10036 10037 10038 /* 10039 * Function: sd_pm_exit 10040 * 10041 * Description: Called at the completion of a command to manage busy 10042 * status for the device. If the device becomes idle the 10043 * PM framework is notified. 10044 * 10045 * Context: Kernel thread context 10046 */ 10047 10048 static void 10049 sd_pm_exit(struct sd_lun *un) 10050 { 10051 ASSERT(!mutex_owned(SD_MUTEX(un))); 10052 ASSERT(!mutex_owned(&un->un_pm_mutex)); 10053 10054 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 10055 10056 /* 10057 * After attach the following flag is only read, so don't 10058 * take the penalty of acquiring a mutex for it. 10059 */ 10060 if (un->un_f_pm_is_enabled == TRUE) { 10061 10062 mutex_enter(&un->un_pm_mutex); 10063 un->un_pm_count--; 10064 10065 SD_TRACE(SD_LOG_IO_PM, un, 10066 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 10067 10068 ASSERT(un->un_pm_count >= 0); 10069 if (un->un_pm_count == 0) { 10070 mutex_exit(&un->un_pm_mutex); 10071 10072 SD_TRACE(SD_LOG_IO_PM, un, 10073 "sd_pm_exit: idle component\n"); 10074 10075 (void) pm_idle_component(SD_DEVINFO(un), 0); 10076 10077 } else { 10078 mutex_exit(&un->un_pm_mutex); 10079 } 10080 } 10081 10082 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 10083 } 10084 10085 10086 /* 10087 * Function: sdopen 10088 * 10089 * Description: Driver's open(9e) entry point function. 
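*
* The minor number encodes both instance and partition; the
* decoding below uses the SDUNIT()/SDPART() macros from sddef.h
* (high minor bits select the instance, low bits the partition;
* compare sd_make_device() above), in sketch:
*
*     instance = SDUNIT(dev);
*     part = SDPART(dev);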
10090 *
10091 * Arguments: dev_p - pointer to device number
10092 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
10093 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10094 * cred_p - user credential pointer
10095 *
10096 * Return Code: EINVAL
10097 * ENXIO
10098 * EIO
10099 * EROFS
10100 * EBUSY
10101 *
10102 * Context: Kernel thread context
10103 */
10104 /* ARGSUSED */
10105 static int
10106 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
10107 {
10108 struct sd_lun *un;
10109 int nodelay;
10110 int part;
10111 uint64_t partmask;
10112 int instance;
10113 dev_t dev;
10114 int rval = EIO;
10115 diskaddr_t nblks = 0;
10116 diskaddr_t label_cap;
10117
10118 /* Validate the open type */
10119 if (otyp >= OTYPCNT) {
10120 return (EINVAL);
10121 }
10122
10123 dev = *dev_p;
10124 instance = SDUNIT(dev);
10125 mutex_enter(&sd_detach_mutex);
10126
10127 /*
10128 * Fail the open if there is no softstate for the instance, or
10129 * if another thread somewhere is trying to detach the instance.
10130 */
10131 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
10132 (un->un_detach_count != 0)) {
10133 mutex_exit(&sd_detach_mutex);
10134 /*
10135 * The probe cache only needs to be cleared when open (9e) fails
10136 * with ENXIO (4238046).
10137 */
10138 /*
10139 * Unconditionally clearing the probe cache is ok with
10140 * separate sd/ssd binaries; on the x86 platform it can be
10141 * an issue, since both parallel and fibre are handled in
10142 * one binary.
10143 */
10144 sd_scsi_clear_probe_cache();
10145 return (ENXIO);
10146 }
10147
10148 /*
10149 * The un_layer_count is to prevent another thread in specfs from
10150 * trying to detach the instance, which can happen when we are
10151 * called from a higher-layer driver instead of thru specfs.
10152 * This will not be needed when DDI provides a layered driver
10153 * interface that allows specfs to know that an instance is in
10154 * use by a layered driver & should not be detached.
10155 *
10156 * Note: the semantics for layered driver opens are exactly one
10157 * close for every open.
10158 */
10159 if (otyp == OTYP_LYR) {
10160 un->un_layer_count++;
10161 }
10162
10163 /*
10164 * Keep a count of the current # of opens in progress. This is because
10165 * some layered drivers try to call us as a regular open. This can
10166 * cause problems that we cannot prevent; however, by keeping this count
10167 * we can at least keep our open and detach routines from racing against
10168 * each other under such conditions.
10169 */
10170 un->un_opens_in_progress++;
10171 mutex_exit(&sd_detach_mutex);
10172
10173 nodelay = (flag & (FNDELAY | FNONBLOCK));
10174 part = SDPART(dev);
10175 partmask = 1 << part;
10176
10177 /*
10178 * We use a semaphore here in order to serialize
10179 * open and close requests on the device.
10180 */
10181 sema_p(&un->un_semoclose);
10182
10183 mutex_enter(SD_MUTEX(un));
10184
10185 /*
10186 * All device accesses go thru sdstrategy() where we check
10187 * on suspend status but there could be a scsi_poll command,
10188 * which bypasses sdstrategy(), so we need to check pm
10189 * status.
10190 */
10191
10192 if (!nodelay) {
10193 while ((un->un_state == SD_STATE_SUSPENDED) ||
10194 (un->un_state == SD_STATE_PM_CHANGING)) {
10195 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10196 }
10197
10198 mutex_exit(SD_MUTEX(un));
10199 if (sd_pm_entry(un) != DDI_SUCCESS) {
10200 rval = EIO;
10201 SD_ERROR(SD_LOG_OPEN_CLOSE, un,
10202 "sdopen: sd_pm_entry failed\n");
10203 goto open_failed_with_pm;
10204 }
10205 mutex_enter(SD_MUTEX(un));
10206 }
10207
10208 /* check for previous exclusive open */
10209 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
10210 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10211 "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
10212 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
10213
10214 if (un->un_exclopen & (partmask)) {
10215 goto excl_open_fail;
10216 }
10217
10218 if (flag & FEXCL) {
10219 int i;
10220 if (un->un_ocmap.lyropen[part]) {
10221 goto excl_open_fail;
10222 }
10223 for (i = 0; i < (OTYPCNT - 1); i++) {
10224 if (un->un_ocmap.regopen[i] & (partmask)) {
10225 goto excl_open_fail;
10226 }
10227 }
10228 }
10229
10230 /*
10231 * Check the write permission if this is a removable media device,
10232 * NDELAY has not been set, and writable permission is requested.
10233 *
10234 * Note: If NDELAY was set and this is write-protected media the WRITE
10235 * attempt will fail with EIO as part of the I/O processing. This is a
10236 * more permissive implementation that allows the open to succeed and
10237 * WRITE attempts to fail when appropriate.
10238 */
10239 if (un->un_f_chk_wp_open) {
10240 if ((flag & FWRITE) && (!nodelay)) {
10241 mutex_exit(SD_MUTEX(un));
10242 /*
10243 * Defer the check for write permission on a writable
10244 * DVD drive until sdstrategy, and do not fail the open
10245 * even if FWRITE is set, as the device can be writable
10246 * depending upon the media, and the media can change
10247 * after the call to open().
10248 */
10249 if (un->un_f_dvdram_writable_device == FALSE) {
10250 if (ISCD(un) || sr_check_wp(dev)) {
10251 rval = EROFS;
10252 mutex_enter(SD_MUTEX(un));
10253 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10254 "write to cd or write protected media\n");
10255 goto open_fail;
10256 }
10257 }
10258 mutex_enter(SD_MUTEX(un));
10259 }
10260 }
10261
10262 /*
10263 * If opening in NDELAY/NONBLOCK mode, just return.
10264 * Check if disk is ready and has a valid geometry later.
10265 */
10266 if (!nodelay) {
10267 sd_ssc_t *ssc;
10268
10269 mutex_exit(SD_MUTEX(un));
10270 ssc = sd_ssc_init(un);
10271 rval = sd_ready_and_valid(ssc, part);
10272 sd_ssc_fini(ssc);
10273 mutex_enter(SD_MUTEX(un));
10274 /*
10275 * Fail if device is not ready or if the number of disk
10276 * blocks is zero or negative for non-CD devices.
10277 */
10278
10279 nblks = 0;
10280
10281 if (rval == SD_READY_VALID && (!ISCD(un))) {
10282 /* if cmlb_partinfo fails, nblks remains 0 */
10283 mutex_exit(SD_MUTEX(un));
10284 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
10285 NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
10286 mutex_enter(SD_MUTEX(un));
10287 }
10288
10289 if ((rval != SD_READY_VALID) ||
10290 (!ISCD(un) && nblks <= 0)) {
10291 rval = un->un_f_has_removable_media ? ENXIO : EIO;
10292 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10293 "device not ready or invalid disk block value\n");
10294 goto open_fail;
10295 }
10296 #if defined(__i386) || defined(__amd64)
10297 } else {
10298 uchar_t *cp;
10299 /*
10300 * x86 requires special nodelay handling, so that p0 is
10301 * always defined and accessible.
10302 * Invalidate geometry only if device is not already open. 10303 */ 10304 cp = &un->un_ocmap.chkd[0]; 10305 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10306 if (*cp != (uchar_t)0) { 10307 break; 10308 } 10309 cp++; 10310 } 10311 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10312 mutex_exit(SD_MUTEX(un)); 10313 cmlb_invalidate(un->un_cmlbhandle, 10314 (void *)SD_PATH_DIRECT); 10315 mutex_enter(SD_MUTEX(un)); 10316 } 10317 10318 #endif 10319 } 10320 10321 if (otyp == OTYP_LYR) { 10322 un->un_ocmap.lyropen[part]++; 10323 } else { 10324 un->un_ocmap.regopen[otyp] |= partmask; 10325 } 10326 10327 /* Set up open and exclusive open flags */ 10328 if (flag & FEXCL) { 10329 un->un_exclopen |= (partmask); 10330 } 10331 10332 /* 10333 * If the lun is EFI labeled and lun capacity is greater than the 10334 * capacity contained in the label, log a sys-event to notify the 10335 * interested module. 10336 * To avoid an infinite loop of logging sys-event, we only log the 10337 * event when the lun is not opened in NDELAY mode. The event handler 10338 * should open the lun in NDELAY mode. 10339 */ 10340 if (!nodelay) { 10341 mutex_exit(SD_MUTEX(un)); 10342 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 10343 (void*)SD_PATH_DIRECT) == 0) { 10344 mutex_enter(SD_MUTEX(un)); 10345 if (un->un_f_blockcount_is_valid && 10346 un->un_blockcount > label_cap && 10347 un->un_f_expnevent == B_FALSE) { 10348 un->un_f_expnevent = B_TRUE; 10349 mutex_exit(SD_MUTEX(un)); 10350 sd_log_lun_expansion_event(un, 10351 (nodelay ? KM_NOSLEEP : KM_SLEEP)); 10352 mutex_enter(SD_MUTEX(un)); 10353 } 10354 } else { 10355 mutex_enter(SD_MUTEX(un)); 10356 } 10357 } 10358 10359 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10360 "open of part %d type %d\n", part, otyp); 10361 10362 mutex_exit(SD_MUTEX(un)); 10363 if (!nodelay) { 10364 sd_pm_exit(un); 10365 } 10366 10367 sema_v(&un->un_semoclose); 10368 10369 mutex_enter(&sd_detach_mutex); 10370 un->un_opens_in_progress--; 10371 mutex_exit(&sd_detach_mutex); 10372 10373 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 10374 return (DDI_SUCCESS); 10375 10376 excl_open_fail: 10377 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 10378 rval = EBUSY; 10379 10380 open_fail: 10381 mutex_exit(SD_MUTEX(un)); 10382 10383 /* 10384 * On a failed open we must exit the pm management. 10385 */ 10386 if (!nodelay) { 10387 sd_pm_exit(un); 10388 } 10389 open_failed_with_pm: 10390 sema_v(&un->un_semoclose); 10391 10392 mutex_enter(&sd_detach_mutex); 10393 un->un_opens_in_progress--; 10394 if (otyp == OTYP_LYR) { 10395 un->un_layer_count--; 10396 } 10397 mutex_exit(&sd_detach_mutex); 10398 10399 return (rval); 10400 } 10401 10402 10403 /* 10404 * Function: sdclose 10405 * 10406 * Description: Driver's close(9e) entry point function. 
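*
* Note: layered (OTYP_LYR) opens are counted in
* un_ocmap.lyropen[part] rather than flagged, so a layered
* consumer must balance each open with exactly one close; see
* the matching note in sdopen() above.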
10407 *
10408 * Arguments: dev - device number
10409 * flag - file status flag, informational only
10410 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10411 * cred_p - user credential pointer
10412 *
10413 * Return Code: ENXIO
10414 *
10415 * Context: Kernel thread context
10416 */
10417 /* ARGSUSED */
10418 static int
10419 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
10420 {
10421 struct sd_lun *un;
10422 uchar_t *cp;
10423 int part;
10424 int nodelay;
10425 int rval = 0;
10426
10427 /* Validate the open type */
10428 if (otyp >= OTYPCNT) {
10429 return (ENXIO);
10430 }
10431
10432 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10433 return (ENXIO);
10434 }
10435
10436 part = SDPART(dev);
10437 nodelay = flag & (FNDELAY | FNONBLOCK);
10438
10439 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10440 "sdclose: close of part %d type %d\n", part, otyp);
10441
10442 /*
10443 * We use a semaphore here in order to serialize
10444 * open and close requests on the device.
10445 */
10446 sema_p(&un->un_semoclose);
10447
10448 mutex_enter(SD_MUTEX(un));
10449
10450 /* Don't proceed if power is being changed. */
10451 while (un->un_state == SD_STATE_PM_CHANGING) {
10452 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10453 }
10454
10455 if (un->un_exclopen & (1 << part)) {
10456 un->un_exclopen &= ~(1 << part);
10457 }
10458
10459 /* Update the open partition map */
10460 if (otyp == OTYP_LYR) {
10461 un->un_ocmap.lyropen[part] -= 1;
10462 } else {
10463 un->un_ocmap.regopen[otyp] &= ~(1 << part);
10464 }
10465
10466 cp = &un->un_ocmap.chkd[0];
10467 while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10468 if (*cp != NULL) {
10469 break;
10470 }
10471 cp++;
10472 }
10473
10474 if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10475 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
10476
10477 /*
10478 * We avoid persistence upon the last close, and set
10479 * the throttle back to the maximum.
10480 */
10481 un->un_throttle = un->un_saved_throttle;
10482
10483 if (un->un_state == SD_STATE_OFFLINE) {
10484 if (un->un_f_is_fibre == FALSE) {
10485 scsi_log(SD_DEVINFO(un), sd_label,
10486 CE_WARN, "offline\n");
10487 }
10488 mutex_exit(SD_MUTEX(un));
10489 cmlb_invalidate(un->un_cmlbhandle,
10490 (void *)SD_PATH_DIRECT);
10491 mutex_enter(SD_MUTEX(un));
10492
10493 } else {
10494 /*
10495 * Flush any outstanding writes in NVRAM cache.
10496 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
10497 * cmd, it may not work for non-Pluto devices.
10498 * SYNCHRONIZE CACHE is not required for removables,
10499 * except DVD-RAM drives.
10500 *
10501 * Also note: because SYNCHRONIZE CACHE is currently
10502 * the only command issued here that requires the
10503 * drive be powered up, only do the power up before
10504 * sending the Sync Cache command. If additional
10505 * commands are added which require a powered up
10506 * drive, the following sequence may have to change.
10507 *
10508 * And finally, note that parallel SCSI on SPARC
10509 * only issues a Sync Cache to DVD-RAM, a newly
10510 * supported device.
10511 */
10512 #if defined(__i386) || defined(__amd64)
10513 if ((un->un_f_sync_cache_supported &&
10514 un->un_f_sync_cache_required) ||
10515 un->un_f_dvdram_writable_device == TRUE) {
10516 #else
10517 if (un->un_f_dvdram_writable_device == TRUE) {
10518 #endif
10519 mutex_exit(SD_MUTEX(un));
10520 if (sd_pm_entry(un) == DDI_SUCCESS) {
10521 rval =
10522 sd_send_scsi_SYNCHRONIZE_CACHE(un,
10523 NULL);
10524 /* ignore error if not supported */
10525 if (rval == ENOTSUP) {
10526 rval = 0;
10527 } else if (rval != 0) {
10528 rval = EIO;
10529 }
10530 sd_pm_exit(un);
10531 } else {
10532 rval = EIO;
10533 }
10534 mutex_enter(SD_MUTEX(un));
10535 }
10536
10537 /*
10538 * For devices which support DOOR_LOCK, send an ALLOW
10539 * MEDIA REMOVAL command, but don't get upset if it
10540 * fails. We need to raise the power of the drive before
10541 * we can call sd_send_scsi_DOORLOCK().
10542 */
10543 if (un->un_f_doorlock_supported) {
10544 mutex_exit(SD_MUTEX(un));
10545 if (sd_pm_entry(un) == DDI_SUCCESS) {
10546 sd_ssc_t *ssc;
10547
10548 ssc = sd_ssc_init(un);
10549 rval = sd_send_scsi_DOORLOCK(ssc,
10550 SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
10551 if (rval != 0)
10552 sd_ssc_assessment(ssc,
10553 SD_FMT_IGNORE);
10554 sd_ssc_fini(ssc);
10555
10556 sd_pm_exit(un);
10557 if (ISCD(un) && (rval != 0) &&
10558 (nodelay != 0)) {
10559 rval = ENXIO;
10560 }
10561 } else {
10562 rval = EIO;
10563 }
10564 mutex_enter(SD_MUTEX(un));
10565 }
10566
10567 /*
10568 * If a device has removable media, invalidate all
10569 * parameters related to media, such as geometry,
10570 * blocksize, and blockcount.
10571 */
10572 if (un->un_f_has_removable_media) {
10573 sr_ejected(un);
10574 }
10575
10576 /*
10577 * Destroy the cache (if it exists) which was
10578 * allocated for the write maps since this is
10579 * the last close for this media.
10580 */
10581 if (un->un_wm_cache) {
10582 /*
10583 * Check if there are pending commands,
10584 * and if there are, give a warning and
10585 * do not destroy the cache.
10586 */
10587 if (un->un_ncmds_in_driver > 0) {
10588 scsi_log(SD_DEVINFO(un),
10589 sd_label, CE_WARN,
10590 "Unable to clean up memory "
10591 "because of pending I/O\n");
10592 } else {
10593 kmem_cache_destroy(
10594 un->un_wm_cache);
10595 un->un_wm_cache = NULL;
10596 }
10597 }
10598 }
10599 }
10600
10601 mutex_exit(SD_MUTEX(un));
10602 sema_v(&un->un_semoclose);
10603
10604 if (otyp == OTYP_LYR) {
10605 mutex_enter(&sd_detach_mutex);
10606 /*
10607 * The detach routine may run when the layer count
10608 * drops to zero.
10609 */
10610 un->un_layer_count--;
10611 mutex_exit(&sd_detach_mutex);
10612 }
10613
10614 return (rval);
10615 }
10616
10617
10618 /*
10619 * Function: sd_ready_and_valid
10620 *
10621 * Description: Test if device is ready and has a valid geometry.
10622 *
10623 * Arguments: ssc - sd_ssc_t will contain un
10624 * un - driver soft state (unit) structure
10625 *
10626 * Return Code: SD_READY_VALID ready and valid label
10627 * SD_NOT_READY_VALID not ready, no label
10628 * SD_RESERVED_BY_OTHERS reservation conflict
10629 *
10630 * Context: Never called at interrupt context.
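*
* Callers typically bracket this routine with an sd_ssc_t, as
* sdopen() does:
*
*     ssc = sd_ssc_init(un);
*     rval = sd_ready_and_valid(ssc, part);
*     sd_ssc_fini(ssc);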
10631 */
10632
10633 static int
10634 sd_ready_and_valid(sd_ssc_t *ssc, int part)
10635 {
10636 struct sd_errstats *stp;
10637 uint64_t capacity;
10638 uint_t lbasize;
10639 int rval = SD_READY_VALID;
10640 char name_str[48];
10641 boolean_t is_valid;
10642 struct sd_lun *un;
10643 int status;
10644
10645 ASSERT(ssc != NULL);
10646 un = ssc->ssc_un;
10647 ASSERT(un != NULL);
10648 ASSERT(!mutex_owned(SD_MUTEX(un)));
10649
10650 mutex_enter(SD_MUTEX(un));
10651 /*
10652 * If a device has removable media, we must check if media is
10653 * ready when checking if this device is ready and valid.
10654 */
10655 if (un->un_f_has_removable_media) {
10656 mutex_exit(SD_MUTEX(un));
10657 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10658
10659 if (status != 0) {
10660 rval = SD_NOT_READY_VALID;
10661 mutex_enter(SD_MUTEX(un));
10662
10663 /* Ignore all failed status for removable media */
10664 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10665
10666 goto done;
10667 }
10668
10669 is_valid = SD_IS_VALID_LABEL(un);
10670 mutex_enter(SD_MUTEX(un));
10671 if (!is_valid ||
10672 (un->un_f_blockcount_is_valid == FALSE) ||
10673 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
10674
10675 /* capacity has to be read every open. */
10676 mutex_exit(SD_MUTEX(un));
10677 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
10678 &lbasize, SD_PATH_DIRECT);
10679
10680 if (status != 0) {
10681 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10682
10683 cmlb_invalidate(un->un_cmlbhandle,
10684 (void *)SD_PATH_DIRECT);
10685 mutex_enter(SD_MUTEX(un));
10686 rval = SD_NOT_READY_VALID;
10687
10688 goto done;
10689 } else {
10690 mutex_enter(SD_MUTEX(un));
10691 sd_update_block_info(un, lbasize, capacity);
10692 }
10693 }
10694
10695 /*
10696 * Check if the media in the device is writable or not.
10697 */
10698 if (!is_valid && ISCD(un)) {
10699 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
10700 }
10701
10702 } else {
10703 /*
10704 * Do a test unit ready to clear any unit attention from non-cd
10705 * devices.
10706 */
10707 mutex_exit(SD_MUTEX(un));
10708
10709 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10710 if (status != 0) {
10711 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10712 }
10713
10714 mutex_enter(SD_MUTEX(un));
10715 }
10716
10717
10718 /*
10719 * If this is a non-512 block device, allocate space for
10720 * the wmap cache. This is being done here since every time
10721 * a media is changed this routine will be called and the
10722 * block size is a function of media rather than device.
10723 */
10724 if (((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR ||
10725 un->un_f_non_devbsize_supported) &&
10726 un->un_tgt_blocksize != DEV_BSIZE) ||
10727 un->un_f_enable_rmw) {
10728 if (!(un->un_wm_cache)) {
10729 (void) snprintf(name_str, sizeof (name_str),
10730 "%s%d_cache",
10731 ddi_driver_name(SD_DEVINFO(un)),
10732 ddi_get_instance(SD_DEVINFO(un)));
10733 un->un_wm_cache = kmem_cache_create(
10734 name_str, sizeof (struct sd_w_map),
10735 8, sd_wm_cache_constructor,
10736 sd_wm_cache_destructor, NULL,
10737 (void *)un, NULL, 0);
10738 if (!(un->un_wm_cache)) {
10739 rval = ENOMEM;
10740 goto done;
10741 }
10742 }
10743 }
10744
10745 if (un->un_state == SD_STATE_NORMAL) {
10746 /*
10747 * If the target is not yet ready here (defined by a TUR
10748 * failure), invalidate the geometry and print an 'offline'
10749 * message. This is a legacy message, as the state of the
10750 * target is not actually changed to SD_STATE_OFFLINE.
10751 * 10752 * If the TUR fails for EACCES (Reservation Conflict), 10753 * SD_RESERVED_BY_OTHERS will be returned to indicate 10754 * reservation conflict. If the TUR fails for other 10755 * reasons, SD_NOT_READY_VALID will be returned. 10756 */ 10757 int err; 10758 10759 mutex_exit(SD_MUTEX(un)); 10760 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10761 mutex_enter(SD_MUTEX(un)); 10762 10763 if (err != 0) { 10764 mutex_exit(SD_MUTEX(un)); 10765 cmlb_invalidate(un->un_cmlbhandle, 10766 (void *)SD_PATH_DIRECT); 10767 mutex_enter(SD_MUTEX(un)); 10768 if (err == EACCES) { 10769 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10770 "reservation conflict\n"); 10771 rval = SD_RESERVED_BY_OTHERS; 10772 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10773 } else { 10774 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10775 "drive offline\n"); 10776 rval = SD_NOT_READY_VALID; 10777 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 10778 } 10779 goto done; 10780 } 10781 } 10782 10783 if (un->un_f_format_in_progress == FALSE) { 10784 mutex_exit(SD_MUTEX(un)); 10785 10786 (void) cmlb_validate(un->un_cmlbhandle, 0, 10787 (void *)SD_PATH_DIRECT); 10788 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL, 10789 NULL, (void *) SD_PATH_DIRECT) != 0) { 10790 rval = SD_NOT_READY_VALID; 10791 mutex_enter(SD_MUTEX(un)); 10792 10793 goto done; 10794 } 10795 if (un->un_f_pkstats_enabled) { 10796 sd_set_pstats(un); 10797 SD_TRACE(SD_LOG_IO_PARTITION, un, 10798 "sd_ready_and_valid: un:0x%p pstats created and " 10799 "set\n", un); 10800 } 10801 mutex_enter(SD_MUTEX(un)); 10802 } 10803 10804 /* 10805 * If this device supports DOOR_LOCK command, try and send 10806 * this command to PREVENT MEDIA REMOVAL, but don't get upset 10807 * if it fails. For a CD, however, it is an error 10808 */ 10809 if (un->un_f_doorlock_supported) { 10810 mutex_exit(SD_MUTEX(un)); 10811 status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 10812 SD_PATH_DIRECT); 10813 10814 if ((status != 0) && ISCD(un)) { 10815 rval = SD_NOT_READY_VALID; 10816 mutex_enter(SD_MUTEX(un)); 10817 10818 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10819 10820 goto done; 10821 } else if (status != 0) 10822 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10823 mutex_enter(SD_MUTEX(un)); 10824 } 10825 10826 /* The state has changed, inform the media watch routines */ 10827 un->un_mediastate = DKIO_INSERTED; 10828 cv_broadcast(&un->un_state_cv); 10829 rval = SD_READY_VALID; 10830 10831 done: 10832 10833 /* 10834 * Initialize the capacity kstat value, if no media previously 10835 * (capacity kstat is 0) and a media has been inserted 10836 * (un_blockcount > 0). 10837 */ 10838 if (un->un_errstats != NULL) { 10839 stp = (struct sd_errstats *)un->un_errstats->ks_data; 10840 if ((stp->sd_capacity.value.ui64 == 0) && 10841 (un->un_f_blockcount_is_valid == TRUE)) { 10842 stp->sd_capacity.value.ui64 = 10843 (uint64_t)((uint64_t)un->un_blockcount * 10844 un->un_sys_blocksize); 10845 } 10846 } 10847 10848 mutex_exit(SD_MUTEX(un)); 10849 return (rval); 10850 } 10851 10852 10853 /* 10854 * Function: sdmin 10855 * 10856 * Description: Routine to limit the size of a data transfer. Used in 10857 * conjunction with physio(9F). 10858 * 10859 * Arguments: bp - pointer to the indicated buf(9S) struct. 10860 * 10861 * Context: Kernel thread context. 
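 *
 * sdmin() is handed to physio(9F) as the minphys-style routine, e.g.
 * as in sdread() below:
 *
 *	err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
 *
 * physio(9F) invokes it on each buf it builds, so the transfer length
 * is clamped to un_max_xfer_size before sdstrategy() ever sees it.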
10862 */ 10863 10864 static void 10865 sdmin(struct buf *bp) 10866 { 10867 struct sd_lun *un; 10868 int instance; 10869 10870 instance = SDUNIT(bp->b_edev); 10871 10872 un = ddi_get_soft_state(sd_state, instance); 10873 ASSERT(un != NULL); 10874 10875 /* 10876 * We depend on buf breakup to restrict 10877 * IO size if it is enabled. 10878 */ 10879 if (un->un_buf_breakup_supported) { 10880 return; 10881 } 10882 10883 if (bp->b_bcount > un->un_max_xfer_size) { 10884 bp->b_bcount = un->un_max_xfer_size; 10885 } 10886 } 10887 10888 10889 /* 10890 * Function: sdread 10891 * 10892 * Description: Driver's read(9e) entry point function. 10893 * 10894 * Arguments: dev - device number 10895 * uio - structure pointer describing where data is to be stored 10896 * in user's space 10897 * cred_p - user credential pointer 10898 * 10899 * Return Code: ENXIO 10900 * EIO 10901 * EINVAL 10902 * value returned by physio 10903 * 10904 * Context: Kernel thread context. 10905 */ 10906 /* ARGSUSED */ 10907 static int 10908 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 10909 { 10910 struct sd_lun *un = NULL; 10911 int secmask; 10912 int err = 0; 10913 sd_ssc_t *ssc; 10914 10915 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10916 return (ENXIO); 10917 } 10918 10919 ASSERT(!mutex_owned(SD_MUTEX(un))); 10920 10921 10922 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10923 mutex_enter(SD_MUTEX(un)); 10924 /* 10925 * Because the call to sd_ready_and_valid will issue I/O we 10926 * must wait here if either the device is suspended or 10927 * if it's power level is changing. 10928 */ 10929 while ((un->un_state == SD_STATE_SUSPENDED) || 10930 (un->un_state == SD_STATE_PM_CHANGING)) { 10931 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10932 } 10933 un->un_ncmds_in_driver++; 10934 mutex_exit(SD_MUTEX(un)); 10935 10936 /* Initialize sd_ssc_t for internal uscsi commands */ 10937 ssc = sd_ssc_init(un); 10938 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10939 err = EIO; 10940 } else { 10941 err = 0; 10942 } 10943 sd_ssc_fini(ssc); 10944 10945 mutex_enter(SD_MUTEX(un)); 10946 un->un_ncmds_in_driver--; 10947 ASSERT(un->un_ncmds_in_driver >= 0); 10948 mutex_exit(SD_MUTEX(un)); 10949 if (err != 0) 10950 return (err); 10951 } 10952 10953 /* 10954 * Read requests are restricted to multiples of the system block size. 10955 */ 10956 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR && 10957 !un->un_f_enable_rmw) 10958 secmask = un->un_tgt_blocksize - 1; 10959 else 10960 secmask = DEV_BSIZE - 1; 10961 10962 if (uio->uio_loffset & ((offset_t)(secmask))) { 10963 SD_ERROR(SD_LOG_READ_WRITE, un, 10964 "sdread: file offset not modulo %d\n", 10965 secmask + 1); 10966 err = EINVAL; 10967 } else if (uio->uio_iov->iov_len & (secmask)) { 10968 SD_ERROR(SD_LOG_READ_WRITE, un, 10969 "sdread: transfer length not modulo %d\n", 10970 secmask + 1); 10971 err = EINVAL; 10972 } else { 10973 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 10974 } 10975 10976 return (err); 10977 } 10978 10979 10980 /* 10981 * Function: sdwrite 10982 * 10983 * Description: Driver's write(9e) entry point function. 10984 * 10985 * Arguments: dev - device number 10986 * uio - structure pointer describing where data is stored in 10987 * user's space 10988 * cred_p - user credential pointer 10989 * 10990 * Return Code: ENXIO 10991 * EIO 10992 * EINVAL 10993 * value returned by physio 10994 * 10995 * Context: Kernel thread context. 
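 *
 * A worked example of the secmask check below (illustrative numbers):
 * with a 4096-byte target block size and RMW disabled,
 * secmask = 4096 - 1 = 0xFFF, so
 *
 *	uio_loffset = 8192:  8192 & 0xFFF == 0    -> accepted
 *	uio_loffset = 4100:  4100 & 0xFFF != 0    -> EINVAL
 *
 * i.e. both the file offset and the transfer length must be multiples
 * of the block size.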
10996 */ 10997 /* ARGSUSED */ 10998 static int 10999 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 11000 { 11001 struct sd_lun *un = NULL; 11002 int secmask; 11003 int err = 0; 11004 sd_ssc_t *ssc; 11005 11006 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 11007 return (ENXIO); 11008 } 11009 11010 ASSERT(!mutex_owned(SD_MUTEX(un))); 11011 11012 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 11013 mutex_enter(SD_MUTEX(un)); 11014 /* 11015 * Because the call to sd_ready_and_valid will issue I/O we 11016 * must wait here if either the device is suspended or 11017 * if it's power level is changing. 11018 */ 11019 while ((un->un_state == SD_STATE_SUSPENDED) || 11020 (un->un_state == SD_STATE_PM_CHANGING)) { 11021 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11022 } 11023 un->un_ncmds_in_driver++; 11024 mutex_exit(SD_MUTEX(un)); 11025 11026 /* Initialize sd_ssc_t for internal uscsi commands */ 11027 ssc = sd_ssc_init(un); 11028 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 11029 err = EIO; 11030 } else { 11031 err = 0; 11032 } 11033 sd_ssc_fini(ssc); 11034 11035 mutex_enter(SD_MUTEX(un)); 11036 un->un_ncmds_in_driver--; 11037 ASSERT(un->un_ncmds_in_driver >= 0); 11038 mutex_exit(SD_MUTEX(un)); 11039 if (err != 0) 11040 return (err); 11041 } 11042 11043 /* 11044 * Write requests are restricted to multiples of the system block size. 11045 */ 11046 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR && 11047 !un->un_f_enable_rmw) 11048 secmask = un->un_tgt_blocksize - 1; 11049 else 11050 secmask = DEV_BSIZE - 1; 11051 11052 if (uio->uio_loffset & ((offset_t)(secmask))) { 11053 SD_ERROR(SD_LOG_READ_WRITE, un, 11054 "sdwrite: file offset not modulo %d\n", 11055 secmask + 1); 11056 err = EINVAL; 11057 } else if (uio->uio_iov->iov_len & (secmask)) { 11058 SD_ERROR(SD_LOG_READ_WRITE, un, 11059 "sdwrite: transfer length not modulo %d\n", 11060 secmask + 1); 11061 err = EINVAL; 11062 } else { 11063 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 11064 } 11065 11066 return (err); 11067 } 11068 11069 11070 /* 11071 * Function: sdaread 11072 * 11073 * Description: Driver's aread(9e) entry point function. 11074 * 11075 * Arguments: dev - device number 11076 * aio - structure pointer describing where data is to be stored 11077 * cred_p - user credential pointer 11078 * 11079 * Return Code: ENXIO 11080 * EIO 11081 * EINVAL 11082 * value returned by aphysio 11083 * 11084 * Context: Kernel thread context. 11085 */ 11086 /* ARGSUSED */ 11087 static int 11088 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 11089 { 11090 struct sd_lun *un = NULL; 11091 struct uio *uio = aio->aio_uio; 11092 int secmask; 11093 int err = 0; 11094 sd_ssc_t *ssc; 11095 11096 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 11097 return (ENXIO); 11098 } 11099 11100 ASSERT(!mutex_owned(SD_MUTEX(un))); 11101 11102 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 11103 mutex_enter(SD_MUTEX(un)); 11104 /* 11105 * Because the call to sd_ready_and_valid will issue I/O we 11106 * must wait here if either the device is suspended or 11107 * if it's power level is changing. 
11108 */ 11109 while ((un->un_state == SD_STATE_SUSPENDED) || 11110 (un->un_state == SD_STATE_PM_CHANGING)) { 11111 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11112 } 11113 un->un_ncmds_in_driver++; 11114 mutex_exit(SD_MUTEX(un)); 11115 11116 /* Initialize sd_ssc_t for internal uscsi commands */ 11117 ssc = sd_ssc_init(un); 11118 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 11119 err = EIO; 11120 } else { 11121 err = 0; 11122 } 11123 sd_ssc_fini(ssc); 11124 11125 mutex_enter(SD_MUTEX(un)); 11126 un->un_ncmds_in_driver--; 11127 ASSERT(un->un_ncmds_in_driver >= 0); 11128 mutex_exit(SD_MUTEX(un)); 11129 if (err != 0) 11130 return (err); 11131 } 11132 11133 /* 11134 * Read requests are restricted to multiples of the system block size. 11135 */ 11136 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR && 11137 !un->un_f_enable_rmw) 11138 secmask = un->un_tgt_blocksize - 1; 11139 else 11140 secmask = DEV_BSIZE - 1; 11141 11142 if (uio->uio_loffset & ((offset_t)(secmask))) { 11143 SD_ERROR(SD_LOG_READ_WRITE, un, 11144 "sdaread: file offset not modulo %d\n", 11145 secmask + 1); 11146 err = EINVAL; 11147 } else if (uio->uio_iov->iov_len & (secmask)) { 11148 SD_ERROR(SD_LOG_READ_WRITE, un, 11149 "sdaread: transfer length not modulo %d\n", 11150 secmask + 1); 11151 err = EINVAL; 11152 } else { 11153 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 11154 } 11155 11156 return (err); 11157 } 11158 11159 11160 /* 11161 * Function: sdawrite 11162 * 11163 * Description: Driver's awrite(9e) entry point function. 11164 * 11165 * Arguments: dev - device number 11166 * aio - structure pointer describing where data is stored 11167 * cred_p - user credential pointer 11168 * 11169 * Return Code: ENXIO 11170 * EIO 11171 * EINVAL 11172 * value returned by aphysio 11173 * 11174 * Context: Kernel thread context. 11175 */ 11176 /* ARGSUSED */ 11177 static int 11178 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 11179 { 11180 struct sd_lun *un = NULL; 11181 struct uio *uio = aio->aio_uio; 11182 int secmask; 11183 int err = 0; 11184 sd_ssc_t *ssc; 11185 11186 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 11187 return (ENXIO); 11188 } 11189 11190 ASSERT(!mutex_owned(SD_MUTEX(un))); 11191 11192 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 11193 mutex_enter(SD_MUTEX(un)); 11194 /* 11195 * Because the call to sd_ready_and_valid will issue I/O we 11196 * must wait here if either the device is suspended or 11197 * if it's power level is changing. 11198 */ 11199 while ((un->un_state == SD_STATE_SUSPENDED) || 11200 (un->un_state == SD_STATE_PM_CHANGING)) { 11201 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11202 } 11203 un->un_ncmds_in_driver++; 11204 mutex_exit(SD_MUTEX(un)); 11205 11206 /* Initialize sd_ssc_t for internal uscsi commands */ 11207 ssc = sd_ssc_init(un); 11208 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 11209 err = EIO; 11210 } else { 11211 err = 0; 11212 } 11213 sd_ssc_fini(ssc); 11214 11215 mutex_enter(SD_MUTEX(un)); 11216 un->un_ncmds_in_driver--; 11217 ASSERT(un->un_ncmds_in_driver >= 0); 11218 mutex_exit(SD_MUTEX(un)); 11219 if (err != 0) 11220 return (err); 11221 } 11222 11223 /* 11224 * Write requests are restricted to multiples of the system block size. 
11225 */ 11226 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR && 11227 !un->un_f_enable_rmw) 11228 secmask = un->un_tgt_blocksize - 1; 11229 else 11230 secmask = DEV_BSIZE - 1; 11231 11232 if (uio->uio_loffset & ((offset_t)(secmask))) { 11233 SD_ERROR(SD_LOG_READ_WRITE, un, 11234 "sdawrite: file offset not modulo %d\n", 11235 secmask + 1); 11236 err = EINVAL; 11237 } else if (uio->uio_iov->iov_len & (secmask)) { 11238 SD_ERROR(SD_LOG_READ_WRITE, un, 11239 "sdawrite: transfer length not modulo %d\n", 11240 secmask + 1); 11241 err = EINVAL; 11242 } else { 11243 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 11244 } 11245 11246 return (err); 11247 } 11248 11249 11250 11251 11252 11253 /* 11254 * Driver IO processing follows the following sequence: 11255 * 11256 * sdioctl(9E) sdstrategy(9E) biodone(9F) 11257 * | | ^ 11258 * v v | 11259 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 11260 * | | | | 11261 * v | | | 11262 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 11263 * | | ^ ^ 11264 * v v | | 11265 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 11266 * | | | | 11267 * +---+ | +------------+ +-------+ 11268 * | | | | 11269 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11270 * | v | | 11271 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 11272 * | | ^ | 11273 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11274 * | v | | 11275 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 11276 * | | ^ | 11277 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11278 * | v | | 11279 * | sd_checksum_iostart() sd_checksum_iodone() | 11280 * | | ^ | 11281 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 11282 * | v | | 11283 * | sd_pm_iostart() sd_pm_iodone() | 11284 * | | ^ | 11285 * | | | | 11286 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 11287 * | ^ 11288 * v | 11289 * sd_core_iostart() | 11290 * | | 11291 * | +------>(*destroypkt)() 11292 * +-> sd_start_cmds() <-+ | | 11293 * | | | v 11294 * | | | scsi_destroy_pkt(9F) 11295 * | | | 11296 * +->(*initpkt)() +- sdintr() 11297 * | | | | 11298 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 11299 * | +-> scsi_setup_cdb(9F) | 11300 * | | 11301 * +--> scsi_transport(9F) | 11302 * | | 11303 * +----> SCSA ---->+ 11304 * 11305 * 11306 * This code is based upon the following presumptions: 11307 * 11308 * - iostart and iodone functions operate on buf(9S) structures. These 11309 * functions perform the necessary operations on the buf(9S) and pass 11310 * them along to the next function in the chain by using the macros 11311 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 11312 * (for iodone side functions). 11313 * 11314 * - The iostart side functions may sleep. The iodone side functions 11315 * are called under interrupt context and may NOT sleep. Therefore 11316 * iodone side functions also may not call iostart side functions. 11317 * (NOTE: iostart side functions should NOT sleep for memory, as 11318 * this could result in deadlock.) 11319 * 11320 * - An iostart side function may call its corresponding iodone side 11321 * function directly (if necessary). 11322 * 11323 * - In the event of an error, an iostart side function can return a buf(9S) 11324 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 11325 * b_error in the usual way of course). 11326 * 11327 * - The taskq mechanism may be used by the iodone side functions to dispatch 11328 * requests to the iostart side functions. 
The iostart side functions in
11329 * this case would be called under the context of a taskq thread, so it's
11330 * OK for them to block/sleep/spin in this case.
11331 *
11332 * - iostart side functions may allocate "shadow" buf(9S) structs and
11333 * pass them along to the next function in the chain. The corresponding
11334 * iodone side functions must coalesce the "shadow" bufs and return
11335 * the "original" buf to the next higher layer.
11336 *
11337 * - The b_private field of the buf(9S) struct holds a pointer to
11338 * an sd_xbuf struct, which contains information needed to
11339 * construct the scsi_pkt for the command.
11340 *
11341 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
11342 * layer must acquire & release the SD_MUTEX(un) as needed.
11343 */
11344
11345
11346 /*
11347 * Create taskq for all targets in the system. This is created at
11348 * _init(9E) and destroyed at _fini(9E).
11349 *
11350 * Note: here we set the minalloc to a reasonably high number to ensure that
11351 * we will have an adequate supply of task entries available at interrupt time.
11352 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
11353 * sd_taskq_create(). Since we do not want to sleep for allocations at
11354 * interrupt time, set maxalloc equal to minalloc. That way we will just fail
11355 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
11356 * requests at any one instant in time.
11357 */
11358 #define SD_TASKQ_NUMTHREADS 8
11359 #define SD_TASKQ_MINALLOC 256
11360 #define SD_TASKQ_MAXALLOC 256
11361
11362 static taskq_t *sd_tq = NULL;
11363 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))
11364
11365 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC;
11366 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
11367
11368 /*
11369 * The following task queue is created for the write part of
11370 * read-modify-write on non-512 block size devices.
11371 * Limit the number of threads to 1 for now; this number was chosen
11372 * because the queue currently applies only to DVD-RAM and MO drives,
11373 * for which performance is not the main criterion at this stage.
11374 * Note: whether a single taskq could serve both purposes remains to be
11375 * explored.
11376 */
11377 #define SD_WMR_TASKQ_NUMTHREADS 1
11378 static taskq_t *sd_wmr_tq = NULL;
11379 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))
11380
11381 /*
11382 * Function: sd_taskq_create
11383 *
11384 * Description: Create taskq thread(s) and preallocate task entries
11385 *
11386 * Return Code: None; the allocated taskq pointers are stored in
11387 * sd_tq and sd_wmr_tq.
11388 *
11389 * Context: Can sleep. Requires blockable context.
11390 *
11391 * Notes: - The taskq() facility currently is NOT part of the DDI.
11392 * (definitely NOT recommended for 3rd-party drivers!) :-)
11393 * - taskq_create() will block for memory, also it will panic
11394 * if it cannot create the requested number of threads.
11395 * - Currently taskq_create() creates threads that cannot be
11396 * swapped.
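 *	- A dispatch sketch (illustrative only; sd_task_func and its
 *	  argument are hypothetical names, not driver code):
 *
 *		if (taskq_dispatch(sd_tq, sd_task_func, un,
 *		    TQ_NOSLEEP) == 0) {
 *			(fail the command rather than sleep)
 *		}
 *
 *	  TQ_NOSLEEP is what makes the prepopulated minalloc/maxalloc
 *	  entries matter: once SD_TASKQ_MAXALLOC requests are
 *	  outstanding, the dispatch fails instead of blocking.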
11395 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 11396 * supply of taskq entries at interrupt time (ie, so that we 11397 * do not have to sleep for memory) 11398 */ 11399 11400 static void 11401 sd_taskq_create(void) 11402 { 11403 char taskq_name[TASKQ_NAMELEN]; 11404 11405 ASSERT(sd_tq == NULL); 11406 ASSERT(sd_wmr_tq == NULL); 11407 11408 (void) snprintf(taskq_name, sizeof (taskq_name), 11409 "%s_drv_taskq", sd_label); 11410 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 11411 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11412 TASKQ_PREPOPULATE)); 11413 11414 (void) snprintf(taskq_name, sizeof (taskq_name), 11415 "%s_rmw_taskq", sd_label); 11416 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 11417 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11418 TASKQ_PREPOPULATE)); 11419 } 11420 11421 11422 /* 11423 * Function: sd_taskq_delete 11424 * 11425 * Description: Complementary cleanup routine for sd_taskq_create(). 11426 * 11427 * Context: Kernel thread context. 11428 */ 11429 11430 static void 11431 sd_taskq_delete(void) 11432 { 11433 ASSERT(sd_tq != NULL); 11434 ASSERT(sd_wmr_tq != NULL); 11435 taskq_destroy(sd_tq); 11436 taskq_destroy(sd_wmr_tq); 11437 sd_tq = NULL; 11438 sd_wmr_tq = NULL; 11439 } 11440 11441 11442 /* 11443 * Function: sdstrategy 11444 * 11445 * Description: Driver's strategy (9E) entry point function. 11446 * 11447 * Arguments: bp - pointer to buf(9S) 11448 * 11449 * Return Code: Always returns zero 11450 * 11451 * Context: Kernel thread context. 11452 */ 11453 11454 static int 11455 sdstrategy(struct buf *bp) 11456 { 11457 struct sd_lun *un; 11458 11459 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11460 if (un == NULL) { 11461 bioerror(bp, EIO); 11462 bp->b_resid = bp->b_bcount; 11463 biodone(bp); 11464 return (0); 11465 } 11466 11467 /* As was done in the past, fail new cmds. if state is dumping. */ 11468 if (un->un_state == SD_STATE_DUMPING) { 11469 bioerror(bp, ENXIO); 11470 bp->b_resid = bp->b_bcount; 11471 biodone(bp); 11472 return (0); 11473 } 11474 11475 ASSERT(!mutex_owned(SD_MUTEX(un))); 11476 11477 /* 11478 * Commands may sneak in while we released the mutex in 11479 * DDI_SUSPEND, we should block new commands. However, old 11480 * commands that are still in the driver at this point should 11481 * still be allowed to drain. 11482 */ 11483 mutex_enter(SD_MUTEX(un)); 11484 /* 11485 * Must wait here if either the device is suspended or 11486 * if it's power level is changing. 11487 */ 11488 while ((un->un_state == SD_STATE_SUSPENDED) || 11489 (un->un_state == SD_STATE_PM_CHANGING)) { 11490 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11491 } 11492 11493 un->un_ncmds_in_driver++; 11494 11495 /* 11496 * atapi: Since we are running the CD for now in PIO mode we need to 11497 * call bp_mapin here to avoid bp_mapin called interrupt context under 11498 * the HBA's init_pkt routine. 11499 */ 11500 if (un->un_f_cfg_is_atapi == TRUE) { 11501 mutex_exit(SD_MUTEX(un)); 11502 bp_mapin(bp); 11503 mutex_enter(SD_MUTEX(un)); 11504 } 11505 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 11506 un->un_ncmds_in_driver); 11507 11508 if (bp->b_flags & B_WRITE) 11509 un->un_f_sync_cache_required = TRUE; 11510 11511 mutex_exit(SD_MUTEX(un)); 11512 11513 /* 11514 * This will (eventually) allocate the sd_xbuf area and 11515 * call sd_xbuf_strategy(). 
We just want to return the 11516 * result of ddi_xbuf_qstrategy so that we have an opt- 11517 * imized tail call which saves us a stack frame. 11518 */ 11519 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 11520 } 11521 11522 11523 /* 11524 * Function: sd_xbuf_strategy 11525 * 11526 * Description: Function for initiating IO operations via the 11527 * ddi_xbuf_qstrategy() mechanism. 11528 * 11529 * Context: Kernel thread context. 11530 */ 11531 11532 static void 11533 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 11534 { 11535 struct sd_lun *un = arg; 11536 11537 ASSERT(bp != NULL); 11538 ASSERT(xp != NULL); 11539 ASSERT(un != NULL); 11540 ASSERT(!mutex_owned(SD_MUTEX(un))); 11541 11542 /* 11543 * Initialize the fields in the xbuf and save a pointer to the 11544 * xbuf in bp->b_private. 11545 */ 11546 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 11547 11548 /* Send the buf down the iostart chain */ 11549 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 11550 } 11551 11552 11553 /* 11554 * Function: sd_xbuf_init 11555 * 11556 * Description: Prepare the given sd_xbuf struct for use. 11557 * 11558 * Arguments: un - ptr to softstate 11559 * bp - ptr to associated buf(9S) 11560 * xp - ptr to associated sd_xbuf 11561 * chain_type - IO chain type to use: 11562 * SD_CHAIN_NULL 11563 * SD_CHAIN_BUFIO 11564 * SD_CHAIN_USCSI 11565 * SD_CHAIN_DIRECT 11566 * SD_CHAIN_DIRECT_PRIORITY 11567 * pktinfop - ptr to private data struct for scsi_pkt(9S) 11568 * initialization; may be NULL if none. 11569 * 11570 * Context: Kernel thread context 11571 */ 11572 11573 static void 11574 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 11575 uchar_t chain_type, void *pktinfop) 11576 { 11577 int index; 11578 11579 ASSERT(un != NULL); 11580 ASSERT(bp != NULL); 11581 ASSERT(xp != NULL); 11582 11583 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 11584 bp, chain_type); 11585 11586 xp->xb_un = un; 11587 xp->xb_pktp = NULL; 11588 xp->xb_pktinfo = pktinfop; 11589 xp->xb_private = bp->b_private; 11590 xp->xb_blkno = (daddr_t)bp->b_blkno; 11591 11592 /* 11593 * Set up the iostart and iodone chain indexes in the xbuf, based 11594 * upon the specified chain type to use. 11595 */ 11596 switch (chain_type) { 11597 case SD_CHAIN_NULL: 11598 /* 11599 * Fall thru to just use the values for the buf type, even 11600 * tho for the NULL chain these values will never be used. 
11601 */ 11602 /* FALLTHRU */ 11603 case SD_CHAIN_BUFIO: 11604 index = un->un_buf_chain_type; 11605 if ((!un->un_f_has_removable_media) && 11606 (un->un_tgt_blocksize != 0) && 11607 (un->un_tgt_blocksize != DEV_BSIZE || 11608 un->un_f_enable_rmw)) { 11609 int secmask = 0, blknomask = 0; 11610 if (un->un_f_enable_rmw) { 11611 blknomask = 11612 (un->un_phy_blocksize / DEV_BSIZE) - 1; 11613 secmask = un->un_phy_blocksize - 1; 11614 } else { 11615 blknomask = 11616 (un->un_tgt_blocksize / DEV_BSIZE) - 1; 11617 secmask = un->un_tgt_blocksize - 1; 11618 } 11619 11620 if ((bp->b_lblkno & (blknomask)) || 11621 (bp->b_bcount & (secmask))) { 11622 if ((un->un_f_rmw_type != 11623 SD_RMW_TYPE_RETURN_ERROR) || 11624 un->un_f_enable_rmw) { 11625 if (un->un_f_pm_is_enabled == FALSE) 11626 index = 11627 SD_CHAIN_INFO_MSS_DSK_NO_PM; 11628 else 11629 index = 11630 SD_CHAIN_INFO_MSS_DISK; 11631 } 11632 } 11633 } 11634 break; 11635 case SD_CHAIN_USCSI: 11636 index = un->un_uscsi_chain_type; 11637 break; 11638 case SD_CHAIN_DIRECT: 11639 index = un->un_direct_chain_type; 11640 break; 11641 case SD_CHAIN_DIRECT_PRIORITY: 11642 index = un->un_priority_chain_type; 11643 break; 11644 default: 11645 /* We're really broken if we ever get here... */ 11646 panic("sd_xbuf_init: illegal chain type!"); 11647 /*NOTREACHED*/ 11648 } 11649 11650 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 11651 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 11652 11653 /* 11654 * It might be a bit easier to simply bzero the entire xbuf above, 11655 * but it turns out that since we init a fair number of members anyway, 11656 * we save a fair number cycles by doing explicit assignment of zero. 11657 */ 11658 xp->xb_pkt_flags = 0; 11659 xp->xb_dma_resid = 0; 11660 xp->xb_retry_count = 0; 11661 xp->xb_victim_retry_count = 0; 11662 xp->xb_ua_retry_count = 0; 11663 xp->xb_nr_retry_count = 0; 11664 xp->xb_sense_bp = NULL; 11665 xp->xb_sense_status = 0; 11666 xp->xb_sense_state = 0; 11667 xp->xb_sense_resid = 0; 11668 xp->xb_ena = 0; 11669 11670 bp->b_private = xp; 11671 bp->b_flags &= ~(B_DONE | B_ERROR); 11672 bp->b_resid = 0; 11673 bp->av_forw = NULL; 11674 bp->av_back = NULL; 11675 bioerror(bp, 0); 11676 11677 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 11678 } 11679 11680 11681 /* 11682 * Function: sd_uscsi_strategy 11683 * 11684 * Description: Wrapper for calling into the USCSI chain via physio(9F) 11685 * 11686 * Arguments: bp - buf struct ptr 11687 * 11688 * Return Code: Always returns 0 11689 * 11690 * Context: Kernel thread context 11691 */ 11692 11693 static int 11694 sd_uscsi_strategy(struct buf *bp) 11695 { 11696 struct sd_lun *un; 11697 struct sd_uscsi_info *uip; 11698 struct sd_xbuf *xp; 11699 uchar_t chain_type; 11700 uchar_t cmd; 11701 11702 ASSERT(bp != NULL); 11703 11704 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11705 if (un == NULL) { 11706 bioerror(bp, EIO); 11707 bp->b_resid = bp->b_bcount; 11708 biodone(bp); 11709 return (0); 11710 } 11711 11712 ASSERT(!mutex_owned(SD_MUTEX(un))); 11713 11714 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 11715 11716 /* 11717 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 11718 */ 11719 ASSERT(bp->b_private != NULL); 11720 uip = (struct sd_uscsi_info *)bp->b_private; 11721 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0]; 11722 11723 mutex_enter(SD_MUTEX(un)); 11724 /* 11725 * atapi: Since we are running the CD for now in PIO mode we need to 11726 * call bp_mapin here to 
avoid bp_mapin called interrupt context under 11727 * the HBA's init_pkt routine. 11728 */ 11729 if (un->un_f_cfg_is_atapi == TRUE) { 11730 mutex_exit(SD_MUTEX(un)); 11731 bp_mapin(bp); 11732 mutex_enter(SD_MUTEX(un)); 11733 } 11734 un->un_ncmds_in_driver++; 11735 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 11736 un->un_ncmds_in_driver); 11737 11738 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) && 11739 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1)) 11740 un->un_f_sync_cache_required = TRUE; 11741 11742 mutex_exit(SD_MUTEX(un)); 11743 11744 switch (uip->ui_flags) { 11745 case SD_PATH_DIRECT: 11746 chain_type = SD_CHAIN_DIRECT; 11747 break; 11748 case SD_PATH_DIRECT_PRIORITY: 11749 chain_type = SD_CHAIN_DIRECT_PRIORITY; 11750 break; 11751 default: 11752 chain_type = SD_CHAIN_USCSI; 11753 break; 11754 } 11755 11756 /* 11757 * We may allocate extra buf for external USCSI commands. If the 11758 * application asks for bigger than 20-byte sense data via USCSI, 11759 * SCSA layer will allocate 252 bytes sense buf for that command. 11760 */ 11761 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen > 11762 SENSE_LENGTH) { 11763 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH + 11764 MAX_SENSE_LENGTH, KM_SLEEP); 11765 } else { 11766 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP); 11767 } 11768 11769 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 11770 11771 /* Use the index obtained within xbuf_init */ 11772 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 11773 11774 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 11775 11776 return (0); 11777 } 11778 11779 /* 11780 * Function: sd_send_scsi_cmd 11781 * 11782 * Description: Runs a USCSI command for user (when called thru sdioctl), 11783 * or for the driver 11784 * 11785 * Arguments: dev - the dev_t for the device 11786 * incmd - ptr to a valid uscsi_cmd struct 11787 * flag - bit flag, indicating open settings, 32/64 bit type 11788 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11789 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11790 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11791 * to use the USCSI "direct" chain and bypass the normal 11792 * command waitq. 11793 * 11794 * Return Code: 0 - successful completion of the given command 11795 * EIO - scsi_uscsi_handle_command() failed 11796 * ENXIO - soft state not found for specified dev 11797 * EINVAL 11798 * EFAULT - copyin/copyout error 11799 * return code of scsi_uscsi_handle_command(): 11800 * EIO 11801 * ENXIO 11802 * EACCES 11803 * 11804 * Context: Waits for command to complete. Can sleep. 11805 */ 11806 11807 static int 11808 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 11809 enum uio_seg dataspace, int path_flag) 11810 { 11811 struct sd_lun *un; 11812 sd_ssc_t *ssc; 11813 int rval; 11814 11815 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 11816 if (un == NULL) { 11817 return (ENXIO); 11818 } 11819 11820 /* 11821 * Using sd_ssc_send to handle uscsi cmd 11822 */ 11823 ssc = sd_ssc_init(un); 11824 rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag); 11825 sd_ssc_fini(ssc); 11826 11827 return (rval); 11828 } 11829 11830 /* 11831 * Function: sd_ssc_init 11832 * 11833 * Description: Uscsi end-user call this function to initialize necessary 11834 * fields, such as uscsi_cmd and sd_uscsi_info struct. 11835 * 11836 * The return value of sd_send_scsi_cmd will be treated as a 11837 * fault in various conditions. 
Even if it is not zero, some
11838 * callers may ignore the return value. That is to say, we cannot
11839 * make an accurate assessment in sdintr, since a command
11840 * failing in sdintr does not mean the caller of
11841 * sd_send_scsi_cmd will treat it as a real failure.
11842 *
11843 * To avoid printing too many error logs for a failed uscsi
11844 * packet that the caller may not treat as a failure, the
11845 * sd driver keeps silent while handling all uscsi commands.
11846 *
11847 * During detach->attach and attach-open, for some types of
11848 * problems, the driver should provide information about
11849 * the problem encountered. Devices use USCSI_SILENT, which
11850 * suppresses all driver information. The result is that no
11851 * information about the problem is available. Being
11852 * completely silent during this time is inappropriate. The
11853 * driver needs a more selective filter than USCSI_SILENT, so
11854 * that information related to faults is provided.
11855 *
11856 * To make an accurate assessment, the caller of
11857 * sd_send_scsi_USCSI_CMD should take ownership and
11858 * gather the information necessary to print error messages.
11859 *
11860 * If we want to print the necessary info for a uscsi command, we
11861 * need to keep the uscsi_cmd and sd_uscsi_info until we can make
11862 * the assessment. We use sd_ssc_init to allocate the necessary
11863 * structs for sending a uscsi command, and we are also
11864 * responsible for freeing the memory by calling
11865 * sd_ssc_fini.
11866 *
11867 * The calling sequence will look like:
11868 * sd_ssc_init->
11869 *
11870 * ...
11871 *
11872 * sd_send_scsi_USCSI_CMD->
11873 * sd_ssc_send-> - - - sdintr
11874 * ...
11875 *
11876 * if we think the return value should be treated as a
11877 * failure, we make the assessment here and print whatever is
11878 * necessary by retrieving the uscsi_cmd and sd_uscsi_info
11879 *
11880 * ...
11881 *
11882 * sd_ssc_fini
11883 *
11884 *
11885 * Arguments: un - pointer to driver soft state (unit) structure for this
11886 * target.
11887 *
11888 * Return code: sd_ssc_t - pointer to allocated sd_ssc_t struct, it contains
11889 * uscsi_cmd and sd_uscsi_info.
11890 * NULL - if memory for the sd_ssc_t struct cannot be allocated
11891 *
11892 * Context: Kernel Thread.
11893 */
11894 static sd_ssc_t *
11895 sd_ssc_init(struct sd_lun *un)
11896 {
11897 sd_ssc_t *ssc;
11898 struct uscsi_cmd *ucmdp;
11899 struct sd_uscsi_info *uip;
11900
11901 ASSERT(un != NULL);
11902 ASSERT(!mutex_owned(SD_MUTEX(un)));
11903
11904 /*
11905 * Allocate sd_ssc_t structure
11906 */
11907 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP);
11908
11909 /*
11910 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine
11911 */
11912 ucmdp = scsi_uscsi_alloc();
11913
11914 /*
11915 * Allocate sd_uscsi_info structure
11916 */
11917 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
11918
11919 ssc->ssc_uscsi_cmd = ucmdp;
11920 ssc->ssc_uscsi_info = uip;
11921 ssc->ssc_un = un;
11922
11923 return (ssc);
11924 }
11925
11926 /*
11927 * Function: sd_ssc_fini
11928 *
11929 * Description: Free the sd_ssc_t struct and the structures hanging off it.
11930 *
11931 * Arguments: ssc - struct pointer of sd_ssc_t.
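 *
 * Every sd_ssc_init() must be paired with an sd_ssc_fini(); the
 * canonical pairing, as in sd_send_scsi_cmd() above:
 *
 *	ssc = sd_ssc_init(un);
 *	rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag);
 *	sd_ssc_fini(ssc);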
11932 */
11933 static void
11934 sd_ssc_fini(sd_ssc_t *ssc)
11935 {
11936 scsi_uscsi_free(ssc->ssc_uscsi_cmd);
11937
11938 if (ssc->ssc_uscsi_info != NULL) {
11939 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info));
11940 ssc->ssc_uscsi_info = NULL;
11941 }
11942
11943 kmem_free(ssc, sizeof (sd_ssc_t));
11944 ssc = NULL;
11945 }
11946
11947 /*
11948 * Function: sd_ssc_send
11949 *
11950 * Description: Runs a USCSI command for user when called through sdioctl,
11951 * or for the driver.
11952 *
11953 * Arguments: ssc - the sd_ssc_t struct carrying the uscsi_cmd and
11954 * sd_uscsi_info.
11955 * incmd - ptr to a valid uscsi_cmd struct
11956 * flag - bit flag, indicating open settings, 32/64 bit type
11957 * dataspace - UIO_USERSPACE or UIO_SYSSPACE
11958 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11959 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11960 * to use the USCSI "direct" chain and bypass the normal
11961 * command waitq.
11962 *
11963 * Return Code: 0 - successful completion of the given command
11964 * EIO - scsi_uscsi_handle_command() failed
11965 * ENXIO - soft state not found for specified dev
11966 * ECANCELED - command cancelled due to low power
11967 * EINVAL
11968 * EFAULT - copyin/copyout error
11969 * return code of scsi_uscsi_handle_command():
11970 * EIO
11971 * ENXIO
11972 * EACCES
11973 *
11974 * Context: Kernel Thread;
11975 * Waits for command to complete. Can sleep.
11976 */
11977 static int
11978 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag,
11979 enum uio_seg dataspace, int path_flag)
11980 {
11981 struct sd_uscsi_info *uip;
11982 struct uscsi_cmd *uscmd;
11983 struct sd_lun *un;
11984 dev_t dev;
11985
11986 int format = 0;
11987 int rval;
11988
11989 ASSERT(ssc != NULL);
11990 un = ssc->ssc_un;
11991 ASSERT(un != NULL);
11992 uscmd = ssc->ssc_uscsi_cmd;
11993 ASSERT(uscmd != NULL);
11994 ASSERT(!mutex_owned(SD_MUTEX(un)));
11995 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
11996 /*
11997 * If we get here, the previous uscsi command has not
11998 * been processed by sd_ssc_assessment. This violates
11999 * our rules of FMA telemetry processing, so we print
12000 * this message along with the last undisposed
12001 * uscsi command.
12002 */
12003 if (uscmd->uscsi_cdb != NULL) {
12004 SD_INFO(SD_LOG_SDTEST, un,
12005 "sd_ssc_send is missing the alternative "
12006 "sd_ssc_assessment when running command 0x%x.\n",
12007 uscmd->uscsi_cdb[0]);
12008 }
12009 /*
12010 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be
12011 * the initial status.
12012 */
12013 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12014 }
12015
12016 /*
12017 * We need to make sure every sd_ssc_send is followed by
12018 * a sd_ssc_assessment so that FMA telemetry is not lost.
12019 */
12020 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT;
12021
12022 /*
12023 * If USCSI_PMFAILFAST is set and un is in low power, fail the
12024 * command immediately.
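 *
 * For example (a sketch; the uscsi_cmd setup shown is illustrative):
 * a caller that would rather fail than power the device back up sets
 *
 *	ucmd.uscsi_flags |= USCSI_PMFAILFAST;
 *
 * and this routine then returns ECANCELED whenever the unit is in
 * low power, as checked immediately below.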
12025 */ 12026 mutex_enter(SD_MUTEX(un)); 12027 mutex_enter(&un->un_pm_mutex); 12028 if ((uscmd->uscsi_flags & USCSI_PMFAILFAST) && 12029 SD_DEVICE_IS_IN_LOW_POWER(un)) { 12030 SD_TRACE(SD_LOG_IO, un, "sd_ssc_send:" 12031 "un:0x%p is in low power\n", un); 12032 mutex_exit(&un->un_pm_mutex); 12033 mutex_exit(SD_MUTEX(un)); 12034 return (ECANCELED); 12035 } 12036 mutex_exit(&un->un_pm_mutex); 12037 mutex_exit(SD_MUTEX(un)); 12038 12039 #ifdef SDDEBUG 12040 switch (dataspace) { 12041 case UIO_USERSPACE: 12042 SD_TRACE(SD_LOG_IO, un, 12043 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un); 12044 break; 12045 case UIO_SYSSPACE: 12046 SD_TRACE(SD_LOG_IO, un, 12047 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un); 12048 break; 12049 default: 12050 SD_TRACE(SD_LOG_IO, un, 12051 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un); 12052 break; 12053 } 12054 #endif 12055 12056 rval = scsi_uscsi_copyin((intptr_t)incmd, flag, 12057 SD_ADDRESS(un), &uscmd); 12058 if (rval != 0) { 12059 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: " 12060 "scsi_uscsi_alloc_and_copyin failed\n", un); 12061 return (rval); 12062 } 12063 12064 if ((uscmd->uscsi_cdb != NULL) && 12065 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 12066 mutex_enter(SD_MUTEX(un)); 12067 un->un_f_format_in_progress = TRUE; 12068 mutex_exit(SD_MUTEX(un)); 12069 format = 1; 12070 } 12071 12072 /* 12073 * Allocate an sd_uscsi_info struct and fill it with the info 12074 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 12075 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 12076 * since we allocate the buf here in this function, we do not 12077 * need to preserve the prior contents of b_private. 12078 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 12079 */ 12080 uip = ssc->ssc_uscsi_info; 12081 uip->ui_flags = path_flag; 12082 uip->ui_cmdp = uscmd; 12083 12084 /* 12085 * Commands sent with priority are intended for error recovery 12086 * situations, and do not have retries performed. 12087 */ 12088 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 12089 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 12090 } 12091 uscmd->uscsi_flags &= ~USCSI_NOINTR; 12092 12093 dev = SD_GET_DEV(un); 12094 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 12095 sd_uscsi_strategy, NULL, uip); 12096 12097 /* 12098 * mark ssc_flags right after handle_cmd to make sure 12099 * the uscsi has been sent 12100 */ 12101 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED; 12102 12103 #ifdef SDDEBUG 12104 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 12105 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 12106 uscmd->uscsi_status, uscmd->uscsi_resid); 12107 if (uscmd->uscsi_bufaddr != NULL) { 12108 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 12109 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 12110 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 12111 if (dataspace == UIO_SYSSPACE) { 12112 SD_DUMP_MEMORY(un, SD_LOG_IO, 12113 "data", (uchar_t *)uscmd->uscsi_bufaddr, 12114 uscmd->uscsi_buflen, SD_LOG_HEX); 12115 } 12116 } 12117 #endif 12118 12119 if (format == 1) { 12120 mutex_enter(SD_MUTEX(un)); 12121 un->un_f_format_in_progress = FALSE; 12122 mutex_exit(SD_MUTEX(un)); 12123 } 12124 12125 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd); 12126 12127 return (rval); 12128 } 12129 12130 /* 12131 * Function: sd_ssc_print 12132 * 12133 * Description: Print information available to the console. 12134 * 12135 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 12136 * sd_uscsi_info in. 12137 * sd_severity - log level. 12138 * Context: Kernel thread or interrupt context. 
12139 */
12140 static void
12141 sd_ssc_print(sd_ssc_t *ssc, int sd_severity)
12142 {
12143 struct uscsi_cmd *ucmdp;
12144 struct scsi_device *devp;
12145 dev_info_t *devinfo;
12146 uchar_t *sensep;
12147 int senlen;
12148 union scsi_cdb *cdbp;
12149 uchar_t com;
12150 extern struct scsi_key_strings scsi_cmds[];
12151
12152 ASSERT(ssc != NULL);
12153 ASSERT(ssc->ssc_un != NULL);
12154
12155 if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT)
12156 return;
12157 ucmdp = ssc->ssc_uscsi_cmd;
12158 devp = SD_SCSI_DEVP(ssc->ssc_un);
12159 devinfo = SD_DEVINFO(ssc->ssc_un);
12160 ASSERT(ucmdp != NULL);
12161 ASSERT(devp != NULL);
12162 ASSERT(devinfo != NULL);
12163 sensep = (uint8_t *)ucmdp->uscsi_rqbuf;
12164 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid;
12165 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb;
12166
12167 /* In certain cases (like DOORLOCK), the cdb could be NULL. */
12168 if (cdbp == NULL)
12169 return;
12170 /* We don't print a log message if no sense data is available. */
12171 if (senlen == 0)
12172 sensep = NULL;
12173 com = cdbp->scc_cmd;
12174 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com,
12175 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL);
12176 }
12177
12178 /*
12179 * Function: sd_ssc_assessment
12180 *
12181 * Description: We use this function to make an assessment at the point
12182 * where the SD driver may encounter a potential error.
12183 *
12184 * Arguments: ssc - the sd_ssc_t struct carrying the uscsi_cmd and
12185 * sd_uscsi_info.
12186 * tp_assess - a hint of strategy for ereport posting.
12187 * Possible values of tp_assess include:
12188 * SD_FMT_IGNORE - we don't post any ereport because we're
12189 * sure that it is ok to ignore the underlying problems.
12190 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now,
12191 * but it might not be correct to ignore the underlying hardware
12192 * error.
12193 * SD_FMT_STATUS_CHECK - we will post an ereport with the
12194 * payload driver-assessment of value "fail" or
12195 * "fatal" (depending on what information we have here). This
12196 * assessment value is usually set when the SD driver thinks
12197 * a potential error has occurred (typically, when the return
12198 * value of the SCSI command is EIO).
12199 * SD_FMT_STANDARD - we will post an ereport with the payload
12200 * driver-assessment of value "info". This assessment value is
12201 * set when the SCSI command completed successfully but
12202 * returned sense data.
12203 *
12204 * Context: Kernel thread.
12205 */
12206 static void
12207 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess)
12208 {
12209 int senlen = 0;
12210 struct uscsi_cmd *ucmdp = NULL;
12211 struct sd_lun *un;
12212
12213 ASSERT(ssc != NULL);
12214 un = ssc->ssc_un;
12215 ASSERT(un != NULL);
12216 ucmdp = ssc->ssc_uscsi_cmd;
12217 ASSERT(ucmdp != NULL);
12218
12219 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
12220 ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT;
12221 } else {
12222 /*
12223 * If we get here, it indicates a wrong calling sequence of
12224 * sd_ssc_send and sd_ssc_assessment, which should always
12225 * be called in pairs so as to avoid
12226 * loss of FMA telemetry.
12227 */
12228 if (ucmdp->uscsi_cdb != NULL) {
12229 SD_INFO(SD_LOG_SDTEST, un,
12230 "sd_ssc_assessment is missing the "
12231 "alternative sd_ssc_send when running 0x%x, "
12232 "or there are superfluous sd_ssc_assessment for "
12233 "the same sd_ssc_send.\n",
12234 ucmdp->uscsi_cdb[0]);
12235 }
12236 /*
12237 * Set the ssc_flags to the initial value to avoid passing
12238 * down dirty flags to the following sd_ssc_send function.
12239 */
12240 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12241 return;
12242 }
12243
12244 /*
12245 * Only handle an issued command which is waiting for assessment.
12246 * A command which has not been issued will not have
12247 * SSC_FLAGS_INVALID_DATA set, so it's ok to just return here.
12248 */
12249 if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) {
12250 sd_ssc_print(ssc, SCSI_ERR_INFO);
12251 return;
12252 } else {
12253 /*
12254 * For an issued command, we should clear this flag so
12255 * that the sd_ssc_t structure can be reused for
12256 * multiple uscsi commands.
12257 */
12258 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED;
12259 }
12260
12261 /*
12262 * We do not deal with non-retryable (USCSI_DIAGNOSE flag set)
12263 * commands here, and we clear the ssc_flags before returning.
12264 */
12265 if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) {
12266 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12267 return;
12268 }
12269
12270 switch (tp_assess) {
12271 case SD_FMT_IGNORE:
12272 case SD_FMT_IGNORE_COMPROMISE:
12273 break;
12274 case SD_FMT_STATUS_CHECK:
12275 /*
12276 * For a failed command (including a command that succeeded
12277 * but returned invalid data).
12278 */
12279 sd_ssc_post(ssc, SD_FM_DRV_FATAL);
12280 break;
12281 case SD_FMT_STANDARD:
12282 /*
12283 * For commands that succeeded, possibly with sense
12284 * data returned.
12285 * Limitation:
12286 * We can only handle a succeeded command with sense
12287 * data sent back when auto-request-sense is enabled.
12288 */
12289 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen -
12290 ssc->ssc_uscsi_cmd->uscsi_rqresid;
12291 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) &&
12292 (un->un_f_arq_enabled == TRUE) &&
12293 senlen > 0 &&
12294 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) {
12295 sd_ssc_post(ssc, SD_FM_DRV_NOTICE);
12296 }
12297 break;
12298 default:
12299 /*
12300 * No other type of assessment should occur.
12301 */
12302 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
12303 "sd_ssc_assessment got wrong "
12304 "sd_type_assessment %d.\n", tp_assess);
12305 break;
12306 }
12307 /*
12308 * Clear the ssc_flags before returning.
12309 */
12310 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12311 }
12312
12313 /*
12314 * Function: sd_ssc_post
12315 *
12316 * Description: 1. Read the driver property to get the fm-scsi-log flag.
12317 * 2. Print a log message if fm_log_capable is non-zero.
12318 * 3. Call sd_ssc_ereport_post to post an ereport if possible.
12319 *
12320 * Context: May be called from kernel thread or interrupt context.
12321 */
12322 static void
12323 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess)
12324 {
12325 struct sd_lun *un;
12326 int sd_severity;
12327
12328 ASSERT(ssc != NULL);
12329 un = ssc->ssc_un;
12330 ASSERT(un != NULL);
12331
12332 /*
12333 * We may enter here from sd_ssc_assessment (for a USCSI command)
12334 * or be called directly from sdintr context.
12335 * We don't handle a non-disk drive (CD-ROM, removable media).
12336 * Clear the ssc_flags before return in case we've set
12337 * SSC_FLAGS_INVALID_XXX, which should be skipped for a non-disk
12338 * driver.
12339 */ 12340 if (ISCD(un) || un->un_f_has_removable_media) { 12341 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 12342 return; 12343 } 12344 12345 switch (sd_assess) { 12346 case SD_FM_DRV_FATAL: 12347 sd_severity = SCSI_ERR_FATAL; 12348 break; 12349 case SD_FM_DRV_RECOVERY: 12350 sd_severity = SCSI_ERR_RECOVERED; 12351 break; 12352 case SD_FM_DRV_RETRY: 12353 sd_severity = SCSI_ERR_RETRYABLE; 12354 break; 12355 case SD_FM_DRV_NOTICE: 12356 sd_severity = SCSI_ERR_INFO; 12357 break; 12358 default: 12359 sd_severity = SCSI_ERR_UNKNOWN; 12360 } 12361 /* print log */ 12362 sd_ssc_print(ssc, sd_severity); 12363 12364 /* always post ereport */ 12365 sd_ssc_ereport_post(ssc, sd_assess); 12366 } 12367 12368 /* 12369 * Function: sd_ssc_set_info 12370 * 12371 * Description: Mark ssc_flags and set ssc_info which would be the 12372 * payload of uderr ereport. This function will cause 12373 * sd_ssc_ereport_post to post uderr ereport only. 12374 * Besides, when ssc_flags == SSC_FLAGS_INVALID_DATA(USCSI), 12375 * the function will also call SD_ERROR or scsi_log for a 12376 * CDROM/removable-media/DDI_FM_NOT_CAPABLE device. 12377 * 12378 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 12379 * sd_uscsi_info in. 12380 * ssc_flags - indicate the sub-category of a uderr. 12381 * comp - this argument is meaningful only when 12382 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible 12383 * values include: 12384 * > 0, SD_ERROR is used with comp as the driver logging 12385 * component; 12386 * = 0, scsi-log is used to log error telemetries; 12387 * < 0, no log available for this telemetry. 12388 * 12389 * Context: Kernel thread or interrupt context 12390 */ 12391 static void 12392 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...) 12393 { 12394 va_list ap; 12395 12396 ASSERT(ssc != NULL); 12397 ASSERT(ssc->ssc_un != NULL); 12398 12399 ssc->ssc_flags |= ssc_flags; 12400 va_start(ap, fmt); 12401 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap); 12402 va_end(ap); 12403 12404 /* 12405 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command 12406 * with invalid data sent back. For non-uscsi command, the 12407 * following code will be bypassed. 12408 */ 12409 if (ssc_flags & SSC_FLAGS_INVALID_DATA) { 12410 if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) { 12411 /* 12412 * If the error belong to certain component and we 12413 * do not want it to show up on the console, we 12414 * will use SD_ERROR, otherwise scsi_log is 12415 * preferred. 12416 */ 12417 if (comp > 0) { 12418 SD_ERROR(comp, ssc->ssc_un, ssc->ssc_info); 12419 } else if (comp == 0) { 12420 scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label, 12421 CE_WARN, ssc->ssc_info); 12422 } 12423 } 12424 } 12425 } 12426 12427 /* 12428 * Function: sd_buf_iodone 12429 * 12430 * Description: Frees the sd_xbuf & returns the buf to its originator. 12431 * 12432 * Context: May be called from interrupt context. 12433 */ 12434 /* ARGSUSED */ 12435 static void 12436 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 12437 { 12438 struct sd_xbuf *xp; 12439 12440 ASSERT(un != NULL); 12441 ASSERT(bp != NULL); 12442 ASSERT(!mutex_owned(SD_MUTEX(un))); 12443 12444 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 12445 12446 xp = SD_GET_XBUF(bp); 12447 ASSERT(xp != NULL); 12448 12449 /* xbuf is gone after this */ 12450 if (ddi_xbuf_done(bp, un->un_xbuf_attr)) { 12451 mutex_enter(SD_MUTEX(un)); 12452 12453 /* 12454 * Grab time when the cmd completed. 
12455 * This is used to determine if the system has been
12456 * idle long enough for the PM framework to consider it idle.
12457 * This is for lowering the overhead, and therefore improving
12458 * performance per I/O operation.
12459 */
12460 un->un_pm_idle_time = gethrtime();
12461
12462 un->un_ncmds_in_driver--;
12463 ASSERT(un->un_ncmds_in_driver >= 0);
12464 SD_INFO(SD_LOG_IO, un,
12465 "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
12466 un->un_ncmds_in_driver);
12467
12468 mutex_exit(SD_MUTEX(un));
12469 }
12470
12471 biodone(bp); /* bp is gone after this */
12472
12473 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n");
12474 }
12475
12476
12477 /*
12478 * Function: sd_uscsi_iodone
12479 *
12480 * Description: Frees the sd_xbuf & returns the buf to its originator.
12481 *
12482 * Context: May be called from interrupt context.
12483 */
12484 /* ARGSUSED */
12485 static void
12486 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
12487 {
12488 struct sd_xbuf *xp;
12489
12490 ASSERT(un != NULL);
12491 ASSERT(bp != NULL);
12492
12493 xp = SD_GET_XBUF(bp);
12494 ASSERT(xp != NULL);
12495 ASSERT(!mutex_owned(SD_MUTEX(un)));
12496
12497 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n");
12498
12499 bp->b_private = xp->xb_private;
12500
12501 mutex_enter(SD_MUTEX(un));
12502
12503 /*
12504 * Grab the time when the cmd completed.
12505 * This is used to determine if the system has been
12506 * idle long enough for the PM framework to consider it idle.
12507 * This is for lowering the overhead, and therefore improving
12508 * performance per I/O operation.
12509 */
12510 un->un_pm_idle_time = gethrtime();
12511
12512 un->un_ncmds_in_driver--;
12513 ASSERT(un->un_ncmds_in_driver >= 0);
12514 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
12515 un->un_ncmds_in_driver);
12516
12517 mutex_exit(SD_MUTEX(un));
12518
12519 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen >
12520 SENSE_LENGTH) {
12521 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH +
12522 MAX_SENSE_LENGTH);
12523 } else {
12524 kmem_free(xp, sizeof (struct sd_xbuf));
12525 }
12526
12527 biodone(bp);
12528
12529 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
12530 }
12531
12532
12533 /*
12534 * Function: sd_mapblockaddr_iostart
12535 *
12536 * Description: Verify request lies within the partition limits for
12537 * the indicated minor device. Issue "overrun" buf if
12538 * request would exceed partition range. Converts
12539 * partition-relative block address to absolute.
12540 *
12541 * Upon exit of this function:
12542 * 1. I/O is aligned
12543 * xp->xb_blkno represents the absolute sector address
12544 * 2. I/O is misaligned
12545 * xp->xb_blkno represents the absolute logical block address
12546 * based on DEV_BSIZE. The logical block address will be
12547 * converted to physical sector address in sd_mapblocksize_\
12548 * iostart.
12549 * 3. I/O is misaligned but is aligned in the "overrun" buf
12550 * xp->xb_blkno represents the absolute logical block address
12551 * based on DEV_BSIZE. The logical block address will be
12552 * converted to physical sector address in sd_mapblocksize_\
12553 * iostart. But no RMW will be issued in this case.
12554 *
12555 * Context: Can sleep
12556 *
12557 * Issues: This follows what the old code did, in terms of accessing
12558 * some of the partition info in the unit struct without holding
12559 * the mutex. This is a general issue if the partition info
12560 * can be altered while IO is in progress:
as soon as we send
12561 * a buf, its partitioning can be invalid before it gets to the
12562 * device. Probably the right fix is to move partitioning out
12563 * of the driver entirely.
12564 */
12565
12566 static void
12567 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
12568 {
12569 diskaddr_t nblocks; /* #blocks in the given partition */
12570 daddr_t blocknum; /* Block number specified by the buf */
12571 size_t requested_nblocks;
12572 size_t available_nblocks;
12573 int partition;
12574 diskaddr_t partition_offset;
12575 struct sd_xbuf *xp;
12576 int secmask = 0, blknomask = 0;
12577 ushort_t is_aligned = TRUE;
12578
12579 ASSERT(un != NULL);
12580 ASSERT(bp != NULL);
12581 ASSERT(!mutex_owned(SD_MUTEX(un)));
12582
12583 SD_TRACE(SD_LOG_IO_PARTITION, un,
12584 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);
12585
12586 xp = SD_GET_XBUF(bp);
12587 ASSERT(xp != NULL);
12588
12589 /*
12590 * If the geometry is not indicated as valid, attempt to access
12591 * the unit & verify the geometry/label. This can be the case for
12592 * removable-media devices, or if the device was opened in
12593 * NDELAY/NONBLOCK mode.
12594 */
12595 partition = SDPART(bp->b_edev);
12596
12597 if (!SD_IS_VALID_LABEL(un)) {
12598 sd_ssc_t *ssc;
12599 /*
12600 * Initialize sd_ssc_t for internal uscsi commands.
12601 * To avoid a potential performance issue, we
12602 * allocate memory only if the label is invalid.
12603 */
12604 ssc = sd_ssc_init(un);
12605
12606 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) {
12607 /*
12608 * For removable devices it is possible to start an
12609 * I/O without a media by opening the device in nodelay
12610 * mode. Also for writable CDs there can be many
12611 * scenarios where there is no geometry yet but volume
12612 * manager is trying to issue a read() just because
12613 * it can see TOC on the CD. So do not print a message
12614 * for removables.
12615 */
12616 if (!un->un_f_has_removable_media) {
12617 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
12618 "i/o to invalid geometry\n");
12619 }
12620 bioerror(bp, EIO);
12621 bp->b_resid = bp->b_bcount;
12622 SD_BEGIN_IODONE(index, un, bp);
12623
12624 sd_ssc_fini(ssc);
12625 return;
12626 }
12627 sd_ssc_fini(ssc);
12628 }
12629
12630 nblocks = 0;
12631 (void) cmlb_partinfo(un->un_cmlbhandle, partition,
12632 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT);
12633
12634 if (un->un_f_enable_rmw) {
12635 blknomask = (un->un_phy_blocksize / DEV_BSIZE) - 1;
12636 secmask = un->un_phy_blocksize - 1;
12637 } else {
12638 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
12639 secmask = un->un_tgt_blocksize - 1;
12640 }
12641
12642 if ((bp->b_lblkno & (blknomask)) || (bp->b_bcount & (secmask))) {
12643 is_aligned = FALSE;
12644 }
12645
12646 if (!(NOT_DEVBSIZE(un)) || un->un_f_enable_rmw) {
12647 /*
12648 * If I/O is aligned, there is no need for RMW (Read Modify
12649 * Write). Convert the logical block number to the target's
12650 * physical sector number.
12651 */
12652 if (is_aligned) {
12653 xp->xb_blkno = SD_SYS2TGTBLOCK(un, xp->xb_blkno);
12654 } else {
12655 /*
12656 * There is no RMW if we're just reading, so don't
12657 * warn or error out because of it.
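 *
 * A worked example of the conversion (illustrative numbers): on a
 * 4096-byte-sector target with DEV_BSIZE of 512, blknomask is 7 and
 * secmask is 0xFFF. A buf with b_lblkno == 16 and b_bcount == 8192
 * is aligned, and SD_SYS2TGTBLOCK() maps system block 16 to target
 * sector 16 / (4096/512) = 2. b_lblkno == 17 is misaligned and
 * takes the RMW path handled below.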
12658 			 */
12659 			if (bp->b_flags & B_READ) {
12660 				/*EMPTY*/
12661 			} else if (!un->un_f_enable_rmw &&
12662 			    un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) {
12663 				bp->b_flags |= B_ERROR;
12664 				goto error_exit;
12665 			} else if (un->un_f_rmw_type == SD_RMW_TYPE_DEFAULT) {
12666 				mutex_enter(SD_MUTEX(un));
12667 				if (!un->un_f_enable_rmw &&
12668 				    un->un_rmw_msg_timeid == NULL) {
12669 					scsi_log(SD_DEVINFO(un), sd_label,
12670 					    CE_WARN, "I/O request is not "
12671 					    "aligned with %d disk sector size. "
12672 					    "It is handled through Read Modify "
12673 					    "Write but the performance is "
12674 					    "very low.\n",
12675 					    un->un_tgt_blocksize);
12676 					un->un_rmw_msg_timeid =
12677 					    timeout(sd_rmw_msg_print_handler,
12678 					    un, SD_RMW_MSG_PRINT_TIMEOUT);
12679 				} else {
12680 					un->un_rmw_incre_count++;
12681 				}
12682 				mutex_exit(SD_MUTEX(un));
12683 			}
12684
12685 			nblocks = SD_TGT2SYSBLOCK(un, nblocks);
12686 			partition_offset = SD_TGT2SYSBLOCK(un,
12687 			    partition_offset);
12688 		}
12689 	}
12690
12691 	/*
12692 	 * blocknum is the starting block number of the request. At this
12693 	 * point it is still relative to the start of the minor device.
12694 	 */
12695 	blocknum = xp->xb_blkno;
12696
12697 	/*
12698 	 * Legacy: If the starting block number is one past the last block
12699 	 * in the partition, do not set B_ERROR in the buf.
12700 	 */
12701 	if (blocknum == nblocks) {
12702 		goto error_exit;
12703 	}
12704
12705 	/*
12706 	 * Confirm that the first block of the request lies within the
12707 	 * partition limits. Also the requested number of bytes must be
12708 	 * a multiple of the system block size.
12709 	 */
12710 	if ((blocknum < 0) || (blocknum >= nblocks) ||
12711 	    ((bp->b_bcount & (DEV_BSIZE - 1)) != 0)) {
12712 		bp->b_flags |= B_ERROR;
12713 		goto error_exit;
12714 	}
12715
12716 	/*
12717 	 * If the requested # blocks exceeds the available # blocks, that
12718 	 * is an overrun of the partition.
12719 	 */
12720 	if ((!NOT_DEVBSIZE(un)) && is_aligned) {
12721 		requested_nblocks = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
12722 	} else {
12723 		requested_nblocks = SD_BYTES2SYSBLOCKS(bp->b_bcount);
12724 	}
12725
12726 	available_nblocks = (size_t)(nblocks - blocknum);
12727 	ASSERT(nblocks >= blocknum);
12728
12729 	if (requested_nblocks > available_nblocks) {
12730 		size_t resid;
12731
12732 		/*
12733 		 * Allocate an "overrun" buf to allow the request to proceed
12734 		 * for the amount of space available in the partition. The
12735 		 * amount not transferred will be added into the b_resid
12736 		 * when the operation is complete. The overrun buf
12737 		 * replaces the original buf here, and the original buf
12738 		 * is saved inside the overrun buf, for later use.
12739 		 */
12740 		if ((!NOT_DEVBSIZE(un)) && is_aligned) {
12741 			resid = SD_TGTBLOCKS2BYTES(un,
12742 			    (offset_t)(requested_nblocks - available_nblocks));
12743 		} else {
12744 			resid = SD_SYSBLOCKS2BYTES(
12745 			    (offset_t)(requested_nblocks - available_nblocks));
12746 		}
12747
12748 		size_t count = bp->b_bcount - resid;
12749 		/*
12750 		 * Note: count is an unsigned entity thus it'll NEVER
12751 		 * be less than 0 so ASSERT the original values are
12752 		 * correct.
12753 		 */
12754 		ASSERT(bp->b_bcount >= resid);
12755
12756 		bp = sd_bioclone_alloc(bp, count, blocknum,
12757 		    (int (*)(struct buf *))sd_mapblockaddr_iodone);
12758 		xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */
12759 		ASSERT(xp != NULL);
12760 	}
12761
12762 	/* At this point there should be no residual for this buf. */
12763 	ASSERT(bp->b_resid == 0);
12764
12765 	/* Convert the block number to an absolute address. */
12766 	xp->xb_blkno += partition_offset;
12767
12768 	SD_NEXT_IOSTART(index, un, bp);
12769
12770 	SD_TRACE(SD_LOG_IO_PARTITION, un,
12771 	    "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp);
12772
12773 	return;
12774
12775 error_exit:
12776 	bp->b_resid = bp->b_bcount;
12777 	SD_BEGIN_IODONE(index, un, bp);
12778 	SD_TRACE(SD_LOG_IO_PARTITION, un,
12779 	    "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp);
12780 }
12781
12782
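#ifdef SD_EXAMPLE_SKETCH	/* hypothetical guard, not a real build flag */
/*
 * Editorial sketch, not part of the driver: the partition overrun logic
 * in sd_mapblockaddr_iostart() above reduces to the arithmetic below.
 * Given a partition of part_nblk blocks, a request of req_nblk blocks
 * starting at partition-relative block blkno, and the block size blksz
 * used for the byte conversion, return how many bytes of the bcount-byte
 * request may proceed.  The example_* names are hypothetical.
 */
static size_t
example_clip_to_partition(diskaddr_t part_nblk, daddr_t blkno,
    size_t req_nblk, size_t blksz, size_t bcount)
{
	size_t avail = (size_t)(part_nblk - blkno);
	size_t resid;

	if (req_nblk <= avail)
		return (bcount);	/* the whole request fits */

	/* Bytes that fall past the end of the partition */
	resid = (req_nblk - avail) * blksz;
	return (bcount - resid);	/* size for the "overrun" clone */
}
#endif	/* SD_EXAMPLE_SKETCH */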
12783 /*
12784  * Function: sd_mapblockaddr_iodone
12785  *
12786  * Description: Completion-side processing for partition management.
12787  *
12788  * Context: May be called under interrupt context
12789  */
12790
12791 static void
12792 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp)
12793 {
12794 	/* int partition; */	/* Not used, see below. */
12795 	ASSERT(un != NULL);
12796 	ASSERT(bp != NULL);
12797 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12798
12799 	SD_TRACE(SD_LOG_IO_PARTITION, un,
12800 	    "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp);
12801
12802 	if (bp->b_iodone == (int (*)(struct buf *))sd_mapblockaddr_iodone) {
12803 		/*
12804 		 * We have an "overrun" buf to deal with...
12805 		 */
12806 		struct sd_xbuf	*xp;
12807 		struct buf	*obp;	/* ptr to the original buf */
12808
12809 		xp = SD_GET_XBUF(bp);
12810 		ASSERT(xp != NULL);
12811
12812 		/* Retrieve the pointer to the original buf */
12813 		obp = (struct buf *)xp->xb_private;
12814 		ASSERT(obp != NULL);
12815
12816 		obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid);
12817 		bioerror(obp, bp->b_error);
12818
12819 		sd_bioclone_free(bp);
12820
12821 		/*
12822 		 * Get back the original buf.
12823 		 * Note that since the restoration of xb_blkno below
12824 		 * was removed, the sd_xbuf is not needed.
12825 		 */
12826 		bp = obp;
12827 		/*
12828 		 * xp = SD_GET_XBUF(bp);
12829 		 * ASSERT(xp != NULL);
12830 		 */
12831 	}
12832
12833 	/*
12834 	 * Convert xp->xb_blkno back to a minor-device relative value.
12835 	 * Note: this has been commented out, as it is not needed in the
12836 	 * current implementation of the driver (ie, since this function
12837 	 * is at the top of the layering chains, so the info will be
12838 	 * discarded) and it is in the "hot" IO path.
12839 	 *
12840 	 * partition = getminor(bp->b_edev) & SDPART_MASK;
12841 	 * xp->xb_blkno -= un->un_offset[partition];
12842 	 */
12843
12844 	SD_NEXT_IODONE(index, un, bp);
12845
12846 	SD_TRACE(SD_LOG_IO_PARTITION, un,
12847 	    "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp);
12848 }
12849
12850
12851 /*
12852  * Function: sd_mapblocksize_iostart
12853  *
12854  * Description: Convert between system block size (un->un_sys_blocksize)
12855  *		and target block size (un->un_tgt_blocksize).
12856  *
12857  * Context: Can sleep to allocate resources.
12858  *
12859  * Assumptions: A higher layer has already performed any partition validation,
12860  *		and converted the xp->xb_blkno to an absolute value relative
12861  *		to the start of the device.
12862  *
12863  *		It is also assumed that the higher layer has implemented
12864  *		an "overrun" mechanism for the case where the request would
12865  *		read/write beyond the end of a partition.  In this case we
12866  *		assume (and ASSERT) that bp->b_resid == 0.
12867  *
12868  *		Note: The implementation for this routine assumes the target
12869  *		block size remains constant between allocation and transport.
12870  */
12871
12872 static void
12873 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp)
12874 {
12875 	struct sd_mapblocksize_info	*bsp;
12876 	struct sd_xbuf			*xp;
12877 	offset_t first_byte;
12878 	daddr_t	start_block, end_block;
12879 	daddr_t	request_bytes;
12880 	ushort_t is_aligned = FALSE;
12881
12882 	ASSERT(un != NULL);
12883 	ASSERT(bp != NULL);
12884 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12885 	ASSERT(bp->b_resid == 0);
12886
12887 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
12888 	    "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp);
12889
12890 	/*
12891 	 * For a non-writable CD, a write request is an error
12892 	 */
12893 	if (ISCD(un) && ((bp->b_flags & B_READ) == 0) &&
12894 	    (un->un_f_mmc_writable_media == FALSE)) {
12895 		bioerror(bp, EIO);
12896 		bp->b_resid = bp->b_bcount;
12897 		SD_BEGIN_IODONE(index, un, bp);
12898 		return;
12899 	}
12900
12901 	/*
12902 	 * We do not need a shadow buf if the device is using
12903 	 * un->un_sys_blocksize as its block size or if bcount == 0.
12904 	 * In this case there is no layer-private data block allocated.
12905 	 */
12906 	if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
12907 	    (bp->b_bcount == 0)) {
12908 		goto done;
12909 	}
12910
12911 #if defined(__i386) || defined(__amd64)
12912 	/* We do not support non-block-aligned transfers for ROD devices */
12913 	ASSERT(!ISROD(un));
12914 #endif
12915
12916 	xp = SD_GET_XBUF(bp);
12917 	ASSERT(xp != NULL);
12918
12919 	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12920 	    "tgt_blocksize:0x%x sys_blocksize: 0x%x\n",
12921 	    un->un_tgt_blocksize, DEV_BSIZE);
12922 	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12923 	    "request start block:0x%x\n", xp->xb_blkno);
12924 	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12925 	    "request len:0x%x\n", bp->b_bcount);
12926
12927 	/*
12928 	 * Allocate the layer-private data area for the mapblocksize layer.
12929 	 * Layers are allowed to use the xb_private member of the sd_xbuf
12930 	 * struct to store the pointer to their layer-private data block, but
12931 	 * each layer also has the responsibility of restoring the prior
12932 	 * contents of xb_private before returning the buf/xbuf to the
12933 	 * higher layer that sent it.
12934 	 *
12935 	 * Here we save the prior contents of xp->xb_private into the
12936 	 * bsp->mbs_oprivate field of our layer-private data area. This value
12937 	 * is restored by sd_mapblocksize_iodone() just prior to freeing up
12938 	 * the layer-private area and returning the buf/xbuf to the layer
12939 	 * that sent it.
12940 	 *
12941 	 * Note that here we use kmem_zalloc for the allocation as there are
12942 	 * parts of the mapblocksize code that expect certain fields to be
12943 	 * zero unless explicitly set to a required value.
12944 	 */
12945 	bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
12946 	bsp->mbs_oprivate = xp->xb_private;
12947 	xp->xb_private = bsp;
12948
12949 	/*
12950 	 * This treats the data on the disk (target) as an array of bytes.
12951 	 * first_byte is the byte offset, from the beginning of the device,
12952 	 * to the location of the request. This is converted from a
12953 	 * un->un_sys_blocksize block address to a byte offset, and then back
12954 	 * to a block address based upon a un->un_tgt_blocksize block size.
12955 	 *
12956 	 * xp->xb_blkno should be absolute upon entry into this function,
12957 	 * but it is based upon partitions that use the "system"
12958 	 * block size. It must be adjusted to reflect the block size of
12959 	 * the target.
12960 	 *
12961 	 * Note that end_block is actually the block that follows the last
12962 	 * block of the request, but that's what is needed for the computation.
12963 	 */
12964 	first_byte = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
12965 	if (un->un_f_enable_rmw) {
12966 		start_block = xp->xb_blkno =
12967 		    (first_byte / un->un_phy_blocksize) *
12968 		    (un->un_phy_blocksize / DEV_BSIZE);
12969 		end_block = ((first_byte + bp->b_bcount +
12970 		    un->un_phy_blocksize - 1) / un->un_phy_blocksize) *
12971 		    (un->un_phy_blocksize / DEV_BSIZE);
12972 	} else {
12973 		start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
12974 		end_block = (first_byte + bp->b_bcount +
12975 		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;
12976 	}
12977
12978 	/* request_bytes is rounded up to a multiple of the target block size */
12979 	request_bytes = (end_block - start_block) * un->un_tgt_blocksize;
12980
12981 	/*
12982 	 * See if the starting address of the request and the request
12983 	 * length are aligned on a un->un_tgt_blocksize boundary. If aligned
12984 	 * then we do not need to allocate a shadow buf to handle the request.
12985 	 */
12986 	if (un->un_f_enable_rmw) {
12987 		if (((first_byte % un->un_phy_blocksize) == 0) &&
12988 		    ((bp->b_bcount % un->un_phy_blocksize) == 0)) {
12989 			is_aligned = TRUE;
12990 		}
12991 	} else {
12992 		if (((first_byte % un->un_tgt_blocksize) == 0) &&
12993 		    ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
12994 			is_aligned = TRUE;
12995 		}
12996 	}
12997
12998 	if ((bp->b_flags & B_READ) == 0) {
12999 		/*
13000 		 * Lock the range for a write operation. An aligned request is
13001 		 * considered a simple write; otherwise the request must be a
13002 		 * read-modify-write.
13003 		 */
13004 		bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
13005 		    (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
13006 	}
13007
13008 	/*
13009 	 * Alloc a shadow buf if the request is not aligned. Also, this is
13010 	 * where the READ command is generated for a read-modify-write. (The
13011 	 * write phase is deferred until after the read completes.)
13012 	 */
13013 	if (is_aligned == FALSE) {
13014
13015 		struct sd_mapblocksize_info	*shadow_bsp;
13016 		struct sd_xbuf	*shadow_xp;
13017 		struct buf	*shadow_bp;
13018
13019 		/*
13020 		 * Allocate the shadow buf and its associated xbuf. Note that
13021 		 * after this call the xb_blkno value in both the original
13022 		 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
13023 		 * same: absolute relative to the start of the device, and
13024 		 * adjusted for the target block size. The b_blkno in the
13025 		 * shadow buf will also be set to this value. We should never
13026 		 * change b_blkno in the original bp however.
13027 		 *
13028 		 * Note also that the shadow buf will always need to be a
13029 		 * READ command, regardless of whether the incoming command
13030 		 * is a READ or a WRITE.
13031 		 */
13032 		shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
13033 		    xp->xb_blkno,
13034 		    (int (*)(struct buf *))sd_mapblocksize_iodone);
13035
13036 		shadow_xp = SD_GET_XBUF(shadow_bp);
13037
13038 		/*
13039 		 * Allocate the layer-private data for the shadow buf.
13040 		 * (No need to preserve xb_private in the shadow xbuf.)
13041 		 */
13042 		shadow_xp->xb_private = shadow_bsp =
13043 		    kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
13044
13045 		/*
13046 		 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone
13047 		 * to figure out where the start of the user data is (based upon
13048 		 * the system block size) in the data returned by the READ
13049 		 * command (which will be based upon the target blocksize). Note
13050 		 * that this is only really used if the request is unaligned.
13051 		 */
13052 		if (un->un_f_enable_rmw) {
13053 			bsp->mbs_copy_offset = (ssize_t)(first_byte -
13054 			    ((offset_t)xp->xb_blkno * un->un_sys_blocksize));
13055 			ASSERT((bsp->mbs_copy_offset >= 0) &&
13056 			    (bsp->mbs_copy_offset < un->un_phy_blocksize));
13057 		} else {
13058 			bsp->mbs_copy_offset = (ssize_t)(first_byte -
13059 			    ((offset_t)xp->xb_blkno * un->un_tgt_blocksize));
13060 			ASSERT((bsp->mbs_copy_offset >= 0) &&
13061 			    (bsp->mbs_copy_offset < un->un_tgt_blocksize));
13062 		}
13063
13064 		shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset;
13065
13066 		shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index;
13067
13068 		/* Transfer the wmap (if any) to the shadow buf */
13069 		shadow_bsp->mbs_wmp = bsp->mbs_wmp;
13070 		bsp->mbs_wmp = NULL;
13071
13072 		/*
13073 		 * The shadow buf goes on from here in place of the
13074 		 * original buf.
13075 		 */
13076 		shadow_bsp->mbs_orig_bp = bp;
13077 		bp = shadow_bp;
13078 	}
13079
13080 	SD_INFO(SD_LOG_IO_RMMEDIA, un,
13081 	    "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno);
13082 	SD_INFO(SD_LOG_IO_RMMEDIA, un,
13083 	    "sd_mapblocksize_iostart: tgt request len:0x%x\n",
13084 	    request_bytes);
13085 	SD_INFO(SD_LOG_IO_RMMEDIA, un,
13086 	    "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp);
13087
13088 done:
13089 	SD_NEXT_IOSTART(index, un, bp);
13090
13091 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
13092 	    "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp);
13093 }
13094
13095
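#ifdef SD_EXAMPLE_SKETCH	/* hypothetical guard, not a real build flag */
/*
 * Editorial sketch, not part of the driver: the shadow-buf geometry that
 * sd_mapblocksize_iostart() above computes.  For a target block size
 * tgt_bsz (a power-of-two multiple of DEV_BSIZE) and a request of bcount
 * bytes at DEV_BSIZE-relative block sys_blkno, the start block rounds
 * down, the end block rounds up, and copy_offset locates the user data
 * within the (larger) shadow READ.  The example_* names are hypothetical.
 */
static void
example_shadow_geometry(daddr_t sys_blkno, size_t bcount, uint32_t tgt_bsz,
    daddr_t *startp, daddr_t *endp, ssize_t *copy_offsetp)
{
	offset_t first_byte = (offset_t)sys_blkno * DEV_BSIZE;

	*startp = first_byte / tgt_bsz;				/* round down */
	*endp = (first_byte + bcount + tgt_bsz - 1) / tgt_bsz;	/* round up */
	*copy_offsetp = (ssize_t)(first_byte - (offset_t)*startp * tgt_bsz);

	/*
	 * e.g. tgt_bsz = 2048, sys_blkno = 3, bcount = 1024: start = 0,
	 * end = 2, copy_offset = 1536; the shadow READ fetches 4096 bytes
	 * and the user data begins 1536 bytes into it.
	 */
}
#endif	/* SD_EXAMPLE_SKETCH */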
13096 /*
13097  * Function: sd_mapblocksize_iodone
13098  *
13099  * Description: Completion side processing for block-size mapping.
13100  *
13101  * Context: May be called under interrupt context
13102  */
13103
13104 static void
13105 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp)
13106 {
13107 	struct sd_mapblocksize_info	*bsp;
13108 	struct sd_xbuf	*xp;
13109 	struct sd_xbuf	*orig_xp;	/* sd_xbuf for the original buf */
13110 	struct buf	*orig_bp;	/* ptr to the original buf */
13111 	offset_t	shadow_end;
13112 	offset_t	request_end;
13113 	offset_t	shadow_start;
13114 	ssize_t		copy_offset;
13115 	size_t		copy_length;
13116 	size_t		shortfall;
13117 	uint_t		is_write;	/* TRUE if this bp is a WRITE */
13118 	uint_t		has_wmap;	/* TRUE if this bp has a wmap */
13119
13120 	ASSERT(un != NULL);
13121 	ASSERT(bp != NULL);
13122
13123 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
13124 	    "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp);
13125
13126 	/*
13127 	 * There is no shadow buf or layer-private data if the target is
13128 	 * using un->un_sys_blocksize as its block size or if bcount == 0.
13129 	 */
13130 	if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
13131 	    (bp->b_bcount == 0)) {
13132 		goto exit;
13133 	}
13134
13135 	xp = SD_GET_XBUF(bp);
13136 	ASSERT(xp != NULL);
13137
13138 	/* Retrieve the pointer to the layer-private data area from the xbuf. */
13139 	bsp = xp->xb_private;
13140
13141 	is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE;
13142 	has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE;
13143
13144 	if (is_write) {
13145 		/*
13146 		 * For a WRITE request we must free up the block range that
13147 		 * we have locked up.  This holds regardless of whether this is
13148 		 * an aligned write request or a read-modify-write request.
13149 		 */
13150 		sd_range_unlock(un, bsp->mbs_wmp);
13151 		bsp->mbs_wmp = NULL;
13152 	}
13153
13154 	if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) {
13155 		/*
13156 		 * An aligned read or write command will have no shadow buf;
13157 		 * there is not much else to do with it.
13158 		 */
13159 		goto done;
13160 	}
13161
13162 	orig_bp = bsp->mbs_orig_bp;
13163 	ASSERT(orig_bp != NULL);
13164 	orig_xp = SD_GET_XBUF(orig_bp);
13165 	ASSERT(orig_xp != NULL);
13166 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13167
13168 	if (!is_write && has_wmap) {
13169 		/*
13170 		 * A READ with a wmap means this is the READ phase of a
13171 		 * read-modify-write. If an error occurred on the READ then
13172 		 * we do not proceed with the WRITE phase or copy any data.
13173 		 * Just release the write maps and return with an error.
13174 		 */
13175 		if ((bp->b_resid != 0) || (bp->b_error != 0)) {
13176 			orig_bp->b_resid = orig_bp->b_bcount;
13177 			bioerror(orig_bp, bp->b_error);
13178 			sd_range_unlock(un, bsp->mbs_wmp);
13179 			goto freebuf_done;
13180 		}
13181 	}
13182
13183 	/*
13184 	 * Here is where we set up to copy the data from the shadow buf
13185 	 * into the space associated with the original buf.
13186 	 *
13187 	 * To deal with the conversion between block sizes, these
13188 	 * computations treat the data as an array of bytes, with the
13189 	 * first byte (byte 0) corresponding to the first byte in the
13190 	 * first block on the disk.
13191 	 */
13192
13193 	/*
13194 	 * shadow_start and shadow_len indicate the location and size of
13195 	 * the data returned with the shadow IO request.
13196 	 */
13197 	if (un->un_f_enable_rmw) {
13198 		shadow_start = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
13199 	} else {
13200 		shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
13201 	}
13202 	shadow_end = shadow_start + bp->b_bcount - bp->b_resid;
13203
13204 	/*
13205 	 * copy_offset gives the offset (in bytes) from the start of the first
13206 	 * block of the READ request to the beginning of the data. We retrieve
13207 	 * this value from the layer-private area of the xbuf, where it was
13208 	 * saved by sd_mapblocksize_iostart(). copy_length gives the amount of
13209 	 * data to be copied (in bytes).
13210 	 */
13211 	copy_offset = bsp->mbs_copy_offset;
13212 	if (un->un_f_enable_rmw) {
13213 		ASSERT((copy_offset >= 0) &&
13214 		    (copy_offset < un->un_phy_blocksize));
13215 	} else {
13216 		ASSERT((copy_offset >= 0) &&
13217 		    (copy_offset < un->un_tgt_blocksize));
13218 	}
13219
13220 	copy_length = orig_bp->b_bcount;
13221 	request_end = shadow_start + copy_offset + orig_bp->b_bcount;
13222
13223 	/*
13224 	 * Set up the resid and error fields of orig_bp as appropriate.
13225 	 */
13226 	if (shadow_end >= request_end) {
13227 		/* We got all the requested data; set resid to zero */
13228 		orig_bp->b_resid = 0;
13229 	} else {
13230 		/*
13231 		 * We failed to get enough data to fully satisfy the original
13232 		 * request. Just copy back whatever data we got and set
13233 		 * up the residual and error code as required.
13234 		 *
13235 		 * 'shortfall' is the amount by which the data received with the
13236 		 * shadow buf has "fallen short" of the requested amount.
13237 		 */
13238 		shortfall = (size_t)(request_end - shadow_end);
13239
13240 		if (shortfall > orig_bp->b_bcount) {
13241 			/*
13242 			 * We did not get enough data to even partially
13243 			 * fulfill the original request. The residual is
13244 			 * equal to the amount requested.
13245 */ 13246 orig_bp->b_resid = orig_bp->b_bcount; 13247 } else { 13248 /* 13249 * We did not get all the data that we requested 13250 * from the device, but we will try to return what 13251 * portion we did get. 13252 */ 13253 orig_bp->b_resid = shortfall; 13254 } 13255 ASSERT(copy_length >= orig_bp->b_resid); 13256 copy_length -= orig_bp->b_resid; 13257 } 13258 13259 /* Propagate the error code from the shadow buf to the original buf */ 13260 bioerror(orig_bp, bp->b_error); 13261 13262 if (is_write) { 13263 goto freebuf_done; /* No data copying for a WRITE */ 13264 } 13265 13266 if (has_wmap) { 13267 /* 13268 * This is a READ command from the READ phase of a 13269 * read-modify-write request. We have to copy the data given 13270 * by the user OVER the data returned by the READ command, 13271 * then convert the command from a READ to a WRITE and send 13272 * it back to the target. 13273 */ 13274 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 13275 copy_length); 13276 13277 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 13278 13279 /* 13280 * Dispatch the WRITE command to the taskq thread, which 13281 * will in turn send the command to the target. When the 13282 * WRITE command completes, we (sd_mapblocksize_iodone()) 13283 * will get called again as part of the iodone chain 13284 * processing for it. Note that we will still be dealing 13285 * with the shadow buf at that point. 13286 */ 13287 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 13288 KM_NOSLEEP) != 0) { 13289 /* 13290 * Dispatch was successful so we are done. Return 13291 * without going any higher up the iodone chain. Do 13292 * not free up any layer-private data until after the 13293 * WRITE completes. 13294 */ 13295 return; 13296 } 13297 13298 /* 13299 * Dispatch of the WRITE command failed; set up the error 13300 * condition and send this IO back up the iodone chain. 13301 */ 13302 bioerror(orig_bp, EIO); 13303 orig_bp->b_resid = orig_bp->b_bcount; 13304 13305 } else { 13306 /* 13307 * This is a regular READ request (ie, not a RMW). Copy the 13308 * data from the shadow buf into the original buf. The 13309 * copy_offset compensates for any "misalignment" between the 13310 * shadow buf (with its un->un_tgt_blocksize blocks) and the 13311 * original buf (with its un->un_sys_blocksize blocks). 13312 */ 13313 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 13314 copy_length); 13315 } 13316 13317 freebuf_done: 13318 13319 /* 13320 * At this point we still have both the shadow buf AND the original 13321 * buf to deal with, as well as the layer-private data area in each. 13322 * Local variables are as follows: 13323 * 13324 * bp -- points to shadow buf 13325 * xp -- points to xbuf of shadow buf 13326 * bsp -- points to layer-private data area of shadow buf 13327 * orig_bp -- points to original buf 13328 * 13329 * First free the shadow buf and its associated xbuf, then free the 13330 * layer-private data area from the shadow buf. There is no need to 13331 * restore xb_private in the shadow xbuf. 13332 */ 13333 sd_shadow_buf_free(bp); 13334 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 13335 13336 /* 13337 * Now update the local variables to point to the original buf, xbuf, 13338 * and layer-private area. 
13339 	 */
13340 	bp = orig_bp;
13341 	xp = SD_GET_XBUF(bp);
13342 	ASSERT(xp != NULL);
13343 	ASSERT(xp == orig_xp);
13344 	bsp = xp->xb_private;
13345 	ASSERT(bsp != NULL);
13346
13347 done:
13348 	/*
13349 	 * Restore xb_private to whatever it was set to by the next higher
13350 	 * layer in the chain, then free the layer-private data area.
13351 	 */
13352 	xp->xb_private = bsp->mbs_oprivate;
13353 	kmem_free(bsp, sizeof (struct sd_mapblocksize_info));
13354
13355 exit:
13356 	SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp),
13357 	    "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp);
13358
13359 	SD_NEXT_IODONE(index, un, bp);
13360 }
13361
13362
13363 /*
13364  * Function: sd_checksum_iostart
13365  *
13366  * Description: A stub function for a layer that's currently not used.
13367  *		For now just a placeholder.
13368  *
13369  * Context: Kernel thread context
13370  */
13371
13372 static void
13373 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp)
13374 {
13375 	ASSERT(un != NULL);
13376 	ASSERT(bp != NULL);
13377 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13378 	SD_NEXT_IOSTART(index, un, bp);
13379 }
13380
13381
13382 /*
13383  * Function: sd_checksum_iodone
13384  *
13385  * Description: A stub function for a layer that's currently not used.
13386  *		For now just a placeholder.
13387  *
13388  * Context: May be called under interrupt context
13389  */
13390
13391 static void
13392 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp)
13393 {
13394 	ASSERT(un != NULL);
13395 	ASSERT(bp != NULL);
13396 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13397 	SD_NEXT_IODONE(index, un, bp);
13398 }
13399
13400
13401 /*
13402  * Function: sd_checksum_uscsi_iostart
13403  *
13404  * Description: A stub function for a layer that's currently not used.
13405  *		For now just a placeholder.
13406  *
13407  * Context: Kernel thread context
13408  */
13409
13410 static void
13411 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp)
13412 {
13413 	ASSERT(un != NULL);
13414 	ASSERT(bp != NULL);
13415 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13416 	SD_NEXT_IOSTART(index, un, bp);
13417 }
13418
13419
13420 /*
13421  * Function: sd_checksum_uscsi_iodone
13422  *
13423  * Description: A stub function for a layer that's currently not used.
13424  *		For now just a placeholder.
13425  *
13426  * Context: May be called under interrupt context
13427  */
13428
13429 static void
13430 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
13431 {
13432 	ASSERT(un != NULL);
13433 	ASSERT(bp != NULL);
13434 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13435 	SD_NEXT_IODONE(index, un, bp);
13436 }
13437
13438
13439 /*
13440  * Function: sd_pm_iostart
13441  *
13442  * Description: iostart-side routine for power management.
13443  *
13444  * Context: Kernel thread context
13445  */
13446
13447 static void
13448 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
13449 {
13450 	ASSERT(un != NULL);
13451 	ASSERT(bp != NULL);
13452 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13453 	ASSERT(!mutex_owned(&un->un_pm_mutex));
13454
13455 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");
13456
13457 	if (sd_pm_entry(un) != DDI_SUCCESS) {
13458 		/*
13459 		 * Set up to return the failed buf back up the 'iodone'
13460 		 * side of the calling chain.
13461 		 */
13462 		bioerror(bp, EIO);
13463 		bp->b_resid = bp->b_bcount;
13464
13465 		SD_BEGIN_IODONE(index, un, bp);
13466
13467 		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13468 		return;
13469 	}
13470
13471 	SD_NEXT_IOSTART(index, un, bp);
13472
13473 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13474 }
13475
13476
13477 /*
13478  * Function: sd_pm_iodone
13479  *
13480  * Description: iodone-side routine for power management.
13481  *
13482  * Context: may be called from interrupt context
13483  */
13484
13485 static void
13486 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
13487 {
13488 	ASSERT(un != NULL);
13489 	ASSERT(bp != NULL);
13490 	ASSERT(!mutex_owned(&un->un_pm_mutex));
13491
13492 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");
13493
13494 	/*
13495 	 * After attach the following flag is only read, so don't
13496 	 * take the penalty of acquiring a mutex for it.
13497 	 */
13498 	if (un->un_f_pm_is_enabled == TRUE) {
13499 		sd_pm_exit(un);
13500 	}
13501
13502 	SD_NEXT_IODONE(index, un, bp);
13503
13504 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n");
13505 }
13506
13507
13508 /*
13509  * Function: sd_core_iostart
13510  *
13511  * Description: Primary driver function for enqueuing buf(9S) structs from
13512  *		the system and initiating IO to the target device.
13513  *
13514  * Context: Kernel thread context. Can sleep.
13515  *
13516  * Assumptions: - The given xp->xb_blkno is absolute
13517  *		  (ie, relative to the start of the device).
13518  *		- The IO is to be done using the native blocksize of
13519  *		  the device, as specified in un->un_tgt_blocksize.
13520  */
13521 /* ARGSUSED */
13522 static void
13523 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp)
13524 {
13525 	struct sd_xbuf *xp;
13526
13527 	ASSERT(un != NULL);
13528 	ASSERT(bp != NULL);
13529 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13530 	ASSERT(bp->b_resid == 0);
13531
13532 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp);
13533
13534 	xp = SD_GET_XBUF(bp);
13535 	ASSERT(xp != NULL);
13536
13537 	mutex_enter(SD_MUTEX(un));
13538
13539 	/*
13540 	 * If we are currently in the failfast state, fail any new IO
13541 	 * that has B_FAILFAST set, then return.
13542 	 */
13543 	if ((bp->b_flags & B_FAILFAST) &&
13544 	    (un->un_failfast_state == SD_FAILFAST_ACTIVE)) {
13545 		mutex_exit(SD_MUTEX(un));
13546 		bioerror(bp, EIO);
13547 		bp->b_resid = bp->b_bcount;
13548 		SD_BEGIN_IODONE(index, un, bp);
13549 		return;
13550 	}
13551
13552 	if (SD_IS_DIRECT_PRIORITY(xp)) {
13553 		/*
13554 		 * Priority command -- transport it immediately.
13555 		 *
13556 		 * Note: We may want to assert that USCSI_DIAGNOSE is set,
13557 		 * because all direct priority commands should be associated
13558 		 * with error recovery actions which we don't want to retry.
13559 		 */
13560 		sd_start_cmds(un, bp);
13561 	} else {
13562 		/*
13563 		 * Normal command -- add it to the wait queue, then start
13564 		 * transporting commands from the wait queue.
13565 		 */
13566 		sd_add_buf_to_waitq(un, bp);
13567 		SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
13568 		sd_start_cmds(un, NULL);
13569 	}
13570
13571 	mutex_exit(SD_MUTEX(un));
13572
13573 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp);
13574 }
13575
13576
13577 /*
13578  * Function: sd_init_cdb_limits
13579  *
13580  * Description: This is to handle scsi_pkt initialization differences
13581  *		between the driver platforms.
13582 * 13583 * Legacy behaviors: 13584 * 13585 * If the block number or the sector count exceeds the 13586 * capabilities of a Group 0 command, shift over to a 13587 * Group 1 command. We don't blindly use Group 1 13588 * commands because a) some drives (CDC Wren IVs) get a 13589 * bit confused, and b) there is probably a fair amount 13590 * of speed difference for a target to receive and decode 13591 * a 10 byte command instead of a 6 byte command. 13592 * 13593 * The xfer time difference of 6 vs 10 byte CDBs is 13594 * still significant so this code is still worthwhile. 13595 * 10 byte CDBs are very inefficient with the fas HBA driver 13596 * and older disks. Each CDB byte took 1 usec with some 13597 * popular disks. 13598 * 13599 * Context: Must be called at attach time 13600 */ 13601 13602 static void 13603 sd_init_cdb_limits(struct sd_lun *un) 13604 { 13605 int hba_cdb_limit; 13606 13607 /* 13608 * Use CDB_GROUP1 commands for most devices except for 13609 * parallel SCSI fixed drives in which case we get better 13610 * performance using CDB_GROUP0 commands (where applicable). 13611 */ 13612 un->un_mincdb = SD_CDB_GROUP1; 13613 #if !defined(__fibre) 13614 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 13615 !un->un_f_has_removable_media) { 13616 un->un_mincdb = SD_CDB_GROUP0; 13617 } 13618 #endif 13619 13620 /* 13621 * Try to read the max-cdb-length supported by HBA. 13622 */ 13623 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 13624 if (0 >= un->un_max_hba_cdb) { 13625 un->un_max_hba_cdb = CDB_GROUP4; 13626 hba_cdb_limit = SD_CDB_GROUP4; 13627 } else if (0 < un->un_max_hba_cdb && 13628 un->un_max_hba_cdb < CDB_GROUP1) { 13629 hba_cdb_limit = SD_CDB_GROUP0; 13630 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 13631 un->un_max_hba_cdb < CDB_GROUP5) { 13632 hba_cdb_limit = SD_CDB_GROUP1; 13633 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 13634 un->un_max_hba_cdb < CDB_GROUP4) { 13635 hba_cdb_limit = SD_CDB_GROUP5; 13636 } else { 13637 hba_cdb_limit = SD_CDB_GROUP4; 13638 } 13639 13640 /* 13641 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 13642 * commands for fixed disks unless we are building for a 32 bit 13643 * kernel. 13644 */ 13645 #ifdef _LP64 13646 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13647 min(hba_cdb_limit, SD_CDB_GROUP4); 13648 #else 13649 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13650 min(hba_cdb_limit, SD_CDB_GROUP1); 13651 #endif 13652 13653 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 13654 ? sizeof (struct scsi_arq_status) : 1); 13655 if (!ISCD(un)) 13656 un->un_cmd_timeout = (ushort_t)sd_io_time; 13657 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; 13658 } 13659 13660 13661 /* 13662 * Function: sd_initpkt_for_buf 13663 * 13664 * Description: Allocate and initialize for transport a scsi_pkt struct, 13665 * based upon the info specified in the given buf struct. 13666 * 13667 * Assumes the xb_blkno in the request is absolute (ie, 13668 * relative to the start of the device (NOT partition!). 13669 * Also assumes that the request is using the native block 13670 * size of the device (as returned by the READ CAPACITY 13671 * command). 13672 * 13673 * Return Code: SD_PKT_ALLOC_SUCCESS 13674 * SD_PKT_ALLOC_FAILURE 13675 * SD_PKT_ALLOC_FAILURE_NO_DMA 13676 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13677 * 13678 * Context: Kernel thread and may be called from software interrupt context 13679 * as part of a sdrunout callback. 
This function may not block or
13680  *		call routines that block.
13681  */
13682
13683 static int
13684 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
13685 {
13686 	struct sd_xbuf *xp;
13687 	struct scsi_pkt *pktp = NULL;
13688 	struct sd_lun	*un;
13689 	size_t		blockcount;
13690 	daddr_t		startblock;
13691 	int		rval;
13692 	int		cmd_flags;
13693
13694 	ASSERT(bp != NULL);
13695 	ASSERT(pktpp != NULL);
13696 	xp = SD_GET_XBUF(bp);
13697 	ASSERT(xp != NULL);
13698 	un = SD_GET_UN(bp);
13699 	ASSERT(un != NULL);
13700 	ASSERT(mutex_owned(SD_MUTEX(un)));
13701 	ASSERT(bp->b_resid == 0);
13702
13703 	SD_TRACE(SD_LOG_IO_CORE, un,
13704 	    "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);
13705
13706 	mutex_exit(SD_MUTEX(un));
13707
13708 #if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
13709 	if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
13710 		/*
13711 		 * Already have a scsi_pkt -- just need DMA resources.
13712 		 * We must recompute the CDB in case the mapping returns
13713 		 * a nonzero pkt_resid.
13714 		 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
13715 		 * that is being retried, the unmap/remap of the DMA resources
13716 		 * will result in the entire transfer starting over again
13717 		 * from the very first block.
13718 		 */
13719 		ASSERT(xp->xb_pktp != NULL);
13720 		pktp = xp->xb_pktp;
13721 	} else {
13722 		pktp = NULL;
13723 	}
13724 #endif /* __i386 || __amd64 */
13725
13726 	startblock = xp->xb_blkno;	/* Absolute block num. */
13727 	blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
13728
13729 	cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);
13730
13731 	/*
13732 	 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
13733 	 * call scsi_init_pkt, and build the CDB.
13734 	 */
13735 	rval = sd_setup_rw_pkt(un, &pktp, bp,
13736 	    cmd_flags, sdrunout, (caddr_t)un,
13737 	    startblock, blockcount);
13738
13739 	if (rval == 0) {
13740 		/*
13741 		 * Success.
13742 		 *
13743 		 * If partial DMA is being used and is required for this
13744 		 * transfer, set it up here.
13745 		 */
13746 		if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
13747 		    (pktp->pkt_resid != 0)) {
13748
13749 			/*
13750 			 * Save the CDB length and pkt_resid for the
13751 			 * next xfer
13752 			 */
13753 			xp->xb_dma_resid = pktp->pkt_resid;
13754
13755 			/* rezero resid */
13756 			pktp->pkt_resid = 0;
13757
13758 		} else {
13759 			xp->xb_dma_resid = 0;
13760 		}
13761
13762 		pktp->pkt_flags = un->un_tagflags;
13763 		pktp->pkt_time  = un->un_cmd_timeout;
13764 		pktp->pkt_comp  = sdintr;
13765
13766 		pktp->pkt_private = bp;
13767 		*pktpp = pktp;
13768
13769 		SD_TRACE(SD_LOG_IO_CORE, un,
13770 		    "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);
13771
13772 #if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
13773 		xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
13774 #endif
13775
13776 		mutex_enter(SD_MUTEX(un));
13777 		return (SD_PKT_ALLOC_SUCCESS);
13778
13779 	}
13780
13781 	/*
13782 	 * SD_PKT_ALLOC_FAILURE is the only expected failure code
13783 	 * from sd_setup_rw_pkt.
13784 	 */
13785 	ASSERT(rval == SD_PKT_ALLOC_FAILURE);
13786
13787 	if (rval == SD_PKT_ALLOC_FAILURE) {
13788 		*pktpp = NULL;
13789 		/*
13790 		 * Set the driver state to RWAIT to indicate the driver
13791 		 * is waiting on resource allocations. The driver will not
13792 		 * suspend, pm_suspend, or detach while the state is RWAIT.
13793 		 */
13794 		mutex_enter(SD_MUTEX(un));
13795 		New_state(un, SD_STATE_RWAIT);
13796
13797 		SD_ERROR(SD_LOG_IO_CORE, un,
13798 		    "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);
13799
13800 		if ((bp->b_flags & B_ERROR) != 0) {
13801 			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
13802 		}
13803 		return (SD_PKT_ALLOC_FAILURE);
13804 	} else {
13805 		/*
13806 		 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13807 		 *
13808 		 * This should never happen. Maybe someone messed with the
13809 		 * kernel's minphys?
13810 		 */
13811 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13812 		    "Request rejected: too large for CDB: "
13813 		    "lba:0x%08lx len:0x%08lx\n", startblock, blockcount);
13814 		SD_ERROR(SD_LOG_IO_CORE, un,
13815 		    "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
13816 		mutex_enter(SD_MUTEX(un));
13817 		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13818
13819 	}
13820 }
13821
13822
13823 /*
13824  * Function: sd_destroypkt_for_buf
13825  *
13826  * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
13827  *
13828  * Context: Kernel thread or interrupt context
13829  */
13830
13831 static void
13832 sd_destroypkt_for_buf(struct buf *bp)
13833 {
13834 	ASSERT(bp != NULL);
13835 	ASSERT(SD_GET_UN(bp) != NULL);
13836
13837 	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13838 	    "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);
13839
13840 	ASSERT(SD_GET_PKTP(bp) != NULL);
13841 	scsi_destroy_pkt(SD_GET_PKTP(bp));
13842
13843 	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13844 	    "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
13845 }
13846
13847 /*
13848  * Function: sd_setup_rw_pkt
13849  *
13850  * Description: Determines appropriate CDB group for the requested LBA
13851  *		and transfer length, calls scsi_init_pkt, and builds
13852  *		the CDB. Do not use for partial DMA transfers except
13853  *		for the initial transfer since the CDB size must
13854  *		remain constant.
13855  *
13856  * Context: Kernel thread and may be called from software interrupt
13857  *	    context as part of a sdrunout callback. This function may not
13858  *	    block or call routines that block.
13859  */
13860
13861
13862 int
13863 sd_setup_rw_pkt(struct sd_lun *un,
13864     struct scsi_pkt **pktpp, struct buf *bp, int flags,
13865     int (*callback)(caddr_t), caddr_t callback_arg,
13866     diskaddr_t lba, uint32_t blockcount)
13867 {
13868 	struct scsi_pkt *return_pktp;
13869 	union scsi_cdb *cdbp;
13870 	struct sd_cdbinfo *cp = NULL;
13871 	int i;
13872
13873 	/*
13874 	 * See which size CDB to use, based upon the request.
13875 	 */
13876 	for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {
13877
13878 		/*
13879 		 * Check lba and block count against sd_cdbtab limits.
13880 		 * In the partial DMA case, we have to use the same size
13881 		 * CDB for all the transfers. Check lba + blockcount
13882 		 * against the max LBA so we know that segment of the
13883 		 * transfer can use the CDB we select.
13884 		 */
13885 		if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
13886 		    (blockcount <= sd_cdbtab[i].sc_maxlen)) {
13887
13888 			/*
13889 			 * The command will fit into the CDB type
13890 			 * specified by sd_cdbtab[i].
13891 			 */
13892 			cp = sd_cdbtab + i;
13893
13894 			/*
13895 			 * Call scsi_init_pkt so we can fill in the
13896 			 * CDB.
13897 			 */
13898 			return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
13899 			    bp, cp->sc_grpcode, un->un_status_len, 0,
13900 			    flags, callback, callback_arg);
13901
13902 			if (return_pktp != NULL) {
13903
13904 				/*
13905 				 * Return new value of pkt
13906 				 */
13907 				*pktpp = return_pktp;
13908
13909 				/*
13910 				 * To be safe, zero the CDB, ensuring there is
13911 				 * no leftover data from a previous command.
13912 				 */
13913 				bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);
13914
13915 				/*
13916 				 * Handle partial DMA mapping
13917 				 */
13918 				if (return_pktp->pkt_resid != 0) {
13919
13920 					/*
13921 					 * Not going to xfer as many blocks as
13922 					 * originally expected
13923 					 */
13924 					blockcount -=
13925 					    SD_BYTES2TGTBLOCKS(un,
13926 					    return_pktp->pkt_resid);
13927 				}
13928
13929 				cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;
13930
13931 				/*
13932 				 * Set command byte based on the CDB
13933 				 * type we matched.
13934 				 */
13935 				cdbp->scc_cmd = cp->sc_grpmask |
13936 				    ((bp->b_flags & B_READ) ?
13937 				    SCMD_READ : SCMD_WRITE);
13938
13939 				SD_FILL_SCSI1_LUN(un, return_pktp);
13940
13941 				/*
13942 				 * Fill in LBA and length
13943 				 */
13944 				ASSERT((cp->sc_grpcode == CDB_GROUP1) ||
13945 				    (cp->sc_grpcode == CDB_GROUP4) ||
13946 				    (cp->sc_grpcode == CDB_GROUP0) ||
13947 				    (cp->sc_grpcode == CDB_GROUP5));
13948
13949 				if (cp->sc_grpcode == CDB_GROUP1) {
13950 					FORMG1ADDR(cdbp, lba);
13951 					FORMG1COUNT(cdbp, blockcount);
13952 					return (0);
13953 				} else if (cp->sc_grpcode == CDB_GROUP4) {
13954 					FORMG4LONGADDR(cdbp, lba);
13955 					FORMG4COUNT(cdbp, blockcount);
13956 					return (0);
13957 				} else if (cp->sc_grpcode == CDB_GROUP0) {
13958 					FORMG0ADDR(cdbp, lba);
13959 					FORMG0COUNT(cdbp, blockcount);
13960 					return (0);
13961 				} else if (cp->sc_grpcode == CDB_GROUP5) {
13962 					FORMG5ADDR(cdbp, lba);
13963 					FORMG5COUNT(cdbp, blockcount);
13964 					return (0);
13965 				}
13966
13967 				/*
13968 				 * It should be impossible to not match one
13969 				 * of the CDB types above, so we should never
13970 				 * reach this point. Set the CDB command byte
13971 				 * to test-unit-ready to avoid writing
13972 				 * to somewhere we don't intend.
13973 				 */
13974 				cdbp->scc_cmd = SCMD_TEST_UNIT_READY;
13975 				return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13976 			} else {
13977 				/*
13978 				 * Couldn't get scsi_pkt
13979 				 */
13980 				return (SD_PKT_ALLOC_FAILURE);
13981 			}
13982 		}
13983 	}
13984
13985 	/*
13986 	 * None of the available CDB types were suitable. This really
13987 	 * should never happen: on a 64 bit system we support
13988 	 * READ16/WRITE16 which will hold an entire 64 bit disk address
13989 	 * and on a 32 bit system we will refuse to bind to a device
13990 	 * larger than 2TB so addresses will never be larger than 32 bits.
13991 	 */
13992 	return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13993 }
13994
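#ifdef SD_EXAMPLE_SKETCH	/* hypothetical guard, not a real build flag */
/*
 * Editorial sketch, not part of the driver: the essence of the sd_cdbtab
 * scan in sd_setup_rw_pkt() above.  The limits below are the standard
 * SCSI READ/WRITE addressing limits for each CDB group; the real table's
 * sc_maxlba/sc_maxlen fields express the same idea.  The example_* names
 * are hypothetical.
 */
static const struct {
	uint64_t max_lba;	/* highest addressable LBA */
	uint64_t max_len;	/* largest transfer, in blocks */
	int	 cdb_bytes;	/* CDB length */
} example_cdbtab[] = {
	{ (1ULL << 21) - 1, 256,	6 },	/* Group 0: READ(6)  */
	{ UINT32_MAX,	    UINT16_MAX,	10 },	/* Group 1: READ(10) */
	{ UINT32_MAX,	    UINT32_MAX,	12 },	/* Group 5: READ(12) */
	{ UINT64_MAX,	    UINT32_MAX,	16 },	/* Group 4: READ(16) */
};

static int
example_pick_cdb(uint64_t lba, uint64_t nblk)
{
	int i;

	/* Prefer the smallest CDB that can express the whole request */
	for (i = 0; i < 4; i++) {
		if (lba + nblk - 1 <= example_cdbtab[i].max_lba &&
		    nblk <= example_cdbtab[i].max_len)
			return (example_cdbtab[i].cdb_bytes);
	}
	return (-1);	/* no CDB can express this request */
}
#endif	/* SD_EXAMPLE_SKETCH */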
13995 /*
13996  * Function: sd_setup_next_rw_pkt
13997  *
13998  * Description: Setup packet for partial DMA transfers, except for the
13999  *		initial transfer.  sd_setup_rw_pkt should be used for
14000  *		the initial transfer.
14001  *
14002  * Context: Kernel thread and may be called from interrupt context.
14003  */
14004
14005 int
14006 sd_setup_next_rw_pkt(struct sd_lun *un,
14007     struct scsi_pkt *pktp, struct buf *bp,
14008     diskaddr_t lba, uint32_t blockcount)
14009 {
14010 	uchar_t com;
14011 	union scsi_cdb *cdbp;
14012 	uchar_t cdb_group_id;
14013
14014 	ASSERT(pktp != NULL);
14015 	ASSERT(pktp->pkt_cdbp != NULL);
14016
14017 	cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
14018 	com = cdbp->scc_cmd;
14019 	cdb_group_id = CDB_GROUPID(com);
14020
14021 	ASSERT((cdb_group_id == CDB_GROUPID_0) ||
14022 	    (cdb_group_id == CDB_GROUPID_1) ||
14023 	    (cdb_group_id == CDB_GROUPID_4) ||
14024 	    (cdb_group_id == CDB_GROUPID_5));
14025
14026 	/*
14027 	 * Move pkt to the next portion of the xfer.
14028 	 * func is NULL_FUNC so we do not have to release
14029 	 * the disk mutex here.
14030 	 */
14031 	if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0,
14032 	    NULL_FUNC, NULL) == pktp) {
14033 		/* Success.  Handle partial DMA */
14034 		if (pktp->pkt_resid != 0) {
14035 			blockcount -=
14036 			    SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
14037 		}
14038
14039 		cdbp->scc_cmd = com;
14040 		SD_FILL_SCSI1_LUN(un, pktp);
14041 		if (cdb_group_id == CDB_GROUPID_1) {
14042 			FORMG1ADDR(cdbp, lba);
14043 			FORMG1COUNT(cdbp, blockcount);
14044 			return (0);
14045 		} else if (cdb_group_id == CDB_GROUPID_4) {
14046 			FORMG4LONGADDR(cdbp, lba);
14047 			FORMG4COUNT(cdbp, blockcount);
14048 			return (0);
14049 		} else if (cdb_group_id == CDB_GROUPID_0) {
14050 			FORMG0ADDR(cdbp, lba);
14051 			FORMG0COUNT(cdbp, blockcount);
14052 			return (0);
14053 		} else if (cdb_group_id == CDB_GROUPID_5) {
14054 			FORMG5ADDR(cdbp, lba);
14055 			FORMG5COUNT(cdbp, blockcount);
14056 			return (0);
14057 		}
14058
14059 		/* Unreachable */
14060 		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
14061 	}
14062
14063 	/*
14064 	 * Error setting up next portion of cmd transfer.
14065 	 * Something is definitely very wrong and this
14066 	 * should not happen.
14067 	 */
14068 	return (SD_PKT_ALLOC_FAILURE);
14069 }
14070
14071 /*
14072  * Function: sd_initpkt_for_uscsi
14073  *
14074  * Description: Allocate and initialize for transport a scsi_pkt struct,
14075  *		based upon the info specified in the given uscsi_cmd struct.
14076  *
14077  * Return Code: SD_PKT_ALLOC_SUCCESS
14078  *		SD_PKT_ALLOC_FAILURE
14079  *		SD_PKT_ALLOC_FAILURE_NO_DMA
14080  *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
14081  *
14082  * Context: Kernel thread and may be called from software interrupt context
14083  *	    as part of a sdrunout callback. This function may not block or
14084  *	    call routines that block.
14085  */
14086
14087 static int
14088 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
14089 {
14090 	struct uscsi_cmd *uscmd;
14091 	struct sd_xbuf	*xp;
14092 	struct scsi_pkt	*pktp;
14093 	struct sd_lun	*un;
14094 	uint32_t	flags = 0;
14095
14096 	ASSERT(bp != NULL);
14097 	ASSERT(pktpp != NULL);
14098 	xp = SD_GET_XBUF(bp);
14099 	ASSERT(xp != NULL);
14100 	un = SD_GET_UN(bp);
14101 	ASSERT(un != NULL);
14102 	ASSERT(mutex_owned(SD_MUTEX(un)));
14103
14104 	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
14105 	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
14106 	ASSERT(uscmd != NULL);
14107
14108 	SD_TRACE(SD_LOG_IO_CORE, un,
14109 	    "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);
14110
14111 	/*
14112 	 * Allocate the scsi_pkt for the command.
14113 	 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
14114 	 *	 during scsi_init_pkt time and will continue to use the
14115 	 *	 same path as long as the same scsi_pkt is used without
14116 	 *	 intervening scsi_dmafree(). Since a uscsi command does
14117 	 *	 not call scsi_dmafree() before retrying a failed command,
14118 	 *	 it is necessary to make sure PKT_DMA_PARTIAL flag is NOT
14119 	 *	 set such that scsi_vhci can use another available path for
14120 	 *	 retry. Besides, a uscsi command does not allow DMA breakup,
14121 	 *	 so there is no need to set PKT_DMA_PARTIAL flag.
14122 	 */
14123 	if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
14124 		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
14125 		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
14126 		    ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
14127 		    - sizeof (struct scsi_extended_sense)), 0,
14128 		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
14129 		    sdrunout, (caddr_t)un);
14130 	} else {
14131 		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
14132 		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
14133 		    sizeof (struct scsi_arq_status), 0,
14134 		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
14135 		    sdrunout, (caddr_t)un);
14136 	}
14137
14138 	if (pktp == NULL) {
14139 		*pktpp = NULL;
14140 		/*
14141 		 * Set the driver state to RWAIT to indicate the driver
14142 		 * is waiting on resource allocations. The driver will not
14143 		 * suspend, pm_suspend, or detach while the state is RWAIT.
14144 		 */
14145 		New_state(un, SD_STATE_RWAIT);
14146
14147 		SD_ERROR(SD_LOG_IO_CORE, un,
14148 		    "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);
14149
14150 		if ((bp->b_flags & B_ERROR) != 0) {
14151 			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
14152 		}
14153 		return (SD_PKT_ALLOC_FAILURE);
14154 	}
14155
14156 	/*
14157 	 * We do not do DMA breakup for USCSI commands, so return failure
14158 	 * here if all the needed DMA resources were not allocated.
14159 	 */
14160 	if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
14161 	    (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
14162 		scsi_destroy_pkt(pktp);
14163 		SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
14164 		    "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
14165 		return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
14166 	}
14167
14168 	/* Init the cdb from the given uscsi struct */
14169 	(void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
14170 	    uscmd->uscsi_cdb[0], 0, 0, 0);
14171
14172 	SD_FILL_SCSI1_LUN(un, pktp);
14173
14174 	/*
14175 	 * Set up the optional USCSI flags. See the uscsi(7I) man page
14176 	 * for a listing of the supported flags.
14177 	 */
14178
14179 	if (uscmd->uscsi_flags & USCSI_SILENT) {
14180 		flags |= FLAG_SILENT;
14181 	}
14182
14183 	if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
14184 		flags |= FLAG_DIAGNOSE;
14185 	}
14186
14187 	if (uscmd->uscsi_flags & USCSI_ISOLATE) {
14188 		flags |= FLAG_ISOLATE;
14189 	}
14190
14191 	if (un->un_f_is_fibre == FALSE) {
14192 		if (uscmd->uscsi_flags & USCSI_RENEGOT) {
14193 			flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
14194 		}
14195 	}
14196
14197 	/*
14198 	 * Set the pkt flags here so we save time later.
14199 	 * Note: These flags are NOT in the uscsi man page!!!
14200 	 */
14201 	if (uscmd->uscsi_flags & USCSI_HEAD) {
14202 		flags |= FLAG_HEAD;
14203 	}
14204
14205 	if (uscmd->uscsi_flags & USCSI_NOINTR) {
14206 		flags |= FLAG_NOINTR;
14207 	}
14208
14209 	/*
14210 	 * For tagged queueing, things get a bit complicated.
14211 	 * Check first for head of queue and last for ordered queue.
14212 	 * If neither head nor order, use the default driver tag flags.
14213 	 */
14214 	if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
14215 		if (uscmd->uscsi_flags & USCSI_HTAG) {
14216 			flags |= FLAG_HTAG;
14217 		} else if (uscmd->uscsi_flags & USCSI_OTAG) {
14218 			flags |= FLAG_OTAG;
14219 		} else {
14220 			flags |= un->un_tagflags & FLAG_TAGMASK;
14221 		}
14222 	}
14223
14224 	if (uscmd->uscsi_flags & USCSI_NODISCON) {
14225 		flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
14226 	}
14227
14228 	pktp->pkt_flags = flags;
14229
14230 	/* Transfer uscsi information to scsi_pkt */
14231 	(void) scsi_uscsi_pktinit(uscmd, pktp);
14232
14233 	/* Copy the caller's CDB into the pkt... */
14234 	bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);
14235
14236 	if (uscmd->uscsi_timeout == 0) {
14237 		pktp->pkt_time = un->un_uscsi_timeout;
14238 	} else {
14239 		pktp->pkt_time = uscmd->uscsi_timeout;
14240 	}
14241
14242 	/* need it later to identify USCSI request in sdintr */
14243 	xp->xb_pkt_flags |= SD_XB_USCSICMD;
14244
14245 	xp->xb_sense_resid = uscmd->uscsi_rqresid;
14246
14247 	pktp->pkt_private = bp;
14248 	pktp->pkt_comp = sdintr;
14249 	*pktpp = pktp;
14250
14251 	SD_TRACE(SD_LOG_IO_CORE, un,
14252 	    "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);
14253
14254 	return (SD_PKT_ALLOC_SUCCESS);
14255 }
14256
14257
14258 /*
14259  * Function: sd_destroypkt_for_uscsi
14260  *
14261  * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
14262  *		IOs. Also saves relevant info into the associated uscsi_cmd
14263  *		struct.
14264  *
14265  * Context: May be called under interrupt context
14266  */
14267
14268 static void
14269 sd_destroypkt_for_uscsi(struct buf *bp)
14270 {
14271 	struct uscsi_cmd *uscmd;
14272 	struct sd_xbuf	*xp;
14273 	struct scsi_pkt	*pktp;
14274 	struct sd_lun	*un;
14275 	struct sd_uscsi_info *suip;
14276
14277 	ASSERT(bp != NULL);
14278 	xp = SD_GET_XBUF(bp);
14279 	ASSERT(xp != NULL);
14280 	un = SD_GET_UN(bp);
14281 	ASSERT(un != NULL);
14282 	ASSERT(!mutex_owned(SD_MUTEX(un)));
14283 	pktp = SD_GET_PKTP(bp);
14284 	ASSERT(pktp != NULL);
14285
14286 	SD_TRACE(SD_LOG_IO_CORE, un,
14287 	    "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);
14288
14289 	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
14290 	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
14291 	ASSERT(uscmd != NULL);
14292
14293 	/* Save the status and the residual into the uscsi_cmd struct */
14294 	uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
14295 	uscmd->uscsi_resid = bp->b_resid;
14296
14297 	/* Transfer scsi_pkt information to uscsi */
14298 	(void) scsi_uscsi_pktfini(pktp, uscmd);
14299
14300 	/*
14301 	 * If enabled, copy any saved sense data into the area specified
14302 	 * by the uscsi command.
14303 	 */
14304 	if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
14305 	    (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
14306 		/*
14307 		 * Note: uscmd->uscsi_rqbuf should always point to a buffer
14308 		 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
14309 		 */
14310 		uscmd->uscsi_rqstatus = xp->xb_sense_status;
14311 		uscmd->uscsi_rqresid  = xp->xb_sense_resid;
14312 		if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
14313 			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14314 			    MAX_SENSE_LENGTH);
14315 		} else {
14316 			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14317 			    SENSE_LENGTH);
14318 		}
14319 	}
14320 	/*
14321 	 * The following assignments are for SCSI FMA.
14322 	 */
14323 	ASSERT(xp->xb_private != NULL);
14324 	suip = (struct sd_uscsi_info *)xp->xb_private;
14325 	suip->ui_pkt_reason = pktp->pkt_reason;
14326 	suip->ui_pkt_state = pktp->pkt_state;
14327 	suip->ui_pkt_statistics = pktp->pkt_statistics;
14328 	suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp);
14329
14330 	/* We are done with the scsi_pkt; free it now */
14331 	ASSERT(SD_GET_PKTP(bp) != NULL);
14332 	scsi_destroy_pkt(SD_GET_PKTP(bp));
14333
14334 	SD_TRACE(SD_LOG_IO_CORE, un,
14335 	    "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
14336 }
14337
14338
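#ifdef SD_EXAMPLE_SKETCH	/* hypothetical guard, not a real build flag */
/*
 * Editorial sketch, not part of the driver: the caller-side contract that
 * sd_initpkt_for_uscsi()/sd_destroypkt_for_uscsi() above implement.  A
 * userland consumer of uscsi(7I) supplies the CDB and, with
 * USCSI_RQENABLE, a request-sense buffer of at least SENSE_LENGTH bytes;
 * on return, uscsi_rqstatus and uscsi_rqresid describe any sense data
 * copied back.  This is user-level code (it would not compile into this
 * module), and example_test_unit_ready is a hypothetical name.
 */
#include <unistd.h>
#include <string.h>
#include <sys/scsi/impl/uscsi.h>

static int
example_test_unit_ready(int fd)
{
	struct uscsi_cmd ucmd;
	char cdb[6] = { 0 };	/* TEST UNIT READY, opcode 0x00 */
	char rqbuf[255];	/* comfortably >= SENSE_LENGTH */

	(void) memset(&ucmd, 0, sizeof (ucmd));
	ucmd.uscsi_cdb = cdb;
	ucmd.uscsi_cdblen = sizeof (cdb);
	ucmd.uscsi_rqbuf = rqbuf;
	ucmd.uscsi_rqlen = sizeof (rqbuf);
	ucmd.uscsi_flags = USCSI_SILENT | USCSI_RQENABLE;
	ucmd.uscsi_timeout = 30;	/* seconds */

	return (ioctl(fd, USCSICMD, &ucmd));
}
#endif	/* SD_EXAMPLE_SKETCH */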
The struct buf *bp 14345 * argument is saved in new_xp->xb_private. 14346 * 14347 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 14348 * datalen - size of data area for the shadow bp 14349 * blkno - starting LBA 14350 * func - function pointer for b_iodone in the shadow buf. (May 14351 * be NULL if none.) 14352 * 14353 * Return Code: Pointer to allocates buf(9S) struct 14354 * 14355 * Context: Can sleep. 14356 */ 14357 14358 static struct buf * 14359 sd_bioclone_alloc(struct buf *bp, size_t datalen, daddr_t blkno, 14360 int (*func)(struct buf *)) 14361 { 14362 struct sd_lun *un; 14363 struct sd_xbuf *xp; 14364 struct sd_xbuf *new_xp; 14365 struct buf *new_bp; 14366 14367 ASSERT(bp != NULL); 14368 xp = SD_GET_XBUF(bp); 14369 ASSERT(xp != NULL); 14370 un = SD_GET_UN(bp); 14371 ASSERT(un != NULL); 14372 ASSERT(!mutex_owned(SD_MUTEX(un))); 14373 14374 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 14375 NULL, KM_SLEEP); 14376 14377 new_bp->b_lblkno = blkno; 14378 14379 /* 14380 * Allocate an xbuf for the shadow bp and copy the contents of the 14381 * original xbuf into it. 14382 */ 14383 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14384 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 14385 14386 /* 14387 * The given bp is automatically saved in the xb_private member 14388 * of the new xbuf. Callers are allowed to depend on this. 14389 */ 14390 new_xp->xb_private = bp; 14391 14392 new_bp->b_private = new_xp; 14393 14394 return (new_bp); 14395 } 14396 14397 /* 14398 * Function: sd_shadow_buf_alloc 14399 * 14400 * Description: Allocate a buf(9S) and init it as per the given buf 14401 * and the various arguments. The associated sd_xbuf 14402 * struct is (nearly) duplicated. The struct buf *bp 14403 * argument is saved in new_xp->xb_private. 14404 * 14405 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 14406 * datalen - size of data area for the shadow bp 14407 * bflags - B_READ or B_WRITE (pseudo flag) 14408 * blkno - starting LBA 14409 * func - function pointer for b_iodone in the shadow buf. (May 14410 * be NULL if none.) 14411 * 14412 * Return Code: Pointer to allocates buf(9S) struct 14413 * 14414 * Context: Can sleep. 
14415  */
14416
14417 static struct buf *
14418 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
14419     daddr_t blkno, int (*func)(struct buf *))
14420 {
14421 	struct sd_lun	*un;
14422 	struct sd_xbuf	*xp;
14423 	struct sd_xbuf	*new_xp;
14424 	struct buf	*new_bp;
14425
14426 	ASSERT(bp != NULL);
14427 	xp = SD_GET_XBUF(bp);
14428 	ASSERT(xp != NULL);
14429 	un = SD_GET_UN(bp);
14430 	ASSERT(un != NULL);
14431 	ASSERT(!mutex_owned(SD_MUTEX(un)));
14432
14433 	if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
14434 		bp_mapin(bp);
14435 	}
14436
14437 	bflags &= (B_READ | B_WRITE);
14438 #if defined(__i386) || defined(__amd64)
14439 	new_bp = getrbuf(KM_SLEEP);
14440 	new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
14441 	new_bp->b_bcount = datalen;
14442 	new_bp->b_flags = bflags |
14443 	    (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW));
14444 #else
14445 	new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
14446 	    datalen, bflags, SLEEP_FUNC, NULL);
14447 #endif
14448 	new_bp->av_forw	= NULL;
14449 	new_bp->av_back	= NULL;
14450 	new_bp->b_dev	= bp->b_dev;
14451 	new_bp->b_blkno	= blkno;
14452 	new_bp->b_iodone = func;
14453 	new_bp->b_edev	= bp->b_edev;
14454 	new_bp->b_resid	= 0;
14455
14456 	/* We need to preserve the B_FAILFAST flag */
14457 	if (bp->b_flags & B_FAILFAST) {
14458 		new_bp->b_flags |= B_FAILFAST;
14459 	}
14460
14461 	/*
14462 	 * Allocate an xbuf for the shadow bp and copy the contents of the
14463 	 * original xbuf into it.
14464 	 */
14465 	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
14466 	bcopy(xp, new_xp, sizeof (struct sd_xbuf));
14467
14468 	/* Need later to copy data between the shadow buf & original buf! */
14469 	new_xp->xb_pkt_flags |= PKT_CONSISTENT;
14470
14471 	/*
14472 	 * The given bp is automatically saved in the xb_private member
14473 	 * of the new xbuf.  Callers are allowed to depend on this.
14474 	 */
14475 	new_xp->xb_private = bp;
14476
14477 	new_bp->b_private  = new_xp;
14478
14479 	return (new_bp);
14480 }
14481
14482 /*
14483  * Function: sd_bioclone_free
14484  *
14485  * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
14486  *		in the larger-than-partition (overrun) operation.
14487  *
14488  * Context: May be called under interrupt context
14489  */
14490
14491 static void
14492 sd_bioclone_free(struct buf *bp)
14493 {
14494 	struct sd_xbuf	*xp;
14495
14496 	ASSERT(bp != NULL);
14497 	xp = SD_GET_XBUF(bp);
14498 	ASSERT(xp != NULL);
14499
14500 	/*
14501 	 * Call bp_mapout() before freeing the buf, in case a lower
14502 	 * layer or HBA had done a bp_mapin().  We must do this here
14503 	 * as we are the "originator" of the shadow buf.
14504 	 */
14505 	bp_mapout(bp);
14506
14507 	/*
14508 	 * Null out b_iodone before freeing the bp, to ensure that the driver
14509 	 * never gets confused by a stale value in this field. (Just a little
14510 	 * extra defensiveness here.)
14511 	 */
14512 	bp->b_iodone = NULL;
14513
14514 	freerbuf(bp);
14515
14516 	kmem_free(xp, sizeof (struct sd_xbuf));
14517 }
14518
14519 /*
14520  * Function: sd_shadow_buf_free
14521  *
14522  * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations.
14523  *
14524  * Context: May be called under interrupt context
14525  */
14526
14527 static void
14528 sd_shadow_buf_free(struct buf *bp)
14529 {
14530 	struct sd_xbuf	*xp;
14531
14532 	ASSERT(bp != NULL);
14533 	xp = SD_GET_XBUF(bp);
14534 	ASSERT(xp != NULL);
14535
14536 #if defined(__sparc)
14537 	/*
14538 	 * Call bp_mapout() before freeing the buf, in case a lower
14539 	 * layer or HBA had done a bp_mapin().  We must do this here
	 * as we are the "originator" of the shadow buf.
	 */
	bp_mapout(bp);
#endif

	/*
	 * Null out b_iodone before freeing the bp, to ensure that the driver
	 * never gets confused by a stale value in this field. (Just a little
	 * extra defensiveness here.)
	 */
	bp->b_iodone = NULL;

#if defined(__i386) || defined(__amd64)
	kmem_free(bp->b_un.b_addr, bp->b_bcount);
	freerbuf(bp);
#else
	scsi_free_consistent_buf(bp);
#endif

	kmem_free(xp, sizeof (struct sd_xbuf));
}


/*
 * Function: sd_print_transport_rejected_message
 *
 * Description: This implements the ludicrously complex rules for printing
 *		a "transport rejected" message. This is to address the
 *		specific problem of having a flood of this error message
 *		produced when a failover occurs.
 *
 * Context: Any.
 */

static void
sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
    int code)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(xp != NULL);

	/*
	 * Print the "transport rejected" message under the following
	 * conditions:
	 *
	 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
	 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
	 * - If the error code IS a TRAN_FATAL_ERROR, then the message is
	 *   printed the FIRST time a TRAN_FATAL_ERROR is returned from
	 *   scsi_transport(9F) (which indicates that the target might have
	 *   gone off-line). This uses the un->un_tran_fatal_count
	 *   count, which is incremented whenever a TRAN_FATAL_ERROR is
	 *   received, and reset to zero whenever a TRAN_ACCEPT is returned
	 *   from scsi_transport().
	 *
	 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
	 * the preceding cases in order for the message to be printed.
	 */
	if (((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) &&
	    (SD_FM_LOG(un) == SD_FM_LOG_NSUP)) {
		if ((sd_level_mask & SD_LOGMASK_DIAG) ||
		    (code != TRAN_FATAL_ERROR) ||
		    (un->un_tran_fatal_count == 1)) {
			switch (code) {
			case TRAN_BADPKT:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected bad packet\n");
				break;
			case TRAN_FATAL_ERROR:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected fatal error\n");
				break;
			default:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected (%d)\n", code);
				break;
			}
		}
	}
}


/*
 * Function: sd_add_buf_to_waitq
 *
 * Description: Add the given buf(9S) struct to the wait queue for the
 *		instance. If sorting is enabled, then the buf is added
 *		to the queue via an elevator sort algorithm (a la
 *		disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key.
 *		If sorting is not enabled, then the buf is just added
 *		to the end of the wait queue.
 *
 * Return Code: void
 *
 * Context: Does not sleep/block, therefore technically can be called
 *		from any context. However, if sorting is enabled then the
 *		execution time is indeterminate, and may take a long time if
 *		the wait queue grows large.
 */

static void
sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
{
	struct buf *ap;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* If the queue is empty, add the buf as the only entry & return. */
	if (un->un_waitq_headp == NULL) {
		ASSERT(un->un_waitq_tailp == NULL);
		un->un_waitq_headp = un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	ASSERT(un->un_waitq_tailp != NULL);

	/*
	 * If sorting is disabled, just add the buf to the tail end of
	 * the wait queue and return.
	 */
	if (un->un_f_disksort_disabled || un->un_f_enable_rmw) {
		un->un_waitq_tailp->av_forw = bp;
		un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	/*
	 * Sort through the list of requests currently on the wait queue
	 * and add the new buf request at the appropriate position.
	 *
	 * The un->un_waitq_headp is an activity chain pointer on which
	 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The
	 * first queue holds those requests which are positioned after
	 * the current SD_GET_BLKNO() (in the first request); the second holds
	 * requests which came in after their SD_GET_BLKNO() number was passed.
	 * Thus we implement a one-way scan, retracting after reaching
	 * the end of the drive to the first request on the second
	 * queue, at which time it becomes the first queue.
	 * A one-way scan is natural because of the way UNIX read-ahead
	 * blocks are allocated.
	 *
	 * If we lie after the first request, then we must locate the
	 * second request list and add ourselves to it.
	 */
	ap = un->un_waitq_headp;
	if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) {
		while (ap->av_forw != NULL) {
			/*
			 * Look for an "inversion" in the (normally
			 * ascending) block numbers. This indicates
			 * the start of the second request list.
			 */
			if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) {
				/*
				 * Search the second request list for the
				 * first request at a larger block number.
				 * We go before that; however if there is
				 * no such request, we go at the end.
				 */
				do {
					if (SD_GET_BLKNO(bp) <
					    SD_GET_BLKNO(ap->av_forw)) {
						goto insert;
					}
					ap = ap->av_forw;
				} while (ap->av_forw != NULL);
				goto insert;	/* after last */
			}
			ap = ap->av_forw;
		}

		/*
		 * No inversions... we will go after the last, and
		 * be the first request in the second request list.
		 */
		goto insert;
	}

	/*
	 * Request is at/after the current request...
	 * sort in the first request list.
	 */
	while (ap->av_forw != NULL) {
		/*
		 * We want to go after the current request (1) if
		 * there is an inversion after it (i.e. it is the end
		 * of the first request list), or (2) if the next
		 * request is a larger block no. than our request.
		 */
		if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
		    (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
			goto insert;
		}
		ap = ap->av_forw;
	}

	/*
	 * Neither a second list nor a larger request, therefore
	 * we go at the end of the first list (which is the same
	 * as the end of the whole shebang).
	 */
insert:
	bp->av_forw = ap->av_forw;
	ap->av_forw = bp;

	/*
	 * If we inserted onto the tail end of the waitq, make sure the
	 * tail pointer is updated.
	 */
	if (ap == un->un_waitq_tailp) {
		un->un_waitq_tailp = bp;
	}
}
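
/*
 * Worked example of the one-way elevator above (illustrative, derived
 * from the insertion logic): suppose the waitq holds requests for
 * blocks 120, 180, 40, 90, where 40 and 90 form the second list (an
 * inversion follows 180). Adding a request for block 60 walks past 120
 * and 180, detects the inversion at 180 -> 40, and lands between 40
 * and 90, giving 120, 180, 40, 60, 90. Adding block 150 instead stays
 * in the first list and yields 120, 150, 180, 40, 90.
 */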


/*
 * Function: sd_start_cmds
 *
 * Description: Remove and transport cmds from the driver queues.
 *
 * Arguments:	un - pointer to the unit (soft state) struct for the target.
 *
 *		immed_bp - ptr to a buf to be transported immediately. Only
 *		the immed_bp is transported; bufs on the waitq are not
 *		processed and the un_retry_bp is not checked. If immed_bp is
 *		NULL, then normal queue processing is performed.
 *
 * Context: May be called from kernel thread context, interrupt context,
 *		or runout callback context. This function may not block or
 *		call routines that block.
 */

static void
sd_start_cmds(struct sd_lun *un, struct buf *immed_bp)
{
	struct sd_xbuf	*xp;
	struct buf	*bp;
	void	(*statp)(kstat_io_t *);
#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
	void	(*saved_statp)(kstat_io_t *);
#endif
	int	rval;
	struct sd_fm_internal *sfip = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_ncmds_in_transport >= 0);
	ASSERT(un->un_throttle >= 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n");

	do {
#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
		saved_statp = NULL;
#endif

		/*
		 * If we are syncing or dumping, fail the command to
		 * avoid recursively calling back into scsi_transport().
		 * The dump I/O itself uses a separate code path so this
		 * only prevents non-dump I/O from being sent while dumping.
		 * File system sync takes place before dumping begins.
		 * During panic, filesystem I/O is allowed provided
		 * un_in_callback is <= 1. This is to prevent recursion
		 * such as sd_start_cmds -> scsi_transport -> sdintr ->
		 * sd_start_cmds and so on. See panic.c for more information
		 * about the states the system can be in during panic.
		 */
		if ((un->un_state == SD_STATE_DUMPING) ||
		    (ddi_in_panic() && (un->un_in_callback > 1))) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_start_cmds: panicking\n");
			goto exit;
		}

		if ((bp = immed_bp) != NULL) {
			/*
			 * We have a bp that must be transported immediately.
			 * It's OK to transport the immed_bp here without doing
			 * the throttle limit check because the immed_bp is
			 * always used in a retry/recovery case. This means
			 * that we know we are not at the throttle limit by
			 * virtue of the fact that to get here we must have
			 * already gotten a command back via sdintr(). This also
			 * relies on (1) the command on un_retry_bp preventing
			 * further commands from the waitq from being issued;
			 * and (2) the code in sd_retry_command checking the
			 * throttle limit before issuing a delayed or immediate
			 * retry. This holds even if the throttle limit is
			 * currently ratcheted down from its maximum value.
			 */
			statp = kstat_runq_enter;
			if (bp == un->un_retry_bp) {
				ASSERT((un->un_retry_statp == NULL) ||
				    (un->un_retry_statp == kstat_waitq_enter) ||
				    (un->un_retry_statp ==
				    kstat_runq_back_to_waitq));
				/*
				 * If the waitq kstat was incremented when
				 * sd_set_retry_bp() queued this bp for a retry,
				 * then we must set up statp so that the waitq
				 * count will get decremented correctly below.
				 * Also we must clear un->un_retry_statp to
				 * ensure that we do not act on a stale value
				 * in this field.
				 */
				if ((un->un_retry_statp == kstat_waitq_enter) ||
				    (un->un_retry_statp ==
				    kstat_runq_back_to_waitq)) {
					statp = kstat_waitq_to_runq;
				}
#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
				saved_statp = un->un_retry_statp;
#endif
				un->un_retry_statp = NULL;

				SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
				    "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
				    "un_throttle:%d un_ncmds_in_transport:%d\n",
				    un, un->un_retry_bp, un->un_throttle,
				    un->un_ncmds_in_transport);
			} else {
				SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: "
				    "processing priority bp:0x%p\n", bp);
			}

		} else if ((bp = un->un_waitq_headp) != NULL) {
			/*
			 * A command on the waitq is ready to go, but do not
			 * send it if:
			 *
			 * (1) the throttle limit has been reached, or
			 * (2) a retry is pending, or
			 * (3) a START_STOP_UNIT callback is pending, or
			 * (4) a callback for a SD_PATH_DIRECT_PRIORITY
			 *     command is pending.
			 *
			 * For all of these conditions, IO processing will
			 * restart after the condition is cleared.
			 */
			if (un->un_ncmds_in_transport >= un->un_throttle) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "throttle limit reached!\n");
				goto exit;
			}
			if (un->un_retry_bp != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, retry pending!\n");
				goto exit;
			}
			if (un->un_startstop_timeid != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "START_STOP pending!\n");
				goto exit;
			}
			if (un->un_direct_priority_timeid != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
				goto exit;
			}

			/* Dequeue the command */
			un->un_waitq_headp = bp->av_forw;
			if (un->un_waitq_headp == NULL) {
				un->un_waitq_tailp = NULL;
			}
			bp->av_forw = NULL;
			statp = kstat_waitq_to_runq;
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_start_cmds: processing waitq bp:0x%p\n", bp);

		} else {
			/* No work to do so bail out now */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_start_cmds: no more work, exiting!\n");
			goto exit;
		}

		/*
		 * Reset the state to normal. This is the mechanism by which
		 * the state transitions from either SD_STATE_RWAIT or
		 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
		 * If state is SD_STATE_PM_CHANGING then this command is
		 * part of the device power control and the state must
		 * not be put back to normal. Doing so would allow new
		 * commands to proceed when they shouldn't, as the device
		 * may be going off.
		 */
		if ((un->un_state != SD_STATE_SUSPENDED) &&
		    (un->un_state != SD_STATE_PM_CHANGING)) {
			New_state(un, SD_STATE_NORMAL);
		}

		xp = SD_GET_XBUF(bp);
		ASSERT(xp != NULL);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
		/*
		 * Allocate the scsi_pkt if we need one, or attach DMA
		 * resources if we have a scsi_pkt that needs them. The
		 * latter should only occur for commands that are being
		 * retried.
		 */
		if ((xp->xb_pktp == NULL) ||
		    ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) {
#else
		if (xp->xb_pktp == NULL) {
#endif
			/*
			 * There is no scsi_pkt allocated for this buf. Call
			 * the initpkt function to allocate & init one.
			 *
			 * The scsi_init_pkt runout callback functionality is
			 * implemented as follows:
			 *
			 * 1) The initpkt function always calls
			 *    scsi_init_pkt(9F) with sdrunout specified as the
			 *    callback routine.
			 * 2) A successful packet allocation is initialized and
			 *    the I/O is transported.
			 * 3) The I/O associated with an allocation resource
			 *    failure is left on its queue to be retried via
			 *    runout or the next I/O.
			 * 4) The I/O associated with a DMA error is removed
			 *    from the queue and failed with EIO. Processing of
			 *    the transport queues is also halted to be
			 *    restarted via runout or the next I/O.
			 * 5) The I/O associated with a CDB size or packet
			 *    size error is removed from the queue and failed
			 *    with EIO. Processing of the transport queues is
			 *    continued.
			 *
			 * Note: there is no interface for canceling a runout
			 * callback. To prevent the driver from detaching or
			 * suspending while a runout is pending, the driver
			 * state is set to SD_STATE_RWAIT.
			 *
			 * Note: using the scsi_init_pkt callback facility can
			 * result in an I/O request persisting at the head of
			 * the list which cannot be satisfied even after
			 * multiple retries. In the future the driver may
			 * implement some kind of maximum runout count before
			 * failing an I/O.
			 *
			 * Note: the use of funcp below may seem superfluous,
			 * but it helps warlock figure out the correct
			 * initpkt function calls (see [s]sd.wlcmd).
			 */
			struct scsi_pkt	*pktp;
			int (*funcp)(struct buf *bp, struct scsi_pkt **pktp);

			ASSERT(bp != un->un_rqs_bp);

			funcp = sd_initpkt_map[xp->xb_chain_iostart];
			switch ((*funcp)(bp, &pktp)) {
			case SD_PKT_ALLOC_SUCCESS:
				xp->xb_pktp = pktp;
				SD_TRACE(SD_LOG_IO_CORE, un,
				    "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n",
				    pktp);
				goto got_pkt;

			case SD_PKT_ALLOC_FAILURE:
				/*
				 * Temporary (hopefully) resource depletion.
				 * Since retries and RQS commands always have a
				 * scsi_pkt allocated, these cases should never
				 * get here. So the only cases this needs to
				 * handle are a bp from the waitq (which we put
				 * back onto the waitq for sdrunout), or a bp
				 * sent as an immed_bp (which we just fail).
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n");

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */

				if (bp == immed_bp) {
					/*
					 * If SD_XB_DMA_FREED is clear, then
					 * this is a failure to allocate a
					 * scsi_pkt, and we must fail the
					 * command.
					 */
					if ((xp->xb_pkt_flags &
					    SD_XB_DMA_FREED) == 0) {
						break;
					}

					/*
					 * If this immediate command is NOT our
					 * un_retry_bp, then we must fail it.
					 */
					if (bp != un->un_retry_bp) {
						break;
					}

					/*
					 * We get here if this cmd is our
					 * un_retry_bp that was DMAFREED, but
					 * scsi_init_pkt() failed to reallocate
					 * DMA resources when we attempted to
					 * retry it. This can happen when an
					 * mpxio failover is in progress, but
					 * we don't want to just fail the
					 * command in this case.
					 *
					 * Use timeout(9F) to restart it after
					 * a 100ms delay. We don't want to
					 * let sdrunout() restart it, because
					 * sdrunout() is just supposed to start
					 * commands that are sitting on the
					 * wait queue. The un_retry_bp stays
					 * set until the command completes, but
					 * sdrunout can be called many times
					 * before that happens. Since sdrunout
					 * cannot tell if the un_retry_bp is
					 * already in the transport, it could
					 * end up calling scsi_transport() for
					 * the un_retry_bp multiple times.
					 *
					 * Also: don't schedule the callback
					 * if some other callback is already
					 * pending.
					 */
					if (un->un_retry_statp == NULL) {
						/*
						 * restore the kstat pointer to
						 * keep kstat counts coherent
						 * when we do retry the command.
						 */
						un->un_retry_statp =
						    saved_statp;
					}

					if ((un->un_startstop_timeid == NULL) &&
					    (un->un_retry_timeid == NULL) &&
					    (un->un_direct_priority_timeid ==
					    NULL)) {

						un->un_retry_timeid =
						    timeout(
						    sd_start_retry_command,
						    un, SD_RESTART_TIMEOUT);
					}
					goto exit;
				}

#else
				if (bp == immed_bp) {
					break;	/* Just fail the command */
				}
#endif

				/* Add the buf back to the head of the waitq */
				bp->av_forw = un->un_waitq_headp;
				un->un_waitq_headp = bp;
				if (un->un_waitq_tailp == NULL) {
					un->un_waitq_tailp = bp;
				}
				goto exit;

			case SD_PKT_ALLOC_FAILURE_NO_DMA:
				/*
				 * HBA DMA resource failure. Fail the command
				 * and continue processing of the queues.
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: "
				    "SD_PKT_ALLOC_FAILURE_NO_DMA\n");
				break;

			case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL:
				/*
				 * Note:x86: Partial DMA mapping not supported
				 * for USCSI commands, and all the needed DMA
				 * resources were not allocated.
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: "
				    "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n");
				break;

			case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL:
				/*
				 * Note:x86: Request cannot fit into CDB based
				 * on lba and len.
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: "
				    "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n");
				break;

			default:
				/* Should NEVER get here! */
				panic("scsi_initpkt error");
				/*NOTREACHED*/
			}

			/*
			 * Fatal error in allocating a scsi_pkt for this buf.
			 * Update kstats & return the buf with an error code.
			 * We must use sd_return_failed_command_no_restart() to
			 * avoid a recursive call back into sd_start_cmds().
			 * However this also means that we must keep processing
			 * the waitq here in order to avoid stalling.
			 */
			if (statp == kstat_waitq_to_runq) {
				SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
			}
			sd_return_failed_command_no_restart(un, bp, EIO);
			if (bp == immed_bp) {
				/* immed_bp is gone by now, so clear this */
				immed_bp = NULL;
			}
			continue;
		}
got_pkt:
		if (bp == immed_bp) {
			/* goto the head of the class.... */
			xp->xb_pktp->pkt_flags |= FLAG_HEAD;
		}

		un->un_ncmds_in_transport++;
		SD_UPDATE_KSTATS(un, statp, bp);

		/*
		 * Call scsi_transport() to send the command to the target.
		 * According to SCSA architecture, we must drop the mutex here
		 * before calling scsi_transport() in order to avoid deadlock.
		 * Note that the scsi_pkt's completion routine can be executed
		 * (from interrupt context) even before the call to
		 * scsi_transport() returns.
		 */
		SD_TRACE(SD_LOG_IO_CORE, un,
		    "sd_start_cmds: calling scsi_transport()\n");
		DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp);

		mutex_exit(SD_MUTEX(un));
		rval = scsi_transport(xp->xb_pktp);
		mutex_enter(SD_MUTEX(un));

		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_start_cmds: scsi_transport() returned %d\n", rval);

		switch (rval) {
		case TRAN_ACCEPT:
			/* Clear this with every pkt accepted by the HBA */
			un->un_tran_fatal_count = 0;
			break;	/* Success; try the next cmd (if any) */

		case TRAN_BUSY:
			un->un_ncmds_in_transport--;
			ASSERT(un->un_ncmds_in_transport >= 0);

			/*
			 * Don't retry request sense; the sense data
			 * is lost when another request is sent.
			 * Free up the rqs buf and retry
			 * the original failed cmd. Update kstat.
			 */
			if (bp == un->un_rqs_bp) {
				SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
				bp = sd_mark_rqs_idle(un, xp);
				sd_retry_command(un, bp, SD_RETRIES_STANDARD,
				    NULL, NULL, EIO, un->un_busy_timeout / 500,
				    kstat_waitq_enter);
				goto exit;
			}

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
			/*
			 * Free the DMA resources for the scsi_pkt. This will
			 * allow mpxio to select another path the next time
			 * we call scsi_transport() with this scsi_pkt.
			 * See sdintr() for the rationalization behind this.
			 */
			if ((un->un_f_is_fibre == TRUE) &&
			    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
			    ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) {
				scsi_dmafree(xp->xb_pktp);
				xp->xb_pkt_flags |= SD_XB_DMA_FREED;
			}
#endif

			if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) {
				/*
				 * Commands that are SD_PATH_DIRECT_PRIORITY
				 * are for error recovery situations. These do
				 * not use the normal command waitq, so if they
				 * get a TRAN_BUSY we cannot put them back onto
				 * the waitq for later retry. One possible
				 * problem is that there could already be some
				 * other command on un_retry_bp that is waiting
				 * for this one to complete, so we would be
				 * deadlocked if we put this command back onto
				 * the waitq for later retry (since un_retry_bp
				 * must complete before the driver gets back to
				 * commands on the waitq).
				 *
				 * To avoid deadlock we must schedule a callback
				 * that will restart this command after a set
				 * interval.
				 * This should keep retrying for as
				 * long as the underlying transport keeps
				 * returning TRAN_BUSY (just like for other
				 * commands). Use the same timeout interval as
				 * for the ordinary TRAN_BUSY retry.
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: scsi_transport() returned "
				    "TRAN_BUSY for DIRECT_PRIORITY cmd!\n");

				SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
				un->un_direct_priority_timeid =
				    timeout(sd_start_direct_priority_command,
				    bp, un->un_busy_timeout / 500);

				goto exit;
			}

			/*
			 * For TRAN_BUSY, we want to reduce the throttle value,
			 * unless we are retrying a command.
			 */
			if (bp != un->un_retry_bp) {
				sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY);
			}

			/*
			 * Set up the bp to be tried again 10 ms later.
			 * Note:x86: Is there a timeout value in the sd_lun
			 * for this condition?
			 */
			sd_set_retry_bp(un, bp, un->un_busy_timeout / 500,
			    kstat_runq_back_to_waitq);
			goto exit;

		case TRAN_FATAL_ERROR:
			un->un_tran_fatal_count++;
			/* FALLTHRU */

		case TRAN_BADPKT:
		default:
			un->un_ncmds_in_transport--;
			ASSERT(un->un_ncmds_in_transport >= 0);

			/*
			 * If this is our REQUEST SENSE command with a
			 * transport error, we must get back the pointers
			 * to the original buf, and mark the REQUEST
			 * SENSE command as "available".
			 */
			if (bp == un->un_rqs_bp) {
				bp = sd_mark_rqs_idle(un, xp);
				xp = SD_GET_XBUF(bp);
			} else {
				/*
				 * Legacy behavior: do not update transport
				 * error count for request sense commands.
				 */
				SD_UPDATE_ERRSTATS(un, sd_transerrs);
			}

			SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
			sd_print_transport_rejected_message(un, xp, rval);

			/*
			 * This command will be terminated by the SD driver
			 * due to a fatal transport error. We should post
			 * ereport.io.scsi.cmd.disk.tran with driver-assessment
			 * of "fail" for any command to indicate this
			 * situation.
			 */
			if (xp->xb_ena > 0) {
				ASSERT(un->un_fm_private != NULL);
				sfip = un->un_fm_private;
				sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT;
				sd_ssc_extract_info(&sfip->fm_ssc, un,
				    xp->xb_pktp, bp, xp);
				sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
			}

			/*
			 * We must use sd_return_failed_command_no_restart() to
			 * avoid a recursive call back into sd_start_cmds().
			 * However this also means that we must keep processing
			 * the waitq here in order to avoid stalling.
			 */
			sd_return_failed_command_no_restart(un, bp, EIO);

			/*
			 * Notify any threads waiting in sd_ddi_suspend() that
			 * a command completion has occurred.
			 */
			if (un->un_state == SD_STATE_SUSPENDED) {
				cv_broadcast(&un->un_disk_busy_cv);
			}

			if (bp == immed_bp) {
				/* immed_bp is gone by now, so clear this */
				immed_bp = NULL;
			}
			break;
		}

	} while (immed_bp == NULL);

exit:
	ASSERT(mutex_owned(SD_MUTEX(un)));
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n");
}
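
/*
 * A note on the locking pattern in sd_start_cmds() above, reduced to a
 * minimal sketch (illustrative; "my_unit" is a made-up softstate): the
 * per-unit mutex must be dropped around scsi_transport(9F), because the
 * pkt completion routine may run from interrupt context, and take the
 * same mutex, even before scsi_transport() returns:
 *
 *	mutex_exit(&my_unit->mu);
 *	rval = scsi_transport(pktp);	-- completion may already run here
 *	mutex_enter(&my_unit->mu);
 *	if (rval != TRAN_ACCEPT) {
 *		-- undo in-transport accounting; requeue, retry, or fail
 *	}
 */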


/*
 * Function: sd_return_command
 *
 * Description: Returns a command to its originator (with or without an
 *		error). Also starts commands waiting to be transported
 *		to the target.
 *
 * Context: May be called from interrupt, kernel, or timeout context
 */

static void
sd_return_command(struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	struct sd_fm_internal *sfip;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != un->un_rqs_bp);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	pktp = SD_GET_PKTP(bp);
	sfip = (struct sd_fm_internal *)un->un_fm_private;
	ASSERT(sfip != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n");

	/*
	 * Note: check for the "sdrestart failed" case.
	 */
	if ((un->un_partial_dma_supported == 1) &&
	    ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) &&
	    (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
	    (xp->xb_pktp->pkt_resid == 0)) {

		if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) {
			/*
			 * Successfully set up next portion of cmd
			 * transfer, try sending it
			 */
			sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
			    NULL, NULL, 0, (clock_t)0, NULL);
			sd_start_cmds(un, NULL);
			return;	/* Note:x86: need a return here? */
		}
	}

	/*
	 * If this is the failfast bp, clear it from un_failfast_bp. This
	 * can happen if upon being re-tried the failfast bp either
	 * succeeded or encountered another error (possibly even a different
	 * error than the one that precipitated the failfast state, but in
	 * that case it would have had to exhaust retries as well). Regardless,
	 * this should not occur while the instance is in the active
	 * failfast state.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	/*
	 * Clear the failfast state upon successful completion of ANY cmd.
	 */
	if (bp->b_error == 0) {
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
		/*
		 * If this is a successful command that was previously
		 * retried, we will take it as a recovered command and post
		 * an ereport with driver-assessment of "recovered".
		 */
		if (xp->xb_ena > 0) {
			sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
			sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY);
		}
	} else {
		/*
		 * If this is a failed non-USCSI command we will post an
		 * ereport with driver-assessment set accordingly ("fail" or
		 * "fatal").
		 */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
			sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
		}
	}

	/*
	 * This is used if the command was retried one or more times. Show that
	 * we are done with it, and allow processing of the waitq to resume.
	 */
	if (bp == un->un_retry_bp) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_command: un:0x%p: "
		    "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
		/*
		 * Notify any threads waiting in sd_ddi_suspend() that
		 * a command completion has occurred.
		 */
		cv_broadcast(&un->un_disk_busy_cv);
		break;
	default:
		sd_start_cmds(un, NULL);
		break;
	}

	/* Return this command up the iodone chain to its originator. */
	mutex_exit(SD_MUTEX(un));

	(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
	xp->xb_pktp = NULL;

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
}


/*
 * Function: sd_return_failed_command
 *
 * Description: Command completion when an error occurred.
 *
 * Context: May be called from interrupt context
 */

static void
sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
{
	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	sd_return_command(un, bp);
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: exit\n");
}


/*
 * Function: sd_return_failed_command_no_restart
 *
 * Description: Same as sd_return_failed_command, but ensures that no
 *		call back into sd_start_cmds will be issued.
 *
 * Context: May be called from interrupt context
 */

static void
sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
    int errcode)
{
	struct sd_xbuf *xp;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(errcode != 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	/*
	 * If this is the failfast bp, clear it. This can happen if the
	 * failfast bp encountered a fatal error when we attempted to
	 * re-try it (such as a scsi_transport(9F) failure). However
	 * we should NOT be in an active failfast state if the failfast
	 * bp is not NULL.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	if (bp == un->un_retry_bp) {
		/*
		 * This command was retried one or more times. Show that we are
		 * done with it, and allow processing of the waitq to resume.
		 */
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_failed_command_no_restart: "
		    " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	mutex_exit(SD_MUTEX(un));

	if (xp->xb_pktp != NULL) {
		(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
		xp->xb_pktp = NULL;
	}

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: exit\n");
}


/*
 * Function: sd_retry_command
 *
 * Description: Queue up a command for retry, or (optionally) fail it
 *		if retry counts are exhausted.
 *
 * Arguments:	un - Pointer to the sd_lun struct for the target.
 *
 *		bp - Pointer to the buf for the command to be retried.
 *
 *		retry_check_flag - Flag to see which (if any) of the retry
 *		counts should be decremented/checked. If the indicated
 *		retry count is exhausted, then the command will not be
 *		retried; it will be failed instead. This should use a
 *		value equal to one of the following:
 *
 *			SD_RETRIES_NOCHECK
 *			SD_RETRIES_STANDARD
 *			SD_RETRIES_VICTIM
 *
 *		Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
 *		if the check should be made to see if FLAG_ISOLATE is set
 *		in the pkt. If FLAG_ISOLATE is set, then the command is
 *		not retried, it is simply failed.
 *
 *		user_funcp - Ptr to function to call before dispatching the
 *		command. May be NULL if no action needs to be performed.
 *		(Primarily intended for printing messages.)
 *
 *		user_arg - Optional argument to be passed along to
 *		the user_funcp call.
 *
 *		failure_code - errno return code to set in the bp if the
 *		command is going to be failed.
 *
 *		retry_delay - Retry delay interval in (clock_t) units. May
 *		be zero which indicates that the command should be retried
 *		immediately (ie, without an intervening delay).
 *
 *		statp - Ptr to kstat function to be updated if the command
 *		is queued for a delayed retry. May be NULL if no kstat
 *		update is desired.
 *
 * Context: May be called from interrupt context.
 */
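
/*
 * Example invocation (an illustrative sketch, not one of this driver's
 * actual call sites): retry bp against the standard retry limit unless
 * the pkt has FLAG_ISOLATE set, call a message-printing helper first,
 * fail with EIO once retries are exhausted, and delay each retry by one
 * busy-timeout interval while recording the bp as waiting in the kstats:
 *
 *	sd_retry_command(un, bp,
 *	    SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE,
 *	    sd_print_incomplete_msg, NULL, EIO, un->un_busy_timeout,
 *	    kstat_waitq_enter);
 *
 * (sd_print_incomplete_msg here stands for any function matching the
 * user_funcp signature described above.)
 */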

static void
sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int code),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *))
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	struct sd_fm_internal *sfip;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	sfip = (struct sd_fm_internal *)un->un_fm_private;
	ASSERT(sfip != NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);

	/*
	 * If we are syncing or dumping, fail the command to avoid
	 * recursively calling back into scsi_transport().
	 */
	if (ddi_in_panic()) {
		goto fail_command_no_log;
	}

	/*
	 * We should never be retrying a command with FLAG_DIAGNOSE set, so
	 * log an error and fail the command.
	 */
	if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
		    "ERROR, retrying FLAG_DIAGNOSE command.\n");
		sd_dump_memory(un, SD_LOG_IO, "CDB",
		    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
		sd_dump_memory(un, SD_LOG_IO, "Sense Data",
		    (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
		goto fail_command;
	}

	/*
	 * If we are suspended, then put the command onto the head of the
	 * wait queue since we don't want to start more commands, and
	 * clear the un_retry_bp. When we are resumed, we will handle the
	 * commands in the wait queue.
	 */
	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
	case SD_STATE_DUMPING:
		bp->av_forw = un->un_waitq_headp;
		un->un_waitq_headp = bp;
		if (un->un_waitq_tailp == NULL) {
			un->un_waitq_tailp = bp;
		}
		if (bp == un->un_retry_bp) {
			un->un_retry_bp = NULL;
			un->un_retry_statp = NULL;
		}
		SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
		    "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
		return;
	default:
		break;
	}

	/*
	 * If the caller wants us to check FLAG_ISOLATE, then see if that
	 * is set; if it is then we do not want to retry the command.
	 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
	 */
	if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
		if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
			goto fail_command;
		}
	}


	/*
	 * If SD_RETRIES_FAILFAST is set, it indicates that either a
	 * command timeout or a selection timeout has occurred. This means
	 * that we were unable to establish any kind of communication with
	 * the target, and subsequent retries and/or commands are likely
	 * to encounter similar results and take a long time to complete.
	 *
	 * If this is a failfast error condition, we need to update the
	 * failfast state, even if this bp does not have B_FAILFAST set.
	 */
	if (retry_check_flag & SD_RETRIES_FAILFAST) {
		if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
			ASSERT(un->un_failfast_bp == NULL);
			/*
			 * If we are already in the active failfast state, and
			 * another failfast error condition has been detected,
			 * then fail this command if it has B_FAILFAST set.
			 * If B_FAILFAST is clear, then maintain the legacy
			 * behavior of retrying heroically, even though this
			 * will take a lot more time to fail the command.
			 */
			if (bp->b_flags & B_FAILFAST) {
				goto fail_command;
			}
		} else {
			/*
			 * We're not in the active failfast state, but we
			 * have a failfast error condition, so we must begin
			 * transition to the next state. We do this regardless
			 * of whether or not this bp has B_FAILFAST set.
			 */
			if (un->un_failfast_bp == NULL) {
				/*
				 * This is the first bp to meet a failfast
				 * condition so save it on un_failfast_bp &
				 * do normal retry processing. Do not enter
				 * active failfast state yet. This marks
				 * entry into the "failfast pending" state.
				 */
				un->un_failfast_bp = bp;

			} else if (un->un_failfast_bp == bp) {
				/*
				 * This is the second time *this* bp has
				 * encountered a failfast error condition,
				 * so enter active failfast state & flush
				 * queues as appropriate.
				 */
				un->un_failfast_state = SD_FAILFAST_ACTIVE;
				un->un_failfast_bp = NULL;
				sd_failfast_flushq(un);

				/*
				 * Fail this bp now if B_FAILFAST set;
				 * otherwise continue with retries. (It would
				 * be pretty ironic if this bp succeeded on a
				 * subsequent retry after we just flushed all
				 * the queues).
				 */
				if (bp->b_flags & B_FAILFAST) {
					goto fail_command;
				}

#if !defined(lint) && !defined(__lint)
			} else {
				/*
				 * If neither of the preceding conditionals
				 * was true, it means that there is some
				 * *other* bp that has met an initial failfast
				 * condition and is currently either being
				 * retried or is waiting to be retried. In
				 * that case we should perform normal retry
				 * processing on *this* bp, since there is a
				 * chance that the current failfast condition
				 * is transient and recoverable. If that does
				 * not turn out to be the case, then retries
				 * will be cleared when the wait queue is
				 * flushed anyway.
				 */
#endif
			}
		}
	} else {
		/*
		 * SD_RETRIES_FAILFAST is clear, which indicates that we
		 * likely were able to at least establish some level of
		 * communication with the target and subsequent commands
		 * and/or retries are likely to get through to the target.
		 * In this case we want to be aggressive about clearing
		 * the failfast state. Note that this does not affect
		 * the "failfast pending" condition.
		 */
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
	}
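
	/*
	 * Informal summary of the failfast transitions implemented above
	 * (derived from the logic; not an exhaustive state table):
	 *
	 *   INACTIVE --(failfast error, un_failfast_bp == NULL)-->
	 *	"pending" (save this bp in un_failfast_bp, retry normally)
	 *   "pending" --(second failfast error on the same bp)-->
	 *	ACTIVE (flush queues; fail B_FAILFAST bps immediately)
	 *   any state --(non-failfast retry condition)--> INACTIVE
	 */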

	/*
	 * Check the specified retry count to see if we can still do
	 * any retries with this pkt before we should fail it.
	 */
	switch (retry_check_flag & SD_RETRIES_MASK) {
	case SD_RETRIES_VICTIM:
		/*
		 * Check the victim retry count. If exhausted, then fall
		 * thru & check against the standard retry count.
		 */
		if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
			/* Increment count & proceed with the retry */
			xp->xb_victim_retry_count++;
			break;
		}
		/* Victim retries exhausted, fall back to std. retries... */
		/* FALLTHRU */

	case SD_RETRIES_STANDARD:
		if (xp->xb_retry_count >= un->un_retry_count) {
			/* Retries exhausted, fail the command */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_retry_command: retries exhausted!\n");
			/*
			 * Update b_resid for failed SCMD_READ & SCMD_WRITE
			 * commands with nonzero pkt_resid.
			 */
			if ((pktp->pkt_reason == CMD_CMPLT) &&
			    (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
			    (pktp->pkt_resid != 0)) {
				uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
				if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
					SD_UPDATE_B_RESID(bp, pktp);
				}
			}
			goto fail_command;
		}
		xp->xb_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
		break;

	case SD_RETRIES_UA:
		if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
			/* Retries exhausted, fail the command */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Unit Attention retries exhausted. "
			    "Check the target.\n");
			goto fail_command;
		}
		xp->xb_ua_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n",
		    xp->xb_ua_retry_count);
		break;

	case SD_RETRIES_BUSY:
		if (xp->xb_retry_count >= un->un_busy_retry_count) {
			/* Retries exhausted, fail the command */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_retry_command: retries exhausted!\n");
			goto fail_command;
		}
		xp->xb_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
		break;

	case SD_RETRIES_NOCHECK:
	default:
		/* No retry count to check. Just proceed with the retry */
		break;
	}

	xp->xb_pktp->pkt_flags |= FLAG_HEAD;

	/*
	 * If this is a non-USCSI command that is being retried, we should
	 * post an ereport with driver-assessment of "retry".
	 * For partial DMA, request sense, and STATUS_QFULL there are no
	 * hardware errors, so we bypass ereport posting.
	 */
	if (failure_code != 0) {
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
			sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY);
		}
	}

	/*
	 * If we were given a zero timeout, we must attempt to retry the
	 * command immediately (ie, without a delay).
	 */
	if (retry_delay == 0) {
		/*
		 * Check some limiting conditions to see if we can actually
		 * do the immediate retry. If we cannot, then we must
		 * fall back to queueing up a delayed retry.
		 */
		if (un->un_ncmds_in_transport >= un->un_throttle) {
			/*
			 * We are at the throttle limit for the target,
			 * so fall back to delayed retry.
			 */
			retry_delay = un->un_busy_timeout;
			statp = kstat_waitq_enter;
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command: immed. retry hit "
			    "throttle!\n");
		} else {
			/*
			 * We're clear to proceed with the immediate retry.
			 * First call the user-provided function (if any)
			 */
			if (user_funcp != NULL) {
				(*user_funcp)(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
#ifdef __lock_lint
				sd_print_incomplete_msg(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
				sd_print_cmd_incomplete_msg(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
				sd_print_sense_failed_msg(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
#endif
			}

			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command: issuing immediate retry\n");

			/*
			 * Call sd_start_cmds() to transport the command to
			 * the target.
			 */
			sd_start_cmds(un, bp);

			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command exit\n");
			return;
		}
	}

	/*
	 * Set up to retry the command after a delay.
	 * First call the user-provided function (if any)
	 */
	if (user_funcp != NULL) {
		(*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED);
	}

	sd_set_retry_bp(un, bp, retry_delay, statp);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
	return;

fail_command:

	if (user_funcp != NULL) {
		(*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED);
	}

fail_command_no_log:

	SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_retry_command: returning failed command\n");

	sd_return_failed_command(un, bp, failure_code);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
}


/*
 * Function: sd_set_retry_bp
 *
 * Description: Set up the given bp for retry.
 *
 * Arguments:	un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		retry_delay - time interval before issuing retry (may be 0)
 *		statp - optional pointer to kstat function
 *
 * Context: May be called under interrupt context
 */

static void
sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay,
    void (*statp)(kstat_io_t *))
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp);

	/*
	 * Indicate that the command is being retried. This will not allow any
	 * other commands on the wait queue to be transported to the target
	 * until this command has been completed (success or failure). The
	 * "retry command" is not transported to the target until the given
	 * time delay expires, unless the user specified a 0 retry_delay.
	 *
	 * Note: the timeout(9F) callback routine is what actually calls
	 * sd_start_cmds() to transport the command, with the exception of a
	 * zero retry_delay. The only current implementor of a zero retry delay
	 * is the case where a START_STOP_UNIT is sent to spin-up a device.
	 */
	if (un->un_retry_bp == NULL) {
		ASSERT(un->un_retry_statp == NULL);
		un->un_retry_bp = bp;

		/*
		 * If the user has not specified a delay the command should
		 * be queued and no timeout should be scheduled.
		 */
		if (retry_delay == 0) {
			/*
			 * Save the kstat pointer that will be used in the
			 * call to SD_UPDATE_KSTATS() below, so that
			 * sd_start_cmds() can correctly decrement the waitq
			 * count when it is time to transport this command.
			 */
			un->un_retry_statp = statp;
			goto done;
		}
	}

	if (un->un_retry_bp == bp) {
		/*
		 * Save the kstat pointer that will be used in the call to
		 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can
		 * correctly decrement the waitq count when it is time to
		 * transport this command.
		 */
		un->un_retry_statp = statp;

		/*
		 * Schedule a timeout if:
		 *   1) The user has specified a delay.
		 *   2) There is not a START_STOP_UNIT callback pending.
		 *
		 * If no delay has been specified, then it is up to the caller
		 * to ensure that IO processing continues without stalling.
		 * Effectively, this means that the caller will issue the
		 * required call to sd_start_cmds(). The START_STOP_UNIT
		 * callback does this after the START STOP UNIT command has
		 * completed. In either of these cases we should not schedule
		 * a timeout callback here. Also don't schedule the timeout if
		 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart.
		 */
		if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) &&
		    (un->un_direct_priority_timeid == NULL)) {
			un->un_retry_timeid =
			    timeout(sd_start_retry_command, un, retry_delay);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_set_retry_bp: setting timeout: un: 0x%p"
			    " bp:0x%p un_retry_timeid:0x%p\n",
			    un, bp, un->un_retry_timeid);
		}
	} else {
		/*
		 * We only get in here if there is already another command
		 * waiting to be retried. In this case, we just put the
		 * given command onto the wait queue, so it can be transported
		 * after the current retry command has completed.
		 *
		 * Also we have to make sure that, if the command at the head
		 * of the wait queue is the un_failfast_bp, we do not put any
		 * other commands to be retried ahead of it.
		 */
		if ((un->un_failfast_bp != NULL) &&
		    (un->un_failfast_bp == un->un_waitq_headp)) {
			/*
			 * Enqueue this command AFTER the first command on
			 * the wait queue (which is also un_failfast_bp).
			 */
			bp->av_forw = un->un_waitq_headp->av_forw;
			un->un_waitq_headp->av_forw = bp;
			if (un->un_waitq_headp == un->un_waitq_tailp) {
				un->un_waitq_tailp = bp;
			}
		} else {
			/* Enqueue this command at the head of the waitq. */
			bp->av_forw = un->un_waitq_headp;
			un->un_waitq_headp = bp;
			if (un->un_waitq_tailp == NULL) {
				un->un_waitq_tailp = bp;
			}
		}

		if (statp == NULL) {
			statp = kstat_waitq_enter;
		}
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_set_retry_bp: un:0x%p already delayed retry\n", un);
	}

done:
	if (statp != NULL) {
		SD_UPDATE_KSTATS(un, statp, bp);
	}

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_set_retry_bp: exit un:0x%p\n", un);
}


/*
 * Function: sd_start_retry_command
 *
 * Description: Start the command that has been waiting on the target's
 *		retry queue. Called from timeout(9F) context after the
 *		retry delay interval has expired.
 *
 * Arguments:	arg - pointer to associated softstate for the device.
 *
 * Context: timeout(9F) thread context. May not sleep.
 */

static void
sd_start_retry_command(void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_retry_command: entry\n");

	mutex_enter(SD_MUTEX(un));

	un->un_retry_timeid = NULL;

	if (un->un_retry_bp != NULL) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
		    un, un->un_retry_bp);
		sd_start_cmds(un, un->un_retry_bp);
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_retry_command: exit\n");
}

/*
 * Function: sd_rmw_msg_print_handler
 *
 * Description: If RMW mode is enabled and the warning message is triggered,
 *		print the I/O count during a fixed interval.
 *
 * Arguments: arg - pointer to associated softstate for the device.
 *
 * Context: timeout(9F) thread context. May not sleep.
 */
static void
sd_rmw_msg_print_handler(void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_rmw_msg_print_handler: entry\n");

	mutex_enter(SD_MUTEX(un));

	if (un->un_rmw_incre_count > 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "%"PRIu64" I/O requests are not aligned with %d disk "
		    "sector size in %ld seconds. They are handled through "
		    "Read Modify Write but the performance is very low!\n",
		    un->un_rmw_incre_count, un->un_tgt_blocksize,
		    drv_hztousec(SD_RMW_MSG_PRINT_TIMEOUT) / 1000000);
		un->un_rmw_incre_count = 0;
		un->un_rmw_msg_timeid = timeout(sd_rmw_msg_print_handler,
		    un, SD_RMW_MSG_PRINT_TIMEOUT);
	} else {
		un->un_rmw_msg_timeid = NULL;
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_rmw_msg_print_handler: exit\n");
}
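
/*
 * The handler above illustrates the self-rearming timeout(9F) pattern.
 * Reduced to its core (a sketch; "my_soft" and my_handler are made-up
 * names): the handler clears its timeout id under the softstate lock
 * and only reschedules itself while there is still work outstanding,
 * so the callout chain dies out naturally once the condition clears.
 *
 *	static void
 *	my_handler(void *arg)
 *	{
 *		struct my_soft *ss = arg;
 *
 *		mutex_enter(&ss->ss_lock);
 *		if (ss->ss_pending > 0) {
 *			ss->ss_pending = 0;
 *			ss->ss_timeid = timeout(my_handler, ss, ss->ss_ticks);
 *		} else {
 *			ss->ss_timeid = NULL;
 *		}
 *		mutex_exit(&ss->ss_lock);
 *	}
 */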
16259 */ 16260 16261 static void 16262 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 16263 struct scsi_pkt *pktp) 16264 { 16265 ASSERT(bp != NULL); 16266 ASSERT(un != NULL); 16267 ASSERT(mutex_owned(SD_MUTEX(un))); 16268 16269 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 16270 "entry: buf:0x%p\n", bp); 16271 16272 /* 16273 * If we are syncing or dumping, then fail the command to avoid a 16274 * recursive callback into scsi_transport(). Also fail the command 16275 * if we are suspended (legacy behavior). 16276 */ 16277 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 16278 (un->un_state == SD_STATE_DUMPING)) { 16279 sd_return_failed_command(un, bp, EIO); 16280 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16281 "sd_send_request_sense_command: syncing/dumping, exit\n"); 16282 return; 16283 } 16284 16285 /* 16286 * Retry the failed command and don't issue the request sense if: 16287 * 1) the sense buf is busy 16288 * 2) we have 1 or more outstanding commands on the target 16289 * (the sense data will be cleared or invalidated anyway) 16290 * 16291 * Note: There could be an issue with not checking a retry limit here; 16292 * the problem is determining which retry limit to check. 16293 */ 16294 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 16295 /* Don't retry if the command is flagged as non-retryable */ 16296 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16297 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 16298 NULL, NULL, 0, un->un_busy_timeout, 16299 kstat_waitq_enter); 16300 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16301 "sd_send_request_sense_command: " 16302 "at full throttle, retrying exit\n"); 16303 } else { 16304 sd_return_failed_command(un, bp, EIO); 16305 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16306 "sd_send_request_sense_command: " 16307 "at full throttle, non-retryable exit\n"); 16308 } 16309 return; 16310 } 16311 16312 sd_mark_rqs_busy(un, bp); 16313 sd_start_cmds(un, un->un_rqs_bp); 16314 16315 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16316 "sd_send_request_sense_command: exit\n"); 16317 } 16318 16319 16320 /* 16321 * Function: sd_mark_rqs_busy 16322 * 16323 * Description: Indicate that the request sense bp for this instance is 16324 * in use. 16325 * 16326 * Context: May be called under interrupt context 16327 */ 16328 16329 static void 16330 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 16331 { 16332 struct sd_xbuf *sense_xp; 16333 16334 ASSERT(un != NULL); 16335 ASSERT(bp != NULL); 16336 ASSERT(mutex_owned(SD_MUTEX(un))); 16337 ASSERT(un->un_sense_isbusy == 0); 16338 16339 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 16340 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 16341 16342 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 16343 ASSERT(sense_xp != NULL); 16344 16345 SD_INFO(SD_LOG_IO, un, 16346 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 16347 16348 ASSERT(sense_xp->xb_pktp != NULL); 16349 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 16350 == (FLAG_SENSING | FLAG_HEAD)); 16351 16352 un->un_sense_isbusy = 1; 16353 un->un_rqs_bp->b_resid = 0; 16354 sense_xp->xb_pktp->pkt_resid = 0; 16355 sense_xp->xb_pktp->pkt_reason = 0; 16356 16357 /* So we can get back the bp at interrupt time! */ 16358 sense_xp->xb_sense_bp = bp; 16359 16360 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 16361 16362 /* 16363 * Mark this buf as awaiting sense data. (This is already set in 16364 * the pkt_flags for the RQS packet.)
16365 */ 16366 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 16367 16368 /* Request sense down same path */ 16369 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) && 16370 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance) 16371 sense_xp->xb_pktp->pkt_path_instance = 16372 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance; 16373 16374 sense_xp->xb_retry_count = 0; 16375 sense_xp->xb_victim_retry_count = 0; 16376 sense_xp->xb_ua_retry_count = 0; 16377 sense_xp->xb_nr_retry_count = 0; 16378 sense_xp->xb_dma_resid = 0; 16379 16380 /* Clean up the fields for auto-request sense */ 16381 sense_xp->xb_sense_status = 0; 16382 sense_xp->xb_sense_state = 0; 16383 sense_xp->xb_sense_resid = 0; 16384 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 16385 16386 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 16387 } 16388 16389 16390 /* 16391 * Function: sd_mark_rqs_idle 16392 * 16393 * Description: SD_MUTEX must be held continuously through this routine 16394 * to prevent reuse of the rqs struct before the caller can 16395 * complete its processing. 16396 * 16397 * Return Code: Pointer to the RQS buf 16398 * 16399 * Context: May be called under interrupt context 16400 */ 16401 16402 static struct buf * 16403 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 16404 { 16405 struct buf *bp; 16406 ASSERT(un != NULL); 16407 ASSERT(sense_xp != NULL); 16408 ASSERT(mutex_owned(SD_MUTEX(un))); 16409 ASSERT(un->un_sense_isbusy != 0); 16410 16411 un->un_sense_isbusy = 0; 16412 bp = sense_xp->xb_sense_bp; 16413 sense_xp->xb_sense_bp = NULL; 16414 16415 /* This pkt is no longer interested in getting sense data */ 16416 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 16417 16418 return (bp); 16419 } 16420 16421 16422 16423 /* 16424 * Function: sd_alloc_rqs 16425 * 16426 * Description: Set up the unit to receive auto request sense data 16427 * 16428 * Return Code: DDI_SUCCESS or DDI_FAILURE 16429 * 16430 * Context: Called under attach(9E) context 16431 */ 16432 16433 static int 16434 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 16435 { 16436 struct sd_xbuf *xp; 16437 16438 ASSERT(un != NULL); 16439 ASSERT(!mutex_owned(SD_MUTEX(un))); 16440 ASSERT(un->un_rqs_bp == NULL); 16441 ASSERT(un->un_rqs_pktp == NULL); 16442 16443 /* 16444 * First allocate the required buf and scsi_pkt structs, then set up 16445 * the CDB in the scsi_pkt for a REQUEST SENSE command. 16446 */ 16447 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 16448 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 16449 if (un->un_rqs_bp == NULL) { 16450 return (DDI_FAILURE); 16451 } 16452 16453 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 16454 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 16455 16456 if (un->un_rqs_pktp == NULL) { 16457 sd_free_rqs(un); 16458 return (DDI_FAILURE); 16459 } 16460 16461 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 16462 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 16463 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 16464 16465 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 16466 16467 /* Set up the other needed members in the ARQ scsi_pkt. */ 16468 un->un_rqs_pktp->pkt_comp = sdintr; 16469 un->un_rqs_pktp->pkt_time = sd_io_time; 16470 un->un_rqs_pktp->pkt_flags |= 16471 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 16472 16473 /* 16474 * Allocate & init the sd_xbuf struct for the RQS command.
Do not 16475 * provide any initpkt, destroypkt routines as we take care of 16476 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 16477 */ 16478 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 16479 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 16480 xp->xb_pktp = un->un_rqs_pktp; 16481 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16482 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 16483 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 16484 16485 /* 16486 * Save the pointer to the request sense private bp so it can 16487 * be retrieved in sdintr. 16488 */ 16489 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 16490 ASSERT(un->un_rqs_bp->b_private == xp); 16491 16492 /* 16493 * See if the HBA supports auto-request sense for the specified 16494 * target/lun. If it does, then try to enable it (if not already 16495 * enabled). 16496 * 16497 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 16498 * failure, while for other HBAs (pln) scsi_ifsetcap will always 16499 * return success. However, in both of these cases ARQ is always 16500 * enabled and scsi_ifgetcap will always return true. The best approach 16501 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 16502 * 16503 * The 3rd case is an HBA (adp) that always returns enabled on 16504 * scsi_ifgetcap even when ARQ is not enabled; the best approach 16505 * there is to issue a scsi_ifsetcap followed by a scsi_ifgetcap. 16506 * Note: this case is to circumvent the Adaptec bug. (x86 only) 16507 */ 16508 16509 if (un->un_f_is_fibre == TRUE) { 16510 un->un_f_arq_enabled = TRUE; 16511 } else { 16512 #if defined(__i386) || defined(__amd64) 16513 /* 16514 * Circumvent the Adaptec bug, remove this code when 16515 * the bug is fixed 16516 */ 16517 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 16518 #endif 16519 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 16520 case 0: 16521 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16522 "sd_alloc_rqs: HBA supports ARQ\n"); 16523 /* 16524 * ARQ is supported by this HBA but currently is not 16525 * enabled. Attempt to enable it and if successful then 16526 * mark this instance as ARQ enabled. 16527 */ 16528 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 16529 == 1) { 16530 /* Successfully enabled ARQ in the HBA */ 16531 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16532 "sd_alloc_rqs: ARQ enabled\n"); 16533 un->un_f_arq_enabled = TRUE; 16534 } else { 16535 /* Could not enable ARQ in the HBA */ 16536 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16537 "sd_alloc_rqs: failed ARQ enable\n"); 16538 un->un_f_arq_enabled = FALSE; 16539 } 16540 break; 16541 case 1: 16542 /* 16543 * ARQ is supported by this HBA and is already enabled. 16544 * Just mark ARQ as enabled for this instance. 16545 */ 16546 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16547 "sd_alloc_rqs: ARQ already enabled\n"); 16548 un->un_f_arq_enabled = TRUE; 16549 break; 16550 default: 16551 /* 16552 * ARQ is not supported by this HBA; disable it for this 16553 * instance. 16554 */ 16555 SD_INFO(SD_LOG_ATTACH_DETACH, un, 16556 "sd_alloc_rqs: HBA does not support ARQ\n"); 16557 un->un_f_arq_enabled = FALSE; 16558 break; 16559 } 16560 } 16561 16562 return (DDI_SUCCESS); 16563 } 16564 16565 16566 /* 16567 * Function: sd_free_rqs 16568 * 16569 * Description: Cleanup for the per-instance RQS command.
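 *
 *		The teardown must mirror sd_alloc_rqs() in reverse; the
 *		required pairing is (a sketch, see the ordering note in the
 *		body below):
 *
 *		    scsi_init_pkt()             <-> scsi_destroy_pkt()
 *		    scsi_alloc_consistent_buf() <-> scsi_free_consistent_buf()
 *
 *		with the pkt destroyed before its consistent buffer is freed.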
16570 * 16571 * Context: Kernel thread context 16572 */ 16573 16574 static void 16575 sd_free_rqs(struct sd_lun *un) 16576 { 16577 ASSERT(un != NULL); 16578 16579 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 16580 16581 /* 16582 * If consistent memory is bound to a scsi_pkt, the pkt 16583 * has to be destroyed *before* freeing the consistent memory. 16584 * Don't change the sequence of these operations. 16585 * scsi_destroy_pkt() might access memory, which isn't allowed, 16586 * after it was freed in scsi_free_consistent_buf(). 16587 */ 16588 if (un->un_rqs_pktp != NULL) { 16589 scsi_destroy_pkt(un->un_rqs_pktp); 16590 un->un_rqs_pktp = NULL; 16591 } 16592 16593 if (un->un_rqs_bp != NULL) { 16594 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp); 16595 if (xp != NULL) { 16596 kmem_free(xp, sizeof (struct sd_xbuf)); 16597 } 16598 scsi_free_consistent_buf(un->un_rqs_bp); 16599 un->un_rqs_bp = NULL; 16600 } 16601 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 16602 } 16603 16604 16605 16606 /* 16607 * Function: sd_reduce_throttle 16608 * 16609 * Description: Reduces the maximum # of outstanding commands on a 16610 * target to the current number of outstanding commands. 16611 * Queues a timeout(9F) callback to restore the limit 16612 * after a specified interval has elapsed. 16613 * Typically used when we get a TRAN_BUSY return code 16614 * back from scsi_transport(). 16615 * 16616 * Arguments: un - ptr to the sd_lun softstate struct 16617 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 16618 * 16619 * Context: May be called from interrupt context 16620 */ 16621 16622 static void 16623 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 16624 { 16625 ASSERT(un != NULL); 16626 ASSERT(mutex_owned(SD_MUTEX(un))); 16627 ASSERT(un->un_ncmds_in_transport >= 0); 16628 16629 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 16630 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 16631 un, un->un_throttle, un->un_ncmds_in_transport); 16632 16633 if (un->un_throttle > 1) { 16634 if (un->un_f_use_adaptive_throttle == TRUE) { 16635 switch (throttle_type) { 16636 case SD_THROTTLE_TRAN_BUSY: 16637 if (un->un_busy_throttle == 0) { 16638 un->un_busy_throttle = un->un_throttle; 16639 } 16640 break; 16641 case SD_THROTTLE_QFULL: 16642 un->un_busy_throttle = 0; 16643 break; 16644 default: 16645 ASSERT(FALSE); 16646 } 16647 16648 if (un->un_ncmds_in_transport > 0) { 16649 un->un_throttle = un->un_ncmds_in_transport; 16650 } 16651 16652 } else { 16653 if (un->un_ncmds_in_transport == 0) { 16654 un->un_throttle = 1; 16655 } else { 16656 un->un_throttle = un->un_ncmds_in_transport; 16657 } 16658 } 16659 } 16660 16661 /* Reschedule the timeout if none is currently active */ 16662 if (un->un_reset_throttle_timeid == NULL) { 16663 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 16664 un, SD_THROTTLE_RESET_INTERVAL); 16665 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16666 "sd_reduce_throttle: timeout scheduled!\n"); 16667 } 16668 16669 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 16670 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 16671 } 16672 16673 16674 16675 /* 16676 * Function: sd_restore_throttle 16677 * 16678 * Description: Callback function for timeout(9F). Resets the current 16679 * value of un->un_throttle to its default. 16680 * 16681 * Arguments: arg - pointer to associated softstate for the device.
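 *
 *		In the QFULL case the limit is reopened incrementally rather
 *		than all at once; the step applied on each callback is, in
 *		effect (a sketch of the logic in the body below):
 *
 *		    new_throttle = min(cur + max(cur / 10, 1),
 *			un->un_saved_throttle);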
16682 * 16683 * Context: May be called from interrupt context 16684 */ 16685 16686 static void 16687 sd_restore_throttle(void *arg) 16688 { 16689 struct sd_lun *un = arg; 16690 16691 ASSERT(un != NULL); 16692 ASSERT(!mutex_owned(SD_MUTEX(un))); 16693 16694 mutex_enter(SD_MUTEX(un)); 16695 16696 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 16697 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 16698 16699 un->un_reset_throttle_timeid = NULL; 16700 16701 if (un->un_f_use_adaptive_throttle == TRUE) { 16702 /* 16703 * If un_busy_throttle is nonzero, then it contains the 16704 * value that un_throttle was when we got a TRAN_BUSY back 16705 * from scsi_transport(). We want to revert back to this 16706 * value. 16707 * 16708 * In the QFULL case, the throttle limit will incrementally 16709 * increase until it reaches max throttle. 16710 */ 16711 if (un->un_busy_throttle > 0) { 16712 un->un_throttle = un->un_busy_throttle; 16713 un->un_busy_throttle = 0; 16714 } else { 16715 /* 16716 * Increase the throttle by 10% to open the gate 16717 * slowly; schedule another restore if the saved 16718 * throttle has not been reached. 16719 */ 16720 short throttle; 16721 if (sd_qfull_throttle_enable) { 16722 throttle = un->un_throttle + 16723 max((un->un_throttle / 10), 1); 16724 un->un_throttle = 16725 (throttle < un->un_saved_throttle) ? 16726 throttle : un->un_saved_throttle; 16727 if (un->un_throttle < un->un_saved_throttle) { 16728 un->un_reset_throttle_timeid = 16729 timeout(sd_restore_throttle, 16730 un, 16731 SD_QFULL_THROTTLE_RESET_INTERVAL); 16732 } 16733 } 16734 } 16735 16736 /* 16737 * If un_throttle has fallen below the low-water mark, we 16738 * restore the maximum value here (and allow it to ratchet 16739 * down again if necessary). 16740 */ 16741 if (un->un_throttle < un->un_min_throttle) { 16742 un->un_throttle = un->un_saved_throttle; 16743 } 16744 } else { 16745 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 16746 "restoring limit from 0x%x to 0x%x\n", 16747 un->un_throttle, un->un_saved_throttle); 16748 un->un_throttle = un->un_saved_throttle; 16749 } 16750 16751 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 16752 "sd_restore_throttle: calling sd_start_cmds!\n"); 16753 16754 sd_start_cmds(un, NULL); 16755 16756 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 16757 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 16758 un, un->un_throttle); 16759 16760 mutex_exit(SD_MUTEX(un)); 16761 16762 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 16763 } 16764 16765 /* 16766 * Function: sdrunout 16767 * 16768 * Description: Callback routine for scsi_init_pkt when a resource allocation 16769 * fails. 16770 * 16771 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 16772 * soft state instance. 16773 * 16774 * Return Code: The scsi_init_pkt routine allows for the callback function to 16775 * return a 0 indicating the callback should be rescheduled or a 1 16776 * indicating not to reschedule. This routine always returns 1 16777 * because the driver always provides a callback function to 16778 * scsi_init_pkt. This results in a callback always being scheduled 16779 * (via the scsi_init_pkt callback implementation) if a resource 16780 * failure occurs. 16781 * 16782 * Context: This callback function may not block or call routines that block 16783 * 16784 * Note: Using the scsi_init_pkt callback facility can result in an I/O 16785 * request persisting at the head of the list which cannot be 16786 * satisfied even after multiple retries.
In the future the driver 16787 * may implement some type of maximum runout count before failing 16788 * an I/O. 16789 */ 16790 16791 static int 16792 sdrunout(caddr_t arg) 16793 { 16794 struct sd_lun *un = (struct sd_lun *)arg; 16795 16796 ASSERT(un != NULL); 16797 ASSERT(!mutex_owned(SD_MUTEX(un))); 16798 16799 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 16800 16801 mutex_enter(SD_MUTEX(un)); 16802 sd_start_cmds(un, NULL); 16803 mutex_exit(SD_MUTEX(un)); 16804 /* 16805 * This callback routine always returns 1 (i.e. do not reschedule) 16806 * because we always specify sdrunout as the callback handler for 16807 * scsi_init_pkt inside the call to sd_start_cmds. 16808 */ 16809 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 16810 return (1); 16811 } 16812 16813 16814 /* 16815 * Function: sdintr 16816 * 16817 * Description: Completion callback routine for scsi_pkt(9S) structs 16818 * sent to the HBA driver via scsi_transport(9F). 16819 * 16820 * Context: Interrupt context 16821 */ 16822 16823 static void 16824 sdintr(struct scsi_pkt *pktp) 16825 { 16826 struct buf *bp; 16827 struct sd_xbuf *xp; 16828 struct sd_lun *un; 16829 size_t actual_len; 16830 sd_ssc_t *sscp; 16831 16832 ASSERT(pktp != NULL); 16833 bp = (struct buf *)pktp->pkt_private; 16834 ASSERT(bp != NULL); 16835 xp = SD_GET_XBUF(bp); 16836 ASSERT(xp != NULL); 16837 ASSERT(xp->xb_pktp != NULL); 16838 un = SD_GET_UN(bp); 16839 ASSERT(un != NULL); 16840 ASSERT(!mutex_owned(SD_MUTEX(un))); 16841 16842 #ifdef SD_FAULT_INJECTION 16843 16844 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 16845 /* SD FaultInjection */ 16846 sd_faultinjection(pktp); 16847 16848 #endif /* SD_FAULT_INJECTION */ 16849 16850 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 16851 " xp:0x%p, un:0x%p\n", bp, xp, un); 16852 16853 mutex_enter(SD_MUTEX(un)); 16854 16855 ASSERT(un->un_fm_private != NULL); 16856 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 16857 ASSERT(sscp != NULL); 16858 16859 /* Reduce the count of the #commands currently in transport */ 16860 un->un_ncmds_in_transport--; 16861 ASSERT(un->un_ncmds_in_transport >= 0); 16862 16863 /* Increment counter to indicate that the callback routine is active */ 16864 un->un_in_callback++; 16865 16866 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 16867 16868 #ifdef SDDEBUG 16869 if (bp == un->un_retry_bp) { 16870 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 16871 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 16872 un, un->un_retry_bp, un->un_ncmds_in_transport); 16873 } 16874 #endif 16875 16876 /* 16877 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 16878 * state if needed. 16879 */ 16880 if (pktp->pkt_reason == CMD_DEV_GONE) { 16881 /* Prevent multiple console messages for the same failure. */ 16882 if (un->un_last_pkt_reason != CMD_DEV_GONE) { 16883 un->un_last_pkt_reason = CMD_DEV_GONE; 16884 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16885 "Command failed to complete...Device is gone\n"); 16886 } 16887 if (un->un_mediastate != DKIO_DEV_GONE) { 16888 un->un_mediastate = DKIO_DEV_GONE; 16889 cv_broadcast(&un->un_state_cv); 16890 } 16891 /* 16892 * If the command happens to be the REQUEST SENSE command, 16893 * free up the rqs buf and fail the original command.
16894 */ 16895 if (bp == un->un_rqs_bp) { 16896 bp = sd_mark_rqs_idle(un, xp); 16897 } 16898 sd_return_failed_command(un, bp, EIO); 16899 goto exit; 16900 } 16901 16902 if (pktp->pkt_state & STATE_XARQ_DONE) { 16903 SD_TRACE(SD_LOG_COMMON, un, 16904 "sdintr: extra sense data received. pkt=%p\n", pktp); 16905 } 16906 16907 /* 16908 * First see if the pkt has auto-request sense data with it.... 16909 * Look at the packet state first so we don't take a performance 16910 * hit looking at the arq enabled flag unless absolutely necessary. 16911 */ 16912 if ((pktp->pkt_state & STATE_ARQ_DONE) && 16913 (un->un_f_arq_enabled == TRUE)) { 16914 /* 16915 * The HBA did an auto request sense for this command so check 16916 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16917 * driver command that should not be retried. 16918 */ 16919 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16920 /* 16921 * Save the relevant sense info into the xp for the 16922 * original cmd. 16923 */ 16924 struct scsi_arq_status *asp; 16925 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16926 xp->xb_sense_status = 16927 *((uchar_t *)(&(asp->sts_rqpkt_status))); 16928 xp->xb_sense_state = asp->sts_rqpkt_state; 16929 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16930 if (pktp->pkt_state & STATE_XARQ_DONE) { 16931 actual_len = MAX_SENSE_LENGTH - 16932 xp->xb_sense_resid; 16933 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16934 MAX_SENSE_LENGTH); 16935 } else { 16936 if (xp->xb_sense_resid > SENSE_LENGTH) { 16937 actual_len = MAX_SENSE_LENGTH - 16938 xp->xb_sense_resid; 16939 } else { 16940 actual_len = SENSE_LENGTH - 16941 xp->xb_sense_resid; 16942 } 16943 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16944 if ((((struct uscsi_cmd *) 16945 (xp->xb_pktinfo))->uscsi_rqlen) > 16946 actual_len) { 16947 xp->xb_sense_resid = 16948 (((struct uscsi_cmd *) 16949 (xp->xb_pktinfo))-> 16950 uscsi_rqlen) - actual_len; 16951 } else { 16952 xp->xb_sense_resid = 0; 16953 } 16954 } 16955 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16956 SENSE_LENGTH); 16957 } 16958 16959 /* fail the command */ 16960 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16961 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 16962 sd_return_failed_command(un, bp, EIO); 16963 goto exit; 16964 } 16965 16966 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16967 /* 16968 * We want to either retry or fail this command, so free 16969 * the DMA resources here. If we retry the command then 16970 * the DMA resources will be reallocated in sd_start_cmds(). 16971 * Note that when PKT_DMA_PARTIAL is used, this reallocation 16972 * causes the *entire* transfer to start over again from the 16973 * beginning of the request, even for PARTIAL chunks that 16974 * have already transferred successfully. 
16975 */ 16976 if ((un->un_f_is_fibre == TRUE) && 16977 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16978 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16979 scsi_dmafree(pktp); 16980 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16981 } 16982 #endif 16983 16984 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16985 "sdintr: arq done, sd_handle_auto_request_sense\n"); 16986 16987 sd_handle_auto_request_sense(un, bp, xp, pktp); 16988 goto exit; 16989 } 16990 16991 /* Next see if this is the REQUEST SENSE pkt for the instance */ 16992 if (pktp->pkt_flags & FLAG_SENSING) { 16993 /* This pktp is from the unit's REQUEST_SENSE command */ 16994 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16995 "sdintr: sd_handle_request_sense\n"); 16996 sd_handle_request_sense(un, bp, xp, pktp); 16997 goto exit; 16998 } 16999 17000 /* 17001 * Check to see if the command successfully completed as requested; 17002 * this is the most common case (and also the hot performance path). 17003 * 17004 * Requirements for successful completion are: 17005 * pkt_reason is CMD_CMPLT and packet status is status good. 17006 * In addition: 17007 * - A residual of zero indicates successful completion no matter what 17008 * the command is. 17009 * - If the residual is not zero and the command is not a read or 17010 * write, then it's still defined as successful completion. In other 17011 * words, if the command is a read or write the residual must be 17012 * zero for successful completion. 17013 * - If the residual is not zero and the command is a read or 17014 * write, and it's a USCSICMD, then it's still defined as 17015 * successful completion. 17016 */ 17017 if ((pktp->pkt_reason == CMD_CMPLT) && 17018 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 17019 17020 /* 17021 * Since this command is returned with a good status, we 17022 * can reset the count for Sonoma failover. 17023 */ 17024 un->un_sonoma_failure_count = 0; 17025 17026 /* 17027 * Return all USCSI commands on good status 17028 */ 17029 if (pktp->pkt_resid == 0) { 17030 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17031 "sdintr: returning command for resid == 0\n"); 17032 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 17033 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 17034 SD_UPDATE_B_RESID(bp, pktp); 17035 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17036 "sdintr: returning command for resid != 0\n"); 17037 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 17038 SD_UPDATE_B_RESID(bp, pktp); 17039 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17040 "sdintr: returning uscsi command\n"); 17041 } else { 17042 goto not_successful; 17043 } 17044 sd_return_command(un, bp); 17045 17046 /* 17047 * Decrement counter to indicate that the callback routine 17048 * is done. 17049 */ 17050 un->un_in_callback--; 17051 ASSERT(un->un_in_callback >= 0); 17052 mutex_exit(SD_MUTEX(un)); 17053 17054 return; 17055 } 17056 17057 not_successful: 17058 17059 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 17060 /* 17061 * The following is based upon knowledge of the underlying transport 17062 * and its use of DMA resources. This code should be removed when 17063 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 17064 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 17065 * and sd_start_cmds(). 17066 * 17067 * Free any DMA resources associated with this command if there 17068 * is a chance it could be retried or enqueued for later retry. 
17069 * If we keep the DMA binding then mpxio cannot reissue the 17070 * command on another path whenever a path failure occurs. 17071 * 17072 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 17073 * causes the *entire* transfer to start over again from the 17074 * beginning of the request, even for PARTIAL chunks that 17075 * have already transferred successfully. 17076 * 17077 * This is only done for non-uscsi commands (and also skipped for the 17078 * driver's internal RQS command). Also just do this for Fibre Channel 17079 * devices as these are the only ones that support mpxio. 17080 */ 17081 if ((un->un_f_is_fibre == TRUE) && 17082 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 17083 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 17084 scsi_dmafree(pktp); 17085 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 17086 } 17087 #endif 17088 17089 /* 17090 * The command did not successfully complete as requested so check 17091 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 17092 * driver command that should not be retried so just return. If 17093 * FLAG_DIAGNOSE is not set the error will be processed below. 17094 */ 17095 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 17096 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17097 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 17098 /* 17099 * Issue a request sense if a check condition caused the error 17100 * (we handle the auto request sense case above), otherwise 17101 * just fail the command. 17102 */ 17103 if ((pktp->pkt_reason == CMD_CMPLT) && 17104 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 17105 sd_send_request_sense_command(un, bp, pktp); 17106 } else { 17107 sd_return_failed_command(un, bp, EIO); 17108 } 17109 goto exit; 17110 } 17111 17112 /* 17113 * The command did not successfully complete as requested so process 17114 * the error, retry, and/or attempt recovery. 17115 */ 17116 switch (pktp->pkt_reason) { 17117 case CMD_CMPLT: 17118 switch (SD_GET_PKT_STATUS(pktp)) { 17119 case STATUS_GOOD: 17120 /* 17121 * The command completed successfully with a non-zero 17122 * residual 17123 */ 17124 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17125 "sdintr: STATUS_GOOD \n"); 17126 sd_pkt_status_good(un, bp, xp, pktp); 17127 break; 17128 17129 case STATUS_CHECK: 17130 case STATUS_TERMINATED: 17131 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17132 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 17133 sd_pkt_status_check_condition(un, bp, xp, pktp); 17134 break; 17135 17136 case STATUS_BUSY: 17137 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17138 "sdintr: STATUS_BUSY\n"); 17139 sd_pkt_status_busy(un, bp, xp, pktp); 17140 break; 17141 17142 case STATUS_RESERVATION_CONFLICT: 17143 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17144 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 17145 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 17146 break; 17147 17148 case STATUS_QFULL: 17149 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17150 "sdintr: STATUS_QFULL\n"); 17151 sd_pkt_status_qfull(un, bp, xp, pktp); 17152 break; 17153 17154 case STATUS_MET: 17155 case STATUS_INTERMEDIATE: 17156 case STATUS_SCSI2: 17157 case STATUS_INTERMEDIATE_MET: 17158 case STATUS_ACA_ACTIVE: 17159 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17160 "Unexpected SCSI status received: 0x%x\n", 17161 SD_GET_PKT_STATUS(pktp)); 17162 /* 17163 * Mark the ssc_flags when detected invalid status 17164 * code for non-USCSI command. 
17165 */ 17166 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17167 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 17168 0, "stat-code"); 17169 } 17170 sd_return_failed_command(un, bp, EIO); 17171 break; 17172 17173 default: 17174 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17175 "Invalid SCSI status received: 0x%x\n", 17176 SD_GET_PKT_STATUS(pktp)); 17177 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17178 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 17179 0, "stat-code"); 17180 } 17181 sd_return_failed_command(un, bp, EIO); 17182 break; 17183 17184 } 17185 break; 17186 17187 case CMD_INCOMPLETE: 17188 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17189 "sdintr: CMD_INCOMPLETE\n"); 17190 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 17191 break; 17192 case CMD_TRAN_ERR: 17193 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17194 "sdintr: CMD_TRAN_ERR\n"); 17195 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 17196 break; 17197 case CMD_RESET: 17198 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17199 "sdintr: CMD_RESET \n"); 17200 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 17201 break; 17202 case CMD_ABORTED: 17203 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17204 "sdintr: CMD_ABORTED \n"); 17205 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 17206 break; 17207 case CMD_TIMEOUT: 17208 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17209 "sdintr: CMD_TIMEOUT\n"); 17210 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 17211 break; 17212 case CMD_UNX_BUS_FREE: 17213 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17214 "sdintr: CMD_UNX_BUS_FREE \n"); 17215 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 17216 break; 17217 case CMD_TAG_REJECT: 17218 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17219 "sdintr: CMD_TAG_REJECT\n"); 17220 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 17221 break; 17222 default: 17223 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17224 "sdintr: default\n"); 17225 /* 17226 * Mark the ssc_flags for detecting an invalid pkt_reason. 17227 */ 17228 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17229 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON, 17230 0, "pkt-reason"); 17231 } 17232 sd_pkt_reason_default(un, bp, xp, pktp); 17233 break; 17234 } 17235 17236 exit: 17237 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 17238 17239 /* Decrement counter to indicate that the callback routine is done. */ 17240 un->un_in_callback--; 17241 ASSERT(un->un_in_callback >= 0); 17242 17243 /* 17244 * At this point, the pkt has been dispatched, ie, it is either 17245 * being re-tried or has been returned to its caller and should 17246 * not be referenced. 17247 */ 17248 17249 mutex_exit(SD_MUTEX(un)); 17250 } 17251 17252 17253 /* 17254 * Function: sd_print_incomplete_msg 17255 * 17256 * Description: Prints the error message for a CMD_INCOMPLETE error. 17257 * 17258 * Arguments: un - ptr to associated softstate for the device. 17259 * bp - ptr to the buf(9S) for the command. 17260 * arg - message string ptr 17261 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 17262 * or SD_NO_RETRY_ISSUED.
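 *
 *		For example, an incomplete read that is being retried (e.g.
 *		when called back from sd_pkt_status_good() with arg "read")
 *		is logged as (illustrative output):
 *
 *		    incomplete read- retrying
 *
 *		where the trailing word is derived from the code argument.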
17263 * 17264 * Context: May be called under interrupt context 17265 */ 17266 17267 static void 17268 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 17269 { 17270 struct scsi_pkt *pktp; 17271 char *msgp; 17272 char *cmdp = arg; 17273 17274 ASSERT(un != NULL); 17275 ASSERT(mutex_owned(SD_MUTEX(un))); 17276 ASSERT(bp != NULL); 17277 ASSERT(arg != NULL); 17278 pktp = SD_GET_PKTP(bp); 17279 ASSERT(pktp != NULL); 17280 17281 switch (code) { 17282 case SD_DELAYED_RETRY_ISSUED: 17283 case SD_IMMEDIATE_RETRY_ISSUED: 17284 msgp = "retrying"; 17285 break; 17286 case SD_NO_RETRY_ISSUED: 17287 default: 17288 msgp = "giving up"; 17289 break; 17290 } 17291 17292 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17293 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17294 "incomplete %s- %s\n", cmdp, msgp); 17295 } 17296 } 17297 17298 17299 17300 /* 17301 * Function: sd_pkt_status_good 17302 * 17303 * Description: Processing for a STATUS_GOOD code in pkt_status. 17304 * 17305 * Context: May be called under interrupt context 17306 */ 17307 17308 static void 17309 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 17310 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17311 { 17312 char *cmdp; 17313 17314 ASSERT(un != NULL); 17315 ASSERT(mutex_owned(SD_MUTEX(un))); 17316 ASSERT(bp != NULL); 17317 ASSERT(xp != NULL); 17318 ASSERT(pktp != NULL); 17319 ASSERT(pktp->pkt_reason == CMD_CMPLT); 17320 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 17321 ASSERT(pktp->pkt_resid != 0); 17322 17323 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 17324 17325 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17326 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 17327 case SCMD_READ: 17328 cmdp = "read"; 17329 break; 17330 case SCMD_WRITE: 17331 cmdp = "write"; 17332 break; 17333 default: 17334 SD_UPDATE_B_RESID(bp, pktp); 17335 sd_return_command(un, bp); 17336 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 17337 return; 17338 } 17339 17340 /* 17341 * See if we can retry the read/write, preferably immediately. 17342 * If retries are exhausted, then sd_retry_command() will update 17343 * the b_resid count. 17344 */ 17345 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 17346 cmdp, EIO, (clock_t)0, NULL); 17347 17348 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 17349 } 17350 17351 17352 17353 17354 17355 /* 17356 * Function: sd_handle_request_sense 17357 * 17358 * Description: Processing for non-auto Request Sense command. 17359 * 17360 * Arguments: un - ptr to associated softstate 17361 * sense_bp - ptr to buf(9S) for the RQS command 17362 * sense_xp - ptr to the sd_xbuf for the RQS command 17363 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 17364 * 17365 * Context: May be called under interrupt context 17366 */ 17367 17368 static void 17369 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 17370 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 17371 { 17372 struct buf *cmd_bp; /* buf for the original command */ 17373 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 17374 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 17375 size_t actual_len; /* actual sense data length */ 17376 17377 ASSERT(un != NULL); 17378 ASSERT(mutex_owned(SD_MUTEX(un))); 17379 ASSERT(sense_bp != NULL); 17380 ASSERT(sense_xp != NULL); 17381 ASSERT(sense_pktp != NULL); 17382 17383 /* 17384 * Note the sense_bp, sense_xp, and sense_pktp here are for the 17385 * RQS command and not the original command.
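 *
 *		The RQS command and the original command are cross-linked
 *		roughly as follows (a sketch):
 *
 *		    un->un_rqs_bp --SD_GET_XBUF()--> sense_xp
 *		    sense_xp->xb_sense_bp ---------> cmd_bp (original command)
 *
 *		which is how the original bp is recovered just below.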
17386 */ 17387 ASSERT(sense_pktp == un->un_rqs_pktp); 17388 ASSERT(sense_bp == un->un_rqs_bp); 17389 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 17390 (FLAG_SENSING | FLAG_HEAD)); 17391 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 17392 FLAG_SENSING) == FLAG_SENSING); 17393 17394 /* These are the bp, xp, and pktp for the original command */ 17395 cmd_bp = sense_xp->xb_sense_bp; 17396 cmd_xp = SD_GET_XBUF(cmd_bp); 17397 cmd_pktp = SD_GET_PKTP(cmd_bp); 17398 17399 if (sense_pktp->pkt_reason != CMD_CMPLT) { 17400 /* 17401 * The REQUEST SENSE command failed. Release the REQUEST 17402 * SENSE command for re-use, get back the bp for the original 17403 * command, and attempt to re-try the original command if 17404 * FLAG_DIAGNOSE is not set in the original packet. 17405 */ 17406 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17407 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 17408 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 17409 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 17410 NULL, NULL, EIO, (clock_t)0, NULL); 17411 return; 17412 } 17413 } 17414 17415 /* 17416 * Save the relevant sense info into the xp for the original cmd. 17417 * 17418 * Note: if the request sense failed the state info will be zero 17419 * as set in sd_mark_rqs_busy() 17420 */ 17421 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 17422 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 17423 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 17424 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 17425 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 17426 SENSE_LENGTH)) { 17427 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 17428 MAX_SENSE_LENGTH); 17429 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 17430 } else { 17431 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 17432 SENSE_LENGTH); 17433 if (actual_len < SENSE_LENGTH) { 17434 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 17435 } else { 17436 cmd_xp->xb_sense_resid = 0; 17437 } 17438 } 17439 17440 /* 17441 * Free up the RQS command.... 17442 * NOTE: 17443 * Must do this BEFORE calling sd_validate_sense_data! 17444 * sd_validate_sense_data may return the original command in 17445 * which case the pkt will be freed and the flags can no 17446 * longer be touched. 17447 * SD_MUTEX is held through this process until the command 17448 * is dispatched based upon the sense data, so there are 17449 * no race conditions. 17450 */ 17451 (void) sd_mark_rqs_idle(un, sense_xp); 17452 17453 /* 17454 * For a retryable command see if we have valid sense data, if so then 17455 * turn it over to sd_decode_sense() to figure out the right course of 17456 * action. Just fail a non-retryable command. 17457 */ 17458 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 17459 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 17460 SD_SENSE_DATA_IS_VALID) { 17461 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 17462 } 17463 } else { 17464 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 17465 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 17466 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 17467 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 17468 sd_return_failed_command(un, cmd_bp, EIO); 17469 } 17470 } 17471 17472 17473 17474 17475 /* 17476 * Function: sd_handle_auto_request_sense 17477 * 17478 * Description: Processing for auto-request sense information. 
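 *
 *		With ARQ the HBA returns the command status and the sense
 *		data together; pkt_scbp points at a struct scsi_arq_status,
 *		which has roughly this shape (an abridged sketch, not the
 *		full definition):
 *
 *		    struct scsi_arq_status {
 *			sts_status;		status of the original command
 *			sts_rqpkt_reason;	pkt_reason for the implicit RQS
 *			sts_rqpkt_status;	SCSI status of the implicit RQS
 *			sts_rqpkt_state;	pkt_state for the implicit RQS
 *			sts_rqpkt_resid;	sense bytes not transferred
 *			sts_sensedata;		the sense data itself
 *		    };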
17479 * 17480 * Arguments: un - ptr to associated softstate 17481 * bp - ptr to buf(9S) for the command 17482 * xp - ptr to the sd_xbuf for the command 17483 * pktp - ptr to the scsi_pkt(9S) for the command 17484 * 17485 * Context: May be called under interrupt context 17486 */ 17487 17488 static void 17489 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 17490 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17491 { 17492 struct scsi_arq_status *asp; 17493 size_t actual_len; 17494 17495 ASSERT(un != NULL); 17496 ASSERT(mutex_owned(SD_MUTEX(un))); 17497 ASSERT(bp != NULL); 17498 ASSERT(xp != NULL); 17499 ASSERT(pktp != NULL); 17500 ASSERT(pktp != un->un_rqs_pktp); 17501 ASSERT(bp != un->un_rqs_bp); 17502 17503 /* 17504 * For auto-request sense, we get a scsi_arq_status back from 17505 * the HBA, with the sense data in the sts_sensedata member. 17506 * The pkt_scbp of the packet points to this scsi_arq_status. 17507 */ 17508 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 17509 17510 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 17511 /* 17512 * The auto REQUEST SENSE failed; see if we can re-try 17513 * the original command. 17514 */ 17515 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17516 "auto request sense failed (reason=%s)\n", 17517 scsi_rname(asp->sts_rqpkt_reason)); 17518 17519 sd_reset_target(un, pktp); 17520 17521 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17522 NULL, NULL, EIO, (clock_t)0, NULL); 17523 return; 17524 } 17525 17526 /* Save the relevant sense info into the xp for the original cmd. */ 17527 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 17528 xp->xb_sense_state = asp->sts_rqpkt_state; 17529 xp->xb_sense_resid = asp->sts_rqpkt_resid; 17530 if (xp->xb_sense_state & STATE_XARQ_DONE) { 17531 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 17532 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 17533 MAX_SENSE_LENGTH); 17534 } else { 17535 if (xp->xb_sense_resid > SENSE_LENGTH) { 17536 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 17537 } else { 17538 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 17539 } 17540 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 17541 if ((((struct uscsi_cmd *) 17542 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 17543 xp->xb_sense_resid = (((struct uscsi_cmd *) 17544 (xp->xb_pktinfo))->uscsi_rqlen) - 17545 actual_len; 17546 } else { 17547 xp->xb_sense_resid = 0; 17548 } 17549 } 17550 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 17551 } 17552 17553 /* 17554 * See if we have valid sense data, if so then turn it over to 17555 * sd_decode_sense() to figure out the right course of action. 17556 */ 17557 if (sd_validate_sense_data(un, bp, xp, actual_len) == 17558 SD_SENSE_DATA_IS_VALID) { 17559 sd_decode_sense(un, bp, xp, pktp); 17560 } 17561 } 17562 17563 17564 /* 17565 * Function: sd_print_sense_failed_msg 17566 * 17567 * Description: Print log message when RQS has failed. 
17568 * 17569 * Arguments: un - ptr to associated softstate 17570 * bp - ptr to buf(9S) for the command 17571 * arg - generic message string ptr 17572 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17573 * or SD_NO_RETRY_ISSUED 17574 * 17575 * Context: May be called from interrupt context 17576 */ 17577 17578 static void 17579 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 17580 int code) 17581 { 17582 char *msgp = arg; 17583 17584 ASSERT(un != NULL); 17585 ASSERT(mutex_owned(SD_MUTEX(un))); 17586 ASSERT(bp != NULL); 17587 17588 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 17589 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 17590 } 17591 } 17592 17593 17594 /* 17595 * Function: sd_validate_sense_data 17596 * 17597 * Description: Check the given sense data for validity. 17598 * If the sense data is not valid, the command will 17599 * be either failed or retried! 17600 * 17601 * Return Code: SD_SENSE_DATA_IS_INVALID 17602 * SD_SENSE_DATA_IS_VALID 17603 * 17604 * Context: May be called from interrupt context 17605 */ 17606 17607 static int 17608 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17609 size_t actual_len) 17610 { 17611 struct scsi_extended_sense *esp; 17612 struct scsi_pkt *pktp; 17613 char *msgp = NULL; 17614 sd_ssc_t *sscp; 17615 17616 ASSERT(un != NULL); 17617 ASSERT(mutex_owned(SD_MUTEX(un))); 17618 ASSERT(bp != NULL); 17619 ASSERT(bp != un->un_rqs_bp); 17620 ASSERT(xp != NULL); 17621 ASSERT(un->un_fm_private != NULL); 17622 17623 pktp = SD_GET_PKTP(bp); 17624 ASSERT(pktp != NULL); 17625 17626 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 17627 ASSERT(sscp != NULL); 17628 17629 /* 17630 * Check the status of the RQS command (auto or manual). 17631 */ 17632 switch (xp->xb_sense_status & STATUS_MASK) { 17633 case STATUS_GOOD: 17634 break; 17635 17636 case STATUS_RESERVATION_CONFLICT: 17637 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 17638 return (SD_SENSE_DATA_IS_INVALID); 17639 17640 case STATUS_BUSY: 17641 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17642 "Busy Status on REQUEST SENSE\n"); 17643 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 17644 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 17645 return (SD_SENSE_DATA_IS_INVALID); 17646 17647 case STATUS_QFULL: 17648 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17649 "QFULL Status on REQUEST SENSE\n"); 17650 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 17651 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 17652 return (SD_SENSE_DATA_IS_INVALID); 17653 17654 case STATUS_CHECK: 17655 case STATUS_TERMINATED: 17656 msgp = "Check Condition on REQUEST SENSE\n"; 17657 goto sense_failed; 17658 17659 default: 17660 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 17661 goto sense_failed; 17662 } 17663 17664 /* 17665 * See if we got the minimum required amount of sense data. 17666 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 17667 * or less. 
17668 */ 17669 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 17670 (actual_len == 0)) { 17671 msgp = "Request Sense couldn't get sense data\n"; 17672 goto sense_failed; 17673 } 17674 17675 if (actual_len < SUN_MIN_SENSE_LENGTH) { 17676 msgp = "Not enough sense information\n"; 17677 /* Mark the ssc_flags for detecting invalid sense data */ 17678 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17679 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17680 "sense-data"); 17681 } 17682 goto sense_failed; 17683 } 17684 17685 /* 17686 * We require the extended sense data 17687 */ 17688 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 17689 if (esp->es_class != CLASS_EXTENDED_SENSE) { 17690 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17691 static char tmp[8]; 17692 static char buf[148]; 17693 char *p = (char *)(xp->xb_sense_data); 17694 int i; 17695 17696 mutex_enter(&sd_sense_mutex); 17697 (void) strcpy(buf, "undecodable sense information:"); 17698 for (i = 0; i < actual_len; i++) { 17699 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 17700 (void) strcpy(&buf[strlen(buf)], tmp); 17701 } 17702 i = strlen(buf); 17703 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 17704 17705 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) { 17706 scsi_log(SD_DEVINFO(un), sd_label, 17707 CE_WARN, buf); 17708 } 17709 mutex_exit(&sd_sense_mutex); 17710 } 17711 17712 /* Mark the ssc_flags for detecting invalid sense data */ 17713 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17714 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17715 "sense-data"); 17716 } 17717 17718 /* Note: Legacy behavior, fail the command with no retry */ 17719 sd_return_failed_command(un, bp, EIO); 17720 return (SD_SENSE_DATA_IS_INVALID); 17721 } 17722 17723 /* 17724 * Check that es_code is valid (es_class concatenated with es_code 17725 * makes up the "response code" field). es_class will always be 7, so 17726 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 17727 * format. 17728 */ 17729 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 17730 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 17731 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 17732 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 17733 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 17734 /* Mark the ssc_flags for detecting invalid sense data */ 17735 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17736 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17737 "sense-data"); 17738 } 17739 goto sense_failed; 17740 } 17741 17742 return (SD_SENSE_DATA_IS_VALID); 17743 17744 sense_failed: 17745 /* 17746 * If the request sense failed (for whatever reason), attempt 17747 * to retry the original command. 17748 */ 17749 #if defined(__i386) || defined(__amd64) 17750 /* 17751 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 17752 * sddef.h for the SPARC platform, while x86 uses one binary 17753 * for both SCSI and FC. 17754 * The SD_RETRY_DELAY value below needs to be adjusted 17755 * whenever SD_RETRY_DELAY changes in sddef.h. 17756 */ 17757 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17758 sd_print_sense_failed_msg, msgp, EIO, 17759 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 17760 #else 17761 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17762 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 17763 #endif 17764 17765 return (SD_SENSE_DATA_IS_INVALID); 17766 } 17767 17768 /* 17769 * Function: sd_decode_sense 17770 * 17771 * Description: Take recovery action(s) when SCSI Sense Data is received. 17772 * 17773 * Context: Interrupt context.
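 *
 *		scsi_sense_key() hides the difference between the two sense
 *		data formats that sd_validate_sense_data() accepts; roughly
 *		(a sketch, not the actual implementation):
 *
 *		    fixed format (response codes 0x70/0x71):
 *			key = sense[2] & 0x0F;
 *		    descriptor format (response codes 0x72/0x73):
 *			key = sense[1] & 0x0F;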
17774 */ 17775 17776 static void 17777 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17778 struct scsi_pkt *pktp) 17779 { 17780 uint8_t sense_key; 17781 17782 ASSERT(un != NULL); 17783 ASSERT(mutex_owned(SD_MUTEX(un))); 17784 ASSERT(bp != NULL); 17785 ASSERT(bp != un->un_rqs_bp); 17786 ASSERT(xp != NULL); 17787 ASSERT(pktp != NULL); 17788 17789 sense_key = scsi_sense_key(xp->xb_sense_data); 17790 17791 switch (sense_key) { 17792 case KEY_NO_SENSE: 17793 sd_sense_key_no_sense(un, bp, xp, pktp); 17794 break; 17795 case KEY_RECOVERABLE_ERROR: 17796 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 17797 bp, xp, pktp); 17798 break; 17799 case KEY_NOT_READY: 17800 sd_sense_key_not_ready(un, xp->xb_sense_data, 17801 bp, xp, pktp); 17802 break; 17803 case KEY_MEDIUM_ERROR: 17804 case KEY_HARDWARE_ERROR: 17805 sd_sense_key_medium_or_hardware_error(un, 17806 xp->xb_sense_data, bp, xp, pktp); 17807 break; 17808 case KEY_ILLEGAL_REQUEST: 17809 sd_sense_key_illegal_request(un, bp, xp, pktp); 17810 break; 17811 case KEY_UNIT_ATTENTION: 17812 sd_sense_key_unit_attention(un, xp->xb_sense_data, 17813 bp, xp, pktp); 17814 break; 17815 case KEY_WRITE_PROTECT: 17816 case KEY_VOLUME_OVERFLOW: 17817 case KEY_MISCOMPARE: 17818 sd_sense_key_fail_command(un, bp, xp, pktp); 17819 break; 17820 case KEY_BLANK_CHECK: 17821 sd_sense_key_blank_check(un, bp, xp, pktp); 17822 break; 17823 case KEY_ABORTED_COMMAND: 17824 sd_sense_key_aborted_command(un, bp, xp, pktp); 17825 break; 17826 case KEY_VENDOR_UNIQUE: 17827 case KEY_COPY_ABORTED: 17828 case KEY_EQUAL: 17829 case KEY_RESERVED: 17830 default: 17831 sd_sense_key_default(un, xp->xb_sense_data, 17832 bp, xp, pktp); 17833 break; 17834 } 17835 } 17836 17837 17838 /* 17839 * Function: sd_dump_memory 17840 * 17841 * Description: Debug logging routine to print the contents of a user provided 17842 * buffer. The output of the buffer is broken up into 256 byte 17843 * segments due to a size constraint of the scsi_log 17844 * implementation. 17845 * 17846 * Arguments: un - ptr to softstate 17847 * comp - component mask 17848 * title - "title" string to precede data when printed 17849 * data - ptr to data block to be printed 17850 * len - size of data block to be printed 17851 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 17852 * 17853 * Context: May be called from interrupt context 17854 */ 17855 17856 #define SD_DUMP_MEMORY_BUF_SIZE 256 17857 17858 static char *sd_dump_format_string[] = { 17859 " 0x%02x", 17860 " %c" 17861 }; 17862 17863 static void 17864 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 17865 int len, int fmt) 17866 { 17867 int i, j; 17868 int avail_count; 17869 int start_offset; 17870 int end_offset; 17871 size_t entry_len; 17872 char *bufp; 17873 char *local_buf; 17874 char *format_string; 17875 17876 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 17877 17878 /* 17879 * In the debug version of the driver, this function is called from a 17880 * number of places which are NOPs in the release driver. 17881 * The debug driver therefore has additional methods of filtering 17882 * debug output. 17883 */ 17884 #ifdef SDDEBUG 17885 /* 17886 * In the debug version of the driver we can reduce the amount of debug 17887 * messages by setting sd_error_level to something other than 17888 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 17889 * sd_component_mask.
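 *
 *	 With fmt set to SD_LOG_HEX, the line handed to scsi_log() looks
 *	 like this sketch (a hypothetical WRITE(10) CDB; 0x2a is the
 *	 WRITE(10) opcode, the remaining bytes are made up for
 *	 illustration):
 *
 *	     Failed CDB: 0x2a 0x00 0x00 0x12 0xd6 0x87 0x00 0x00 0x08 0x00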
17890 */ 17891 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 17892 (sd_error_level != SCSI_ERR_ALL)) { 17893 return; 17894 } 17895 if (((sd_component_mask & comp) == 0) || 17896 (sd_error_level != SCSI_ERR_ALL)) { 17897 return; 17898 } 17899 #else 17900 if (sd_error_level != SCSI_ERR_ALL) { 17901 return; 17902 } 17903 #endif 17904 17905 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 17906 bufp = local_buf; 17907 /* 17908 * Available length is the length of local_buf[], minus the 17909 * length of the title string, minus one for the ":", minus 17910 * one for the newline, minus one for the NULL terminator. 17911 * This gives the #bytes available for holding the printed 17912 * values from the given data buffer. 17913 */ 17914 if (fmt == SD_LOG_HEX) { 17915 format_string = sd_dump_format_string[0]; 17916 } else /* SD_LOG_CHAR */ { 17917 format_string = sd_dump_format_string[1]; 17918 } 17919 /* 17920 * Available count is the number of elements from the given 17921 * data buffer that we can fit into the available length. 17922 * This is based upon the size of the format string used. 17923 * Make one entry and find its size. 17924 */ 17925 (void) sprintf(bufp, format_string, data[0]); 17926 entry_len = strlen(bufp); 17927 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 17928 17929 j = 0; 17930 while (j < len) { 17931 bufp = local_buf; 17932 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 17933 start_offset = j; 17934 17935 end_offset = start_offset + avail_count; 17936 17937 (void) sprintf(bufp, "%s:", title); 17938 bufp += strlen(bufp); 17939 for (i = start_offset; ((i < end_offset) && (j < len)); 17940 i++, j++) { 17941 (void) sprintf(bufp, format_string, data[i]); 17942 bufp += entry_len; 17943 } 17944 (void) sprintf(bufp, "\n"); 17945 17946 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 17947 } 17948 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 17949 } 17950 17951 /* 17952 * Function: sd_print_sense_msg 17953 * 17954 * Description: Log a message based upon the given sense data.
17955 * 17956 * Arguments: un - ptr to associated softstate 17957 * bp - ptr to buf(9S) for the command 17958 * arg - ptr to associated sd_sense_info struct 17959 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17960 * or SD_NO_RETRY_ISSUED 17961 * 17962 * Context: May be called from interrupt context 17963 */ 17964 17965 static void 17966 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 17967 { 17968 struct sd_xbuf *xp; 17969 struct scsi_pkt *pktp; 17970 uint8_t *sensep; 17971 daddr_t request_blkno; 17972 diskaddr_t err_blkno; 17973 int severity; 17974 int pfa_flag; 17975 extern struct scsi_key_strings scsi_cmds[]; 17976 17977 ASSERT(un != NULL); 17978 ASSERT(mutex_owned(SD_MUTEX(un))); 17979 ASSERT(bp != NULL); 17980 xp = SD_GET_XBUF(bp); 17981 ASSERT(xp != NULL); 17982 pktp = SD_GET_PKTP(bp); 17983 ASSERT(pktp != NULL); 17984 ASSERT(arg != NULL); 17985 17986 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 17987 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 17988 17989 if ((code == SD_DELAYED_RETRY_ISSUED) || 17990 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 17991 severity = SCSI_ERR_RETRYABLE; 17992 } 17993 17994 /* Use absolute block number for the request block number */ 17995 request_blkno = xp->xb_blkno; 17996 17997 /* 17998 * Now try to get the error block number from the sense data 17999 */ 18000 sensep = xp->xb_sense_data; 18001 18002 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 18003 (uint64_t *)&err_blkno)) { 18004 /* 18005 * We retrieved the error block number from the information 18006 * portion of the sense data. 18007 * 18008 * For USCSI commands we are better off using the error 18009 * block no. as the requested block no. (This is the best 18010 * we can estimate.) 18011 */ 18012 if ((SD_IS_BUFIO(xp) == FALSE) && 18013 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 18014 request_blkno = err_blkno; 18015 } 18016 } else { 18017 /* 18018 * Without the es_valid bit set (for fixed format) or an 18019 * information descriptor (for descriptor format) we cannot 18020 * be certain of the error blkno, so just use the 18021 * request_blkno. 18022 */ 18023 err_blkno = (diskaddr_t)request_blkno; 18024 } 18025 18026 /* 18027 * The following will log the buffer contents for the release driver 18028 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 18029 * level is set to verbose.
	 */
	sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
	    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
	sd_dump_memory(un, SD_LOG_IO, "Sense Data",
	    (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);

	if (pfa_flag == FALSE) {
		/* This is normally only set for USCSI */
		if ((pktp->pkt_flags & FLAG_SILENT) != 0) {
			return;
		}

		if ((SD_IS_BUFIO(xp) == TRUE) &&
		    (((sd_level_mask & SD_LOGMASK_DIAG) == 0) &&
		    (severity < sd_error_level))) {
			return;
		}
	}
	/*
	 * Check for Sonoma Failover and keep a count of how many failed I/Os
	 */
	if ((SD_IS_LSI(un)) &&
	    (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) &&
	    (scsi_sense_asc(sensep) == 0x94) &&
	    (scsi_sense_ascq(sensep) == 0x01)) {
		un->un_sonoma_failure_count++;
		if (un->un_sonoma_failure_count > 1) {
			return;
		}
	}

	if (SD_FM_LOG(un) == SD_FM_LOG_NSUP ||
	    ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) &&
	    (pktp->pkt_resid == 0))) {
		scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity,
		    request_blkno, err_blkno, scsi_cmds,
		    (struct scsi_extended_sense *)sensep,
		    un->un_additional_codes, NULL);
	}
}

/*
 * Function: sd_sense_key_no_sense
 *
 * Description: Recovery action when sense data was not received.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	struct sd_sense_info si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	SD_UPDATE_ERRSTATS(un, sd_softerrs);

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}


/*
 * Function: sd_sense_key_recoverable_error
 *
 * Description: Recovery actions for a SCSI "Recovered Error" sense key.
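 *		An ATA PASSTHROUGH INFORMATION AVAILABLE (asc 0x00, ascq
 *		0x1D) simply completes the command; a FAILURE PREDICTION
 *		THRESHOLD EXCEEDED (asc 0x5D) is reported as an
 *		informational PFA message when sd_report_pfa is set.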
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_recoverable_error(struct sd_lun *un, uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;
	uint8_t asc = scsi_sense_asc(sense_datap);
	uint8_t ascq = scsi_sense_ascq(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * 0x00, 0x1D: ATA PASSTHROUGH INFORMATION AVAILABLE
	 */
	if (asc == 0x00 && ascq == 0x1D) {
		sd_return_command(un, bp);
		return;
	}

	/*
	 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED
	 */
	if ((asc == 0x5D) && (sd_report_pfa != 0)) {
		SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
		si.ssi_severity = SCSI_ERR_INFO;
		si.ssi_pfa_flag = TRUE;
	} else {
		SD_UPDATE_ERRSTATS(un, sd_softerrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_recov_err);
		si.ssi_severity = SCSI_ERR_RECOVERED;
		si.ssi_pfa_flag = FALSE;
	}

	if (pktp->pkt_resid == 0) {
		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
		sd_return_command(un, bp);
		return;
	}

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}




/*
 * Function: sd_sense_key_not_ready
 *
 * Description: Recovery actions for a SCSI "Not Ready" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_not_ready(struct sd_lun *un, uint8_t *sense_datap, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;
	uint8_t asc = scsi_sense_asc(sense_datap);
	uint8_t ascq = scsi_sense_ascq(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	/*
	 * Update error stats after first NOT READY error. Disks may have
	 * been powered down and may need to be restarted. For CDROMs,
	 * report NOT READY errors only if media is present.
	 */
	if ((ISCD(un) && (asc == 0x3A)) ||
	    (xp->xb_nr_retry_count > 0)) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
	}

	/*
	 * Just fail if the "not ready" retry limit has been reached.
	 */
	if (xp->xb_nr_retry_count >= un->un_notready_retry_count) {
		/* Special check for error message printing for removables. */
		if (un->un_f_has_removable_media && (asc == 0x04) &&
		    (ascq >= 0x04)) {
			si.ssi_severity = SCSI_ERR_ALL;
		}
		goto fail_command;
	}

	/*
	 * Check the ASC and ASCQ in the sense data as needed, to determine
	 * what to do.
	 */
	switch (asc) {
	case 0x04:	/* LOGICAL UNIT NOT READY */
		/*
		 * Disk drives that don't spin up result in a very long delay
		 * in format without warning messages. We will log a message
		 * if the error level is set to verbose.
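		 * (Here "verbose" means an sd_error_level below
		 * SCSI_ERR_RETRYABLE, as checked below.)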
		 */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "logical unit not ready, resetting disk\n");
		}

		/*
		 * There are different requirements for CDROMs and disks for
		 * the number of retries. If a CD-ROM is reporting this, it is
		 * probably reading TOC and is in the process of getting
		 * ready, so we should keep on trying for a long time to make
		 * sure that all types of media are taken into account (for
		 * some media the drive takes a long time to read TOC). For
		 * disks we do not want to retry this too many times as this
		 * can cause a long hang in format when the drive refuses to
		 * spin up (a very common failure).
		 */
		switch (ascq) {
		case 0x00:	/* LUN NOT READY, CAUSE NOT REPORTABLE */
			/*
			 * Disk drives frequently refuse to spin up which
			 * results in a very long hang in format without
			 * warning messages.
			 *
			 * Note: This code preserves the legacy behavior of
			 * comparing xb_nr_retry_count against zero for fibre
			 * channel targets instead of comparing against the
			 * un_reset_retry_count value. The reason for this
			 * discrepancy has been so utterly lost beneath the
			 * Sands of Time that even Indiana Jones could not
			 * find it.
			 */
			if (un->un_f_is_fibre == TRUE) {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_nr_retry_count > 0)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			} else {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_nr_retry_count >
				    un->un_reset_retry_count)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			}
			break;

		case 0x01:	/* LUN IS IN PROCESS OF BECOMING READY */
			/*
			 * If the target is in the process of becoming
			 * ready, just proceed with the retry. This can
			 * happen with CD-ROMs that take a long time to
			 * read TOC after a power cycle or reset.
			 */
			goto do_retry;

		case 0x02:	/* LUN NOT READY, INITIALIZING CMD REQUIRED */
			break;

		case 0x03:	/* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
			/*
			 * Retries cannot help here so just fail right away.
			 */
			goto fail_command;

		case 0x88:
			/*
			 * Vendor-unique code for T3/T4: it indicates a
			 * path problem in a multipathed config, but as far as
			 * the target driver is concerned it equates to a fatal
			 * error, so we should just fail the command right away
			 * (without printing anything to the console). If this
			 * is not a T3/T4, fall thru to the default recovery
			 * action.
			 * T3/T4 is FC only, don't need to check is_fibre
			 */
			if (SD_IS_T3(un) || SD_IS_T4(un)) {
				sd_return_failed_command(un, bp, EIO);
				return;
			}
			/* FALLTHRU */

		case 0x04:	/* LUN NOT READY, FORMAT IN PROGRESS */
		case 0x05:	/* LUN NOT READY, REBUILD IN PROGRESS */
		case 0x06:	/* LUN NOT READY, RECALCULATION IN PROGRESS */
		case 0x07:	/* LUN NOT READY, OPERATION IN PROGRESS */
		case 0x08:	/* LUN NOT READY, LONG WRITE IN PROGRESS */
		default:	/* Possible future codes in SCSI spec? */
			/*
			 * For removable-media devices, do not retry if
			 * ASCQ > 2 as these result mostly from USCSI commands
			 * on MMC devices issued to check status of an
			 * operation initiated in immediate mode. Also for
			 * ASCQ >= 4 do not print console messages as these
			 * mainly represent a user-initiated operation
			 * instead of a system failure.
			 */
			if (un->un_f_has_removable_media) {
				si.ssi_severity = SCSI_ERR_ALL;
				goto fail_command;
			}
			break;
		}

		/*
		 * As part of our recovery attempt for the NOT READY
		 * condition, we issue a START STOP UNIT command. However
		 * we want to wait for a short delay before attempting this
		 * as there may still be more commands coming back from the
		 * target with the check condition. To do this we use
		 * timeout(9F) to call sd_start_stop_unit_callback() after
		 * the delay interval expires. (sd_start_stop_unit_callback()
		 * dispatches sd_start_stop_unit_task(), which will issue
		 * the actual START STOP UNIT command.) The delay interval
		 * is one-half of the delay that we will use to retry the
		 * command that generated the NOT READY condition.
		 *
		 * Note that we could just dispatch sd_start_stop_unit_task()
		 * from here and allow it to sleep for the delay interval,
		 * but then we would be tying up the taskq thread
		 * unnecessarily for the duration of the delay.
		 *
		 * Do not issue the START STOP UNIT if the current command
		 * is already a START STOP UNIT.
		 */
		if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
			break;
		}

		/*
		 * Do not schedule the timeout if one is already pending.
		 */
		if (un->un_startstop_timeid != NULL) {
			SD_INFO(SD_LOG_ERROR, un,
			    "sd_sense_key_not_ready: restart already issued to"
			    " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
			    ddi_get_instance(SD_DEVINFO(un)));
			break;
		}

		/*
		 * Schedule the START STOP UNIT command, then queue the command
		 * for a retry.
		 *
		 * Note: A timeout is not scheduled for this retry because we
		 * want the retry to be serial with the START_STOP_UNIT. The
		 * retry will be started when the START_STOP_UNIT is completed
		 * in sd_start_stop_unit_task.
		 */
		un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
		    un, un->un_busy_timeout / 2);
		xp->xb_nr_retry_count++;
		sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
		return;

	case 0x05:	/* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "unit does not respond to selection\n");
		}
		break;

	case 0x3A:	/* MEDIUM NOT PRESENT */
		if (sd_error_level >= SCSI_ERR_FATAL) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Caddy not inserted in drive\n");
		}

		sr_ejected(un);
		un->un_mediastate = DKIO_EJECTED;
		/* The state has changed, inform the media watch routines */
		cv_broadcast(&un->un_state_cv);
		/* Just fail if no media is present in the drive. */
		goto fail_command;

	default:
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
			    "Unit not Ready. Additional sense code 0x%x\n",
			    asc);
		}
		break;
	}

do_retry:

	/*
	 * Retry the command, as some targets may report NOT READY for
	 * several seconds after being reset.
	 */
	xp->xb_nr_retry_count++;
	si.ssi_severity = SCSI_ERR_RETRYABLE;
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
	    &si, EIO, un->un_busy_timeout, NULL);

	return;

fail_command:
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}



/*
 * Function: sd_sense_key_medium_or_hardware_error
 *
 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
 *		sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_medium_or_hardware_error(struct sd_lun *un, uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;
	uint8_t sense_key = scsi_sense_key(sense_datap);
	uint8_t asc = scsi_sense_asc(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	if (sense_key == KEY_MEDIUM_ERROR) {
		SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
	}

	SD_UPDATE_ERRSTATS(un, sd_harderrs);

	if ((un->un_reset_retry_count != 0) &&
	    (xp->xb_retry_count == un->un_reset_retry_count)) {
		mutex_exit(SD_MUTEX(un));
		/* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
		if (un->un_f_allow_bus_device_reset == TRUE) {

			boolean_t try_resetting_target = B_TRUE;

			/*
			 * We need to be able to handle specific ASC when we
			 * are handling a KEY_HARDWARE_ERROR. In particular
			 * taking the default action of resetting the target
			 * may not be the appropriate way to attempt recovery.
			 * Resetting a target because of a single LUN failure
			 * victimizes all LUNs on that target.
			 *
			 * This is true for LSI arrays: if an LSI array
			 * controller returns an ASC of 0x84 (LUN Dead), we
			 * should trust it.
			 */

			if (sense_key == KEY_HARDWARE_ERROR) {
				switch (asc) {
				case 0x84:
					if (SD_IS_LSI(un)) {
						try_resetting_target = B_FALSE;
					}
					break;
				default:
					break;
				}
			}

			if (try_resetting_target == B_TRUE) {
				int reset_retval = 0;
				if (un->un_f_lun_reset_enabled == TRUE) {
					SD_TRACE(SD_LOG_IO_CORE, un,
					    "sd_sense_key_medium_or_hardware_"
					    "error: issuing RESET_LUN\n");
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					SD_TRACE(SD_LOG_IO_CORE, un,
					    "sd_sense_key_medium_or_hardware_"
					    "error: issuing RESET_TARGET\n");
					(void) scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
		}
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * This really ought to be a fatal error, but we will retry anyway
	 * as some drives report this as a spurious error.
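	 * The retry is issued immediately (zero delay) and remains subject
	 * to the standard retry-count checks.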
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}



/*
 * Function: sd_sense_key_illegal_request
 *
 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err);

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;

	/* Pointless to retry if the target thinks it's an illegal request */
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}




/*
 * Function: sd_sense_key_unit_attention
 *
 * Description: Recovery actions for a SCSI "Unit Attention" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_unit_attention(struct sd_lun *un, uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	/*
	 * For UNIT ATTENTION we allow retries for one minute. Devices
	 * like Sonoma can return UNIT ATTENTION for close to a minute
	 * under certain conditions.
	 */
	int retry_check_flag = SD_RETRIES_UA;
	boolean_t kstat_updated = B_FALSE;
	struct sd_sense_info si;
	uint8_t asc = scsi_sense_asc(sense_datap);
	uint8_t ascq = scsi_sense_ascq(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;


	switch (asc) {
	case 0x5D:	/* FAILURE PREDICTION THRESHOLD EXCEEDED */
		if (sd_report_pfa != 0) {
			SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
			si.ssi_pfa_flag = TRUE;
			retry_check_flag = SD_RETRIES_STANDARD;
			goto do_retry;
		}

		break;

	case 0x29:	/* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
		}
#ifdef _LP64
		if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) {
			if (taskq_dispatch(sd_tq, sd_reenable_dsense_task,
			    un, KM_NOSLEEP) == 0) {
				/*
				 * If we can't dispatch the task we'll just
				 * live without descriptor sense. We can
				 * try again on the next "unit attention"
				 */
				SD_ERROR(SD_LOG_ERROR, un,
				    "sd_sense_key_unit_attention: "
				    "Could not dispatch "
				    "sd_reenable_dsense_task\n");
			}
		}
#endif /* _LP64 */
		/* FALLTHRU */

	case 0x28:	/* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
		if (!un->un_f_has_removable_media) {
			break;
		}

		/*
		 * When we get a unit attention from a removable-media device,
		 * it may be in a state that will take a long time to recover
		 * (e.g., from a reset). Since we are executing in interrupt
		 * context here, we cannot wait around for the device to come
		 * back.
		 * So hand this command off to sd_media_change_task()
		 * for deferred processing under taskq thread context. (Note
		 * that the command still may be failed if a problem is
		 * encountered at a later time.)
		 */
		if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
		    KM_NOSLEEP) == 0) {
			/*
			 * Cannot dispatch the request so fail the command.
			 */
			SD_UPDATE_ERRSTATS(un, sd_harderrs);
			SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
			si.ssi_severity = SCSI_ERR_FATAL;
			sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
			sd_return_failed_command(un, bp, EIO);
		}

		/*
		 * If we failed to dispatch sd_media_change_task(), the kstats
		 * were already updated above. If the dispatch succeeded, the
		 * kstats will be updated later by sd_media_change_task() if
		 * it encounters an error. Either way, set the kstat_updated
		 * flag here.
		 */
		kstat_updated = B_TRUE;

		/*
		 * Either the command has been successfully dispatched to a
		 * task Q for retrying, or the dispatch failed. In either case
		 * do NOT retry again by calling sd_retry_command. This sets up
		 * two retries of the same command and when one completes and
		 * frees the resources the other will access freed memory,
		 * a bad thing.
		 */
		return;

	default:
		break;
	}

	/*
	 * ASC  ASCQ
	 *  2A   09	Capacity data has changed
	 *  2A   01	Mode parameters changed
	 *  3F   0E	Reported luns data has changed
	 * Arrays that support logical unit expansion should report
	 * capacity changes (2Ah/09). "Mode parameters changed" and
	 * "Reported luns data has changed" are approximations.
	 */
	if (((asc == 0x2a) && (ascq == 0x09)) ||
	    ((asc == 0x2a) && (ascq == 0x01)) ||
	    ((asc == 0x3f) && (ascq == 0x0e))) {
		if (taskq_dispatch(sd_tq, sd_target_change_task, un,
		    KM_NOSLEEP) == 0) {
			SD_ERROR(SD_LOG_ERROR, un,
			    "sd_sense_key_unit_attention: "
			    "Could not dispatch sd_target_change_task\n");
		}
	}

	/*
	 * Update kstat if we haven't done that.
	 */
	if (!kstat_updated) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
	}

do_retry:
	sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
	    EIO, SD_UA_RETRY_DELAY, NULL);
}



/*
 * Function: sd_sense_key_fail_command
 *
 * Description: Used to fail a command when we don't like the sense key that
 *		was returned.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	struct sd_sense_info si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}



/*
 * Function: sd_sense_key_blank_check
 *
 * Description: Recovery actions for a SCSI "Blank Check" sense key.
 *		Has no monetary connotation.
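 *		(BLANK CHECK is returned when, for example, a read
 *		encounters unwritten blocks on blank or write-once media.)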
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	struct sd_sense_info si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * Blank check is not fatal for removable devices, therefore
	 * it does not require a console message.
	 */
	si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL :
	    SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}




/*
 * Function: sd_sense_key_aborted_command
 *
 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	SD_UPDATE_ERRSTATS(un, sd_harderrs);

	/*
	 * This really ought to be a fatal error, but we will retry anyway
	 * as some drives report this as a spurious error.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, drv_usectohz(100000), NULL);
}



/*
 * Function: sd_sense_key_default
 *
 * Description: Default recovery action for several SCSI sense keys (basically
 *		attempts a retry).
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_default(struct sd_lun *un, uint8_t *sense_datap, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;
	uint8_t sense_key = scsi_sense_key(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_harderrs);

	/*
	 * Undecoded sense key. Attempt retries and hope that will fix
	 * the problem. Otherwise, we're dead.
	 */
	if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Unhandled Sense Key '%s'\n", sense_keys[sense_key]);
	}

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}



/*
 * Function: sd_print_retry_msg
 *
 * Description: Print a message indicating the retry action being taken.
 *
 * Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		arg - not used.
 *		flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *		or SD_NO_RETRY_ISSUED
 *
 * Context: May be called from interrupt context
 */
/* ARGSUSED */
static void
sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag)
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	char *reasonp;
	char *msgp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	ASSERT(!mutex_owned(&un->un_pm_mutex));
	mutex_enter(&un->un_pm_mutex);
	if ((un->un_state == SD_STATE_SUSPENDED) ||
	    (SD_DEVICE_IS_IN_LOW_POWER(un)) ||
	    (pktp->pkt_flags & FLAG_SILENT)) {
		mutex_exit(&un->un_pm_mutex);
		goto update_pkt_reason;
	}
	mutex_exit(&un->un_pm_mutex);

	/*
	 * Suppress messages if they are all the same pkt_reason; with
	 * TQ, many (up to 256) are returned with the same pkt_reason.
	 * If we are in panic, then suppress the retry messages.
	 */
	switch (flag) {
	case SD_NO_RETRY_ISSUED:
		msgp = "giving up";
		break;
	case SD_IMMEDIATE_RETRY_ISSUED:
	case SD_DELAYED_RETRY_ISSUED:
		if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) ||
		    ((pktp->pkt_reason == un->un_last_pkt_reason) &&
		    (sd_error_level != SCSI_ERR_ALL))) {
			return;
		}
		msgp = "retrying command";
		break;
	default:
		goto update_pkt_reason;
	}

	reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" :
	    scsi_rname(pktp->pkt_reason));

	if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "SCSI transport failed: reason '%s': %s\n", reasonp, msgp);
	}

update_pkt_reason:
	/*
	 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason.
	 * This is to prevent multiple console messages for the same failure
	 * condition. Note that un->un_last_pkt_reason is NOT restored if &
	 * when the command is retried successfully because there still may be
	 * more commands coming back with the same value of pktp->pkt_reason.
	 */
	if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) {
		un->un_last_pkt_reason = pktp->pkt_reason;
	}
}


/*
 * Function: sd_print_cmd_incomplete_msg
 *
 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason.
 *
 * Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		arg - passed to sd_print_retry_msg()
 *		code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *		or SD_NO_RETRY_ISSUED
 *
 * Context: May be called from interrupt context
 */

static void
sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg,
    int code)
{
	dev_info_t *dip;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);

	switch (code) {
	case SD_NO_RETRY_ISSUED:
		/*
		 * Command was failed. Someone turned off this target?
		 */
		if (un->un_state != SD_STATE_OFFLINE) {
			/*
			 * Suppress message if we are detaching and
			 * device has been disconnected.
			 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation
			 * private interface and not part of the DDI.
			 */
			dip = un->un_sd->sd_dev;
			if (!(DEVI_IS_DETACHING(dip) &&
			    DEVI_IS_DEVICE_REMOVED(dip))) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "disk not responding to selection\n");
			}
			New_state(un, SD_STATE_OFFLINE);
		}
		break;

	case SD_DELAYED_RETRY_ISSUED:
	case SD_IMMEDIATE_RETRY_ISSUED:
	default:
		/* Command was successfully queued for retry */
		sd_print_retry_msg(un, bp, arg, code);
		break;
	}
}


/*
 * Function: sd_pkt_reason_cmd_incomplete
 *
 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/* Do not do a reset if selection did not complete */
	/* Note: Should this not just check the bit? */
	if (pktp->pkt_state != STATE_GOT_BUS) {
		SD_UPDATE_ERRSTATS(un, sd_transerrs);
		sd_reset_target(un, pktp);
	}

	/*
	 * If the target was not successfully selected, then set
	 * SD_RETRIES_FAILFAST to indicate that we lost communication
	 * with the target, and further retries and/or commands are
	 * likely to take a long time.
	 */
	if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) {
		flag |= SD_RETRIES_FAILFAST;
	}

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	sd_retry_command(un, bp, flag,
	    sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 * Function: sd_pkt_reason_cmd_tran_err
 *
 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * Do not reset if we got a parity error, or if
	 * selection did not complete.
	 */
	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	/* Note: Should this not just check the bit for pkt_state? */
	if (((pktp->pkt_statistics & STAT_PERR) == 0) &&
	    (pktp->pkt_state != STATE_GOT_BUS)) {
		SD_UPDATE_ERRSTATS(un, sd_transerrs);
		sd_reset_target(un, pktp);
	}

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 * Function: sd_pkt_reason_cmd_reset
 *
 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason.
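 *		The command is retried as a "victim" (SD_RETRIES_VICTIM):
 *		a CMD_RESET is usually collateral from a reset aimed at
 *		another target on the same bus.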
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/* The target may still be running the command, so try to reset. */
	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/*
	 * If pkt_reason is CMD_RESET chances are that this pkt got
	 * reset because another target on this bus caused it. The target
	 * that caused it should get CMD_TIMEOUT with pkt_statistics
	 * of STAT_TIMEOUT/STAT_DEV_RESET.
	 */

	sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}




/*
 * Function: sd_pkt_reason_cmd_aborted
 *
 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/* The target may still be running the command, so try to reset. */
	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/*
	 * If pkt_reason is CMD_ABORTED chances are that this pkt got
	 * aborted because another target on this bus caused it. The target
	 * that caused it should get CMD_TIMEOUT with pkt_statistics
	 * of STAT_TIMEOUT/STAT_DEV_RESET.
	 */

	sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 * Function: sd_pkt_reason_cmd_timeout
 *
 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);


	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/*
	 * A command timeout indicates that we could not establish
	 * communication with the target, so set SD_RETRIES_FAILFAST
	 * as further retries/commands are likely to take a long time.
	 */
	sd_retry_command(un, bp,
	    (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 * Function: sd_pkt_reason_cmd_unx_bus_free
 *
 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason.
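 *		When the bus free was caused by a parity error (STAT_PERR
 *		set in pkt_statistics), no retry message is printed; see
 *		the funcp selection below.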
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ?
	    sd_print_retry_msg : NULL;

	sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
	    funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}


/*
 * Function: sd_pkt_reason_cmd_tag_reject
 *
 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	pktp->pkt_flags = 0;
	un->un_tagflags = 0;
	if (un->un_f_opt_queueing == TRUE) {
		un->un_throttle = min(un->un_throttle, 3);
	} else {
		un->un_throttle = 1;
	}
	mutex_exit(SD_MUTEX(un));
	(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
	mutex_enter(SD_MUTEX(un));

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/* Legacy behavior not to check retry counts here. */
	sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}


/*
 * Function: sd_pkt_reason_default
 *
 * Description: Default recovery actions for SCSA pkt_reason values that
 *		do not have more explicit recovery actions.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 * Function: sd_pkt_status_check_condition
 *
 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status.
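 *		(That is, the target returned CHECK CONDITION status, so
 *		sense data must be obtained, or has already been obtained
 *		via ARQ, before a recovery action can be chosen.)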
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
	    "entry: buf:0x%p xp:0x%p\n", bp, xp);

	/*
	 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
	 * command will be retried after the request sense). Otherwise, retry
	 * the command. Note: we are issuing the request sense even though the
	 * retry limit may have been reached for the failed command.
	 */
	if (un->un_f_arq_enabled == FALSE) {
		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
		    "no ARQ, sending request sense command\n");
		sd_send_request_sense_command(un, bp, pktp);
	} else {
		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
		    "ARQ, retrying request sense command\n");
#if defined(__i386) || defined(__amd64)
		/*
		 * The SD_RETRY_DELAY value used here needs to be adjusted
		 * if SD_RETRY_DELAY changes in sddef.h.
		 */
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
		    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
		    NULL);
#else
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
		    EIO, SD_RETRY_DELAY, NULL);
#endif
	}

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
}


/*
 * Function: sd_pkt_status_busy
 *
 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_busy: entry\n");

	/* If retries are exhausted, just fail the command. */
	if (xp->xb_retry_count >= un->un_busy_retry_count) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "device busy too long\n");
		sd_return_failed_command(un, bp, EIO);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_pkt_status_busy: exit\n");
		return;
	}
	xp->xb_retry_count++;

	/*
	 * Try to reset the target. However, we do not want to perform
	 * more than one reset if the device continues to fail. The reset
	 * will be performed when the retry count reaches the reset
	 * threshold. This threshold should be set such that at least
	 * one retry is issued before the reset is performed.
	 */
	if (xp->xb_retry_count ==
	    ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
		int rval = 0;
		mutex_exit(SD_MUTEX(un));
		if (un->un_f_allow_bus_device_reset == TRUE) {
			/*
			 * First try to reset the LUN; if we cannot then
			 * try to reset the target.
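			 * If both of these fail, RESET_ALL is attempted
			 * below as a last resort before the command is
			 * failed.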
19381 */ 19382 if (un->un_f_lun_reset_enabled == TRUE) { 19383 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19384 "sd_pkt_status_busy: RESET_LUN\n"); 19385 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 19386 } 19387 if (rval == 0) { 19388 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19389 "sd_pkt_status_busy: RESET_TARGET\n"); 19390 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 19391 } 19392 } 19393 if (rval == 0) { 19394 /* 19395 * If the RESET_LUN and/or RESET_TARGET failed, 19396 * try RESET_ALL 19397 */ 19398 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19399 "sd_pkt_status_busy: RESET_ALL\n"); 19400 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 19401 } 19402 mutex_enter(SD_MUTEX(un)); 19403 if (rval == 0) { 19404 /* 19405 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 19406 * At this point we give up & fail the command. 19407 */ 19408 sd_return_failed_command(un, bp, EIO); 19409 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19410 "sd_pkt_status_busy: exit (failed cmd)\n"); 19411 return; 19412 } 19413 } 19414 19415 /* 19416 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 19417 * we have already checked the retry counts above. 19418 */ 19419 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 19420 EIO, un->un_busy_timeout, NULL); 19421 19422 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19423 "sd_pkt_status_busy: exit\n"); 19424 } 19425 19426 19427 /* 19428 * Function: sd_pkt_status_reservation_conflict 19429 * 19430 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 19431 * command status. 19432 * 19433 * Context: May be called from interrupt context 19434 */ 19435 19436 static void 19437 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 19438 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19439 { 19440 ASSERT(un != NULL); 19441 ASSERT(mutex_owned(SD_MUTEX(un))); 19442 ASSERT(bp != NULL); 19443 ASSERT(xp != NULL); 19444 ASSERT(pktp != NULL); 19445 19446 /* 19447 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 19448 * conflict could be due to various reasons like incorrect keys, not 19449 * registered or not reserved etc. So, we return EACCES to the caller. 19450 */ 19451 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 19452 int cmd = SD_GET_PKT_OPCODE(pktp); 19453 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 19454 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 19455 sd_return_failed_command(un, bp, EACCES); 19456 return; 19457 } 19458 } 19459 19460 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 19461 19462 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 19463 if (sd_failfast_enable != 0) { 19464 /* By definition, we must panic here.... */ 19465 sd_panic_for_res_conflict(un); 19466 /*NOTREACHED*/ 19467 } 19468 SD_ERROR(SD_LOG_IO, un, 19469 "sd_handle_resv_conflict: Disk Reserved\n"); 19470 sd_return_failed_command(un, bp, EACCES); 19471 return; 19472 } 19473 19474 /* 19475 * 1147670: retry only if sd_retry_on_reservation_conflict 19476 * property is set (default is 1). Retries will not succeed 19477 * on a disk reserved by another initiator. HA systems 19478 * may reset this via sd.conf to avoid these retries. 19479 * 19480 * Note: The legacy return code for this failure is EIO, however EACCES 19481 * seems more appropriate for a reservation conflict. 
19482 */ 19483 if (sd_retry_on_reservation_conflict == 0) { 19484 SD_ERROR(SD_LOG_IO, un, 19485 "sd_handle_resv_conflict: Device Reserved\n"); 19486 sd_return_failed_command(un, bp, EIO); 19487 return; 19488 } 19489 19490 /* 19491 * Retry the command if we can. 19492 * 19493 * Note: The legacy return code for this failure is EIO, however EACCES 19494 * seems more appropriate for a reservation conflict. 19495 */ 19496 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 19497 (clock_t)2, NULL); 19498 } 19499 19500 19501 19502 /* 19503 * Function: sd_pkt_status_qfull 19504 * 19505 * Description: Handle a QUEUE FULL condition from the target. This can 19506 * occur if the HBA does not handle the queue full condition. 19507 * (Basically this means third-party HBAs as Sun HBAs will 19508 * handle the queue full condition.) Note that if there are 19509 * some commands already in the transport, then the queue full 19510 * has occurred because the queue for this nexus is actually 19511 * full. If there are no commands in the transport, then the 19512 * queue full is resulting from some other initiator or lun 19513 * consuming all the resources at the target. 19514 * 19515 * Context: May be called from interrupt context 19516 */ 19517 19518 static void 19519 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 19520 struct scsi_pkt *pktp) 19521 { 19522 ASSERT(un != NULL); 19523 ASSERT(mutex_owned(SD_MUTEX(un))); 19524 ASSERT(bp != NULL); 19525 ASSERT(xp != NULL); 19526 ASSERT(pktp != NULL); 19527 19528 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19529 "sd_pkt_status_qfull: entry\n"); 19530 19531 /* 19532 * Just lower the QFULL throttle and retry the command. Note that 19533 * we do not limit the number of retries here. 19534 */ 19535 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 19536 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 19537 SD_RESTART_TIMEOUT, NULL); 19538 19539 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19540 "sd_pkt_status_qfull: exit\n"); 19541 } 19542 19543 19544 /* 19545 * Function: sd_reset_target 19546 * 19547 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 19548 * RESET_TARGET, or RESET_ALL. 19549 * 19550 * Context: May be called under interrupt context. 19551 */ 19552 19553 static void 19554 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 19555 { 19556 int rval = 0; 19557 19558 ASSERT(un != NULL); 19559 ASSERT(mutex_owned(SD_MUTEX(un))); 19560 ASSERT(pktp != NULL); 19561 19562 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 19563 19564 /* 19565 * No need to reset if the transport layer has already done so. 
19566 */ 19567 if ((pktp->pkt_statistics & 19568 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 19569 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19570 "sd_reset_target: no reset\n"); 19571 return; 19572 } 19573 19574 mutex_exit(SD_MUTEX(un)); 19575 19576 if (un->un_f_allow_bus_device_reset == TRUE) { 19577 if (un->un_f_lun_reset_enabled == TRUE) { 19578 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19579 "sd_reset_target: RESET_LUN\n"); 19580 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 19581 } 19582 if (rval == 0) { 19583 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19584 "sd_reset_target: RESET_TARGET\n"); 19585 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 19586 } 19587 } 19588 19589 if (rval == 0) { 19590 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19591 "sd_reset_target: RESET_ALL\n"); 19592 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 19593 } 19594 19595 mutex_enter(SD_MUTEX(un)); 19596 19597 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 19598 } 19599 19600 /* 19601 * Function: sd_target_change_task 19602 * 19603 * Description: Handle dynamic target change 19604 * 19605 * Context: Executes in a taskq() thread context 19606 */ 19607 static void 19608 sd_target_change_task(void *arg) 19609 { 19610 struct sd_lun *un = arg; 19611 uint64_t capacity; 19612 diskaddr_t label_cap; 19613 uint_t lbasize; 19614 sd_ssc_t *ssc; 19615 19616 ASSERT(un != NULL); 19617 ASSERT(!mutex_owned(SD_MUTEX(un))); 19618 19619 if ((un->un_f_blockcount_is_valid == FALSE) || 19620 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 19621 return; 19622 } 19623 19624 ssc = sd_ssc_init(un); 19625 19626 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity, 19627 &lbasize, SD_PATH_DIRECT) != 0) { 19628 SD_ERROR(SD_LOG_ERROR, un, 19629 "sd_target_change_task: fail to read capacity\n"); 19630 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19631 goto task_exit; 19632 } 19633 19634 mutex_enter(SD_MUTEX(un)); 19635 if (capacity <= un->un_blockcount) { 19636 mutex_exit(SD_MUTEX(un)); 19637 goto task_exit; 19638 } 19639 19640 sd_update_block_info(un, lbasize, capacity); 19641 mutex_exit(SD_MUTEX(un)); 19642 19643 /* 19644 * If lun is EFI labeled and lun capacity is greater than the 19645 * capacity contained in the label, log a sys event. 
19646 */ 19647 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 19648 (void*)SD_PATH_DIRECT) == 0) { 19649 mutex_enter(SD_MUTEX(un)); 19650 if (un->un_f_blockcount_is_valid && 19651 un->un_blockcount > label_cap) { 19652 mutex_exit(SD_MUTEX(un)); 19653 sd_log_lun_expansion_event(un, KM_SLEEP); 19654 } else { 19655 mutex_exit(SD_MUTEX(un)); 19656 } 19657 } 19658 19659 task_exit: 19660 sd_ssc_fini(ssc); 19661 } 19662 19663 19664 /* 19665 * Function: sd_log_dev_status_event 19666 * 19667 * Description: Log EC_dev_status sysevent 19668 * 19669 * Context: Never called from interrupt context 19670 */ 19671 static void 19672 sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag) 19673 { 19674 int err; 19675 char *path; 19676 nvlist_t *attr_list; 19677 19678 /* Allocate and build sysevent attribute list */ 19679 err = nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, km_flag); 19680 if (err != 0) { 19681 SD_ERROR(SD_LOG_ERROR, un, 19682 "sd_log_dev_status_event: fail to allocate space\n"); 19683 return; 19684 } 19685 19686 path = kmem_alloc(MAXPATHLEN, km_flag); 19687 if (path == NULL) { 19688 nvlist_free(attr_list); 19689 SD_ERROR(SD_LOG_ERROR, un, 19690 "sd_log_dev_status_event: fail to allocate space\n"); 19691 return; 19692 } 19693 /* 19694 * Add path attribute to identify the lun. 19695 * We are using minor node 'a' as the sysevent attribute. 19696 */ 19697 (void) snprintf(path, MAXPATHLEN, "/devices"); 19698 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path)); 19699 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path), 19700 ":a"); 19701 19702 err = nvlist_add_string(attr_list, DEV_PHYS_PATH, path); 19703 if (err != 0) { 19704 nvlist_free(attr_list); 19705 kmem_free(path, MAXPATHLEN); 19706 SD_ERROR(SD_LOG_ERROR, un, 19707 "sd_log_dev_status_event: fail to add attribute\n"); 19708 return; 19709 } 19710 19711 /* Log dynamic lun expansion sysevent */ 19712 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS, 19713 esc, attr_list, NULL, km_flag); 19714 if (err != DDI_SUCCESS) { 19715 SD_ERROR(SD_LOG_ERROR, un, 19716 "sd_log_dev_status_event: fail to log sysevent\n"); 19717 } 19718 19719 nvlist_free(attr_list); 19720 kmem_free(path, MAXPATHLEN); 19721 } 19722 19723 19724 /* 19725 * Function: sd_log_lun_expansion_event 19726 * 19727 * Description: Log lun expansion sys event 19728 * 19729 * Context: Never called from interrupt context 19730 */ 19731 static void 19732 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag) 19733 { 19734 sd_log_dev_status_event(un, ESC_DEV_DLE, km_flag); 19735 } 19736 19737 19738 /* 19739 * Function: sd_log_eject_request_event 19740 * 19741 * Description: Log eject request sysevent 19742 * 19743 * Context: Never called from interrupt context 19744 */ 19745 static void 19746 sd_log_eject_request_event(struct sd_lun *un, int km_flag) 19747 { 19748 sd_log_dev_status_event(un, ESC_DEV_EJECT_REQUEST, km_flag); 19749 } 19750 19751 19752 /* 19753 * Function: sd_media_change_task 19754 * 19755 * Description: Recovery action for CDROM to become available. 
 *
 * Context: Executes in a taskq() thread context
 */

static void
sd_media_change_task(void *arg)
{
	struct scsi_pkt *pktp = arg;
	struct sd_lun *un;
	struct buf *bp;
	struct sd_xbuf *xp;
	int err = 0;
	int retry_count = 0;
	int retry_limit = SD_UNIT_ATTENTION_RETRY/10;
	struct sd_sense_info si;

	ASSERT(pktp != NULL);
	bp = (struct buf *)pktp->pkt_private;
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_f_monitor_media_state);

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;

	/*
	 * When a reset is issued on a CDROM, it takes a long time to
	 * recover. The first few attempts to read capacity and other
	 * things related to handling unit attention fail (with ASC 0x4
	 * and ASCQ 0x1). In that case we want to do enough retries, while
	 * still limiting the retries in other cases of genuine failure,
	 * such as no media in the drive.
	 */
	while (retry_count++ < retry_limit) {
		if ((err = sd_handle_mchange(un)) == 0) {
			break;
		}
		if (err == EAGAIN) {
			retry_limit = SD_UNIT_ATTENTION_RETRY;
		}
		/* Sleep for 0.5 sec. & try again */
		delay(drv_usectohz(500000));
	}

	/*
	 * Dispatch (retry or fail) the original command here,
	 * along with appropriate console messages....
	 *
	 * Must grab the mutex before calling sd_retry_command,
	 * sd_print_sense_msg and sd_return_failed_command.
	 */
	mutex_enter(SD_MUTEX(un));
	if (err != SD_CMD_SUCCESS) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
		si.ssi_severity = SCSI_ERR_FATAL;
		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
		sd_return_failed_command(un, bp, EIO);
	} else {
		sd_retry_command(un, bp, SD_RETRIES_UA, sd_print_sense_msg,
		    &si, EIO, (clock_t)0, NULL);
	}
	mutex_exit(SD_MUTEX(un));
}



/*
 * Function: sd_handle_mchange
 *
 * Description: Perform geometry validation & other recovery when CDROM
 *		has been removed from drive.
 *
 * Return Code: 0 for success
 *		errno-type return code of either sd_send_scsi_DOORLOCK() or
 *		sd_send_scsi_READ_CAPACITY()
 *
 * Context: Executes in a taskq() thread context
 */

static int
sd_handle_mchange(struct sd_lun *un)
{
	uint64_t capacity;
	uint32_t lbasize;
	int rval;
	sd_ssc_t *ssc;

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_f_monitor_media_state);

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
	    SD_PATH_DIRECT_PRIORITY);

	if (rval != 0)
		goto failed;

	mutex_enter(SD_MUTEX(un));
	sd_update_block_info(un, lbasize, capacity);

	if (un->un_errstats != NULL) {
		struct sd_errstats *stp =
		    (struct sd_errstats *)un->un_errstats->ks_data;
		stp->sd_capacity.value.ui64 = (uint64_t)
		    ((uint64_t)un->un_blockcount *
		    (uint64_t)un->un_tgt_blocksize);
	}

	/*
	 * Check if the media in the device is writable or not
	 */
	if (ISCD(un)) {
		sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY);
	}

	/*
	 * Note: Maybe let the strategy/partitioning chain worry about getting
	 * valid geometry.
	 */
	mutex_exit(SD_MUTEX(un));
	cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);


	if (cmlb_validate(un->un_cmlbhandle, 0,
	    (void *)SD_PATH_DIRECT_PRIORITY) != 0) {
		sd_ssc_fini(ssc);
		return (EIO);
	} else {
		if (un->un_f_pkstats_enabled) {
			sd_set_pstats(un);
			SD_TRACE(SD_LOG_IO_PARTITION, un,
			    "sd_handle_mchange: un:0x%p pstats created and "
			    "set\n", un);
		}
	}

	/*
	 * Try to lock the door
	 */
	rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
	    SD_PATH_DIRECT_PRIORITY);
failed:
	if (rval != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	sd_ssc_fini(ssc);
	return (rval);
}


/*
 * Function: sd_send_scsi_DOORLOCK
 *
 * Description: Issue the scsi DOOR LOCK command
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		structure for this target.
 *		flag - SD_REMOVAL_ALLOW
 *		SD_REMOVAL_PREVENT
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *		to use the USCSI "direct" chain and bypass the normal
 *		command waitq. SD_PATH_DIRECT_PRIORITY is used when this
 *		command is issued as part of an error recovery action.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep.
 */

static int
sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag)
{
	struct scsi_extended_sense sense_buf;
	union scsi_cdb cdb;
	struct uscsi_cmd ucmd_buf;
	int status;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un);

	/* already determined doorlock is not supported, fake success */
	if (un->un_f_doorlock_supported == FALSE) {
		return (0);
	}

	/*
	 * If we are ejecting and see an SD_REMOVAL_PREVENT
	 * ignore the command so we can complete the eject
	 * operation.
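	 * (EAGAIN is returned below so the caller can retry once the
	 * eject has completed.)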
19956 */
19957 if (flag == SD_REMOVAL_PREVENT) {
19958 mutex_enter(SD_MUTEX(un));
19959 if (un->un_f_ejecting == TRUE) {
19960 mutex_exit(SD_MUTEX(un));
19961 return (EAGAIN);
19962 }
19963 mutex_exit(SD_MUTEX(un));
19964 }
19965
19966 bzero(&cdb, sizeof (cdb));
19967 bzero(&ucmd_buf, sizeof (ucmd_buf));
19968
19969 cdb.scc_cmd = SCMD_DOORLOCK;
19970 cdb.cdb_opaque[4] = (uchar_t)flag;
19971
19972 ucmd_buf.uscsi_cdb = (char *)&cdb;
19973 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
19974 ucmd_buf.uscsi_bufaddr = NULL;
19975 ucmd_buf.uscsi_buflen = 0;
19976 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19977 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
19978 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
19979 ucmd_buf.uscsi_timeout = 15;
19980
19981 SD_TRACE(SD_LOG_IO, un,
19982 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n");
19983
19984 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19985 UIO_SYSSPACE, path_flag);
19986
19987 if (status == 0)
19988 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
19989
19990 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
19991 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19992 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) {
19993 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19994
19995 /* fake success and skip subsequent doorlock commands */
19996 un->un_f_doorlock_supported = FALSE;
19997 return (0);
19998 }
19999
20000 return (status);
20001 }
20002
20003 /*
20004 * Function: sd_send_scsi_READ_CAPACITY
20005 *
20006 * Description: This routine uses the scsi READ CAPACITY command to determine
20007 * the device capacity in number of blocks and the device native
20008 * block size. If this function returns a failure, then the
20009 * values in *capp and *lbap are undefined. If the capacity
20010 * returned is 0xffffffff then the lun is too large for a
20011 * normal READ CAPACITY command and the results of a
20012 * READ CAPACITY 16 will be used instead.
20013 *
20014 * Arguments: ssc - ssc contains ptr to soft state struct for the target
20015 * capp - ptr to unsigned 64-bit variable to receive the
20016 * capacity value from the command.
20017 * lbap - ptr to unsigned 32-bit variable to receive the
20018 * block size value from the command
20019 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20020 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20021 * to use the USCSI "direct" chain and bypass the normal
20022 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
20023 * command is issued as part of an error recovery action.
20024 *
20025 * Return Code: 0 - Success
20026 * EIO - IO error
20027 * EACCES - Reservation conflict detected
20028 * EAGAIN - Device is becoming ready
20029 * errno return code from sd_ssc_send()
20030 *
20031 * Context: Can sleep. Blocks until command completes.
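*
* A sketch of a typical call (mirroring the one made from
* sd_handle_mchange() above):
*
*	uint64_t capacity;
*	uint32_t lbasize;
*
*	rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
*	    SD_PATH_DIRECT_PRIORITY);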
20032 */ 20033 20034 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 20035 20036 static int 20037 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap, 20038 int path_flag) 20039 { 20040 struct scsi_extended_sense sense_buf; 20041 struct uscsi_cmd ucmd_buf; 20042 union scsi_cdb cdb; 20043 uint32_t *capacity_buf; 20044 uint64_t capacity; 20045 uint32_t lbasize; 20046 uint32_t pbsize; 20047 int status; 20048 struct sd_lun *un; 20049 20050 ASSERT(ssc != NULL); 20051 20052 un = ssc->ssc_un; 20053 ASSERT(un != NULL); 20054 ASSERT(!mutex_owned(SD_MUTEX(un))); 20055 ASSERT(capp != NULL); 20056 ASSERT(lbap != NULL); 20057 20058 SD_TRACE(SD_LOG_IO, un, 20059 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 20060 20061 /* 20062 * First send a READ_CAPACITY command to the target. 20063 * (This command is mandatory under SCSI-2.) 20064 * 20065 * Set up the CDB for the READ_CAPACITY command. The Partial 20066 * Medium Indicator bit is cleared. The address field must be 20067 * zero if the PMI bit is zero. 20068 */ 20069 bzero(&cdb, sizeof (cdb)); 20070 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20071 20072 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 20073 20074 cdb.scc_cmd = SCMD_READ_CAPACITY; 20075 20076 ucmd_buf.uscsi_cdb = (char *)&cdb; 20077 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20078 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 20079 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 20080 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20081 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 20082 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20083 ucmd_buf.uscsi_timeout = 60; 20084 20085 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20086 UIO_SYSSPACE, path_flag); 20087 20088 switch (status) { 20089 case 0: 20090 /* Return failure if we did not get valid capacity data. */ 20091 if (ucmd_buf.uscsi_resid != 0) { 20092 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20093 "sd_send_scsi_READ_CAPACITY received invalid " 20094 "capacity data"); 20095 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20096 return (EIO); 20097 } 20098 /* 20099 * Read capacity and block size from the READ CAPACITY 10 data. 20100 * This data may be adjusted later due to device specific 20101 * issues. 20102 * 20103 * According to the SCSI spec, the READ CAPACITY 10 20104 * command returns the following: 20105 * 20106 * bytes 0-3: Maximum logical block address available. 20107 * (MSB in byte:0 & LSB in byte:3) 20108 * 20109 * bytes 4-7: Block length in bytes 20110 * (MSB in byte:4 & LSB in byte:7) 20111 * 20112 */ 20113 capacity = BE_32(capacity_buf[0]); 20114 lbasize = BE_32(capacity_buf[1]); 20115 20116 /* 20117 * Done with capacity_buf 20118 */ 20119 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20120 20121 /* 20122 * if the reported capacity is set to all 0xf's, then 20123 * this disk is too large and requires SBC-2 commands. 20124 * Reissue the request using READ CAPACITY 16. 20125 */ 20126 if (capacity == 0xffffffff) { 20127 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 20128 status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, 20129 &lbasize, &pbsize, path_flag); 20130 if (status != 0) { 20131 return (status); 20132 } else { 20133 goto rc16_done; 20134 } 20135 } 20136 break; /* Success! 
*/ 20137 case EIO: 20138 switch (ucmd_buf.uscsi_status) { 20139 case STATUS_RESERVATION_CONFLICT: 20140 status = EACCES; 20141 break; 20142 case STATUS_CHECK: 20143 /* 20144 * Check condition; look for ASC/ASCQ of 0x04/0x01 20145 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 20146 */ 20147 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20148 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 20149 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 20150 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20151 return (EAGAIN); 20152 } 20153 break; 20154 default: 20155 break; 20156 } 20157 /* FALLTHRU */ 20158 default: 20159 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20160 return (status); 20161 } 20162 20163 /* 20164 * Some ATAPI CD-ROM drives report inaccurate LBA size values 20165 * (2352 and 0 are common) so for these devices always force the value 20166 * to 2048 as required by the ATAPI specs. 20167 */ 20168 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 20169 lbasize = 2048; 20170 } 20171 20172 /* 20173 * Get the maximum LBA value from the READ CAPACITY data. 20174 * Here we assume that the Partial Medium Indicator (PMI) bit 20175 * was cleared when issuing the command. This means that the LBA 20176 * returned from the device is the LBA of the last logical block 20177 * on the logical unit. The actual logical block count will be 20178 * this value plus one. 20179 */ 20180 capacity += 1; 20181 20182 /* 20183 * Currently, for removable media, the capacity is saved in terms 20184 * of un->un_sys_blocksize, so scale the capacity value to reflect this. 20185 */ 20186 if (un->un_f_has_removable_media) 20187 capacity *= (lbasize / un->un_sys_blocksize); 20188 20189 rc16_done: 20190 20191 /* 20192 * Copy the values from the READ CAPACITY command into the space 20193 * provided by the caller. 20194 */ 20195 *capp = capacity; 20196 *lbap = lbasize; 20197 20198 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 20199 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 20200 20201 /* 20202 * Both the lbasize and capacity from the device must be nonzero, 20203 * otherwise we assume that the values are not valid and return 20204 * failure to the caller. (4203735) 20205 */ 20206 if ((capacity == 0) || (lbasize == 0)) { 20207 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20208 "sd_send_scsi_READ_CAPACITY received invalid value " 20209 "capacity %llu lbasize %d", capacity, lbasize); 20210 return (EIO); 20211 } 20212 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20213 return (0); 20214 } 20215 20216 /* 20217 * Function: sd_send_scsi_READ_CAPACITY_16 20218 * 20219 * Description: This routine uses the scsi READ CAPACITY 16 command to 20220 * determine the device capacity in number of blocks and the 20221 * device native block size. If this function returns a failure, 20222 * then the values in *capp and *lbap are undefined. 20223 * This routine should be called by sd_send_scsi_READ_CAPACITY 20224 * which will apply any device specific adjustments to capacity 20225 * and lbasize. One exception is it is also called by 20226 * sd_get_media_info_ext. In that function, there is no need to 20227 * adjust the capacity and lbasize. 20228 * 20229 * Arguments: ssc - ssc contains ptr to soft state struct for the target 20230 * capp - ptr to unsigned 64-bit variable to receive the 20231 * capacity value from the command. 
20232 * lbap - ptr to unsigned 32-bit variable to receive the
20233 * block size value from the command
20234 * psp - ptr to unsigned 32-bit variable to receive the
20235 * physical block size value from the command
20236 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20237 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20238 * to use the USCSI "direct" chain and bypass the normal
20239 * command waitq. SD_PATH_DIRECT_PRIORITY is used when
20240 * this command is issued as part of an error recovery
20241 * action.
20242 *
20243 * Return Code: 0 - Success
20244 * EIO - IO error
20245 * EACCES - Reservation conflict detected
20246 * EAGAIN - Device is becoming ready
20247 * errno return code from sd_ssc_send()
20248 *
20249 * Context: Can sleep. Blocks until command completes.
20250 */
20251
20252 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
20253
20254 static int
20255 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap,
20256 uint32_t *psp, int path_flag)
20257 {
20258 struct scsi_extended_sense sense_buf;
20259 struct uscsi_cmd ucmd_buf;
20260 union scsi_cdb cdb;
20261 uint64_t *capacity16_buf;
20262 uint64_t capacity;
20263 uint32_t lbasize;
20264 uint32_t pbsize;
20265 uint32_t lbpb_exp;
20266 int status;
20267 struct sd_lun *un;
20268
20269 ASSERT(ssc != NULL);
20270
20271 un = ssc->ssc_un;
20272 ASSERT(un != NULL);
20273 ASSERT(!mutex_owned(SD_MUTEX(un)));
20274 ASSERT(capp != NULL);
20275 ASSERT(lbap != NULL);
20276
20277 SD_TRACE(SD_LOG_IO, un,
20278 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);
20279
20280 /*
20281 * First send a READ_CAPACITY_16 command to the target.
20282 *
20283 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
20284 * Medium Indicator bit is cleared. The address field must be
20285 * zero if the PMI bit is zero.
20286 */
20287 bzero(&cdb, sizeof (cdb));
20288 bzero(&ucmd_buf, sizeof (ucmd_buf));
20289
20290 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
20291
20292 ucmd_buf.uscsi_cdb = (char *)&cdb;
20293 ucmd_buf.uscsi_cdblen = CDB_GROUP4;
20294 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
20295 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
20296 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20297 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
20298 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20299 ucmd_buf.uscsi_timeout = 60;
20300
20301 /*
20302 * Read Capacity (16) is a Service Action In command. One
20303 * command byte (0x9E) is overloaded for multiple operations,
20304 * with the second CDB byte specifying the desired operation.
20305 */
20306 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
20307 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
20308
20309 /*
20310 * Fill in allocation length field
20311 */
20312 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
20313
20314 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20315 UIO_SYSSPACE, path_flag);
20316
20317 switch (status) {
20318 case 0:
20319 /* Return failure if we did not get valid capacity data. */
20320 if (ucmd_buf.uscsi_resid > 20) {
20321 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20322 "sd_send_scsi_READ_CAPACITY_16 received invalid "
20323 "capacity data");
20324 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20325 return (EIO);
20326 }
20327
20328 /*
20329 * Read capacity and block size from the READ CAPACITY 16 data.
20330 * This data may be adjusted later due to device specific
20331 * issues.
20332 * 20333 * According to the SCSI spec, the READ CAPACITY 16 20334 * command returns the following: 20335 * 20336 * bytes 0-7: Maximum logical block address available. 20337 * (MSB in byte:0 & LSB in byte:7) 20338 * 20339 * bytes 8-11: Block length in bytes 20340 * (MSB in byte:8 & LSB in byte:11) 20341 * 20342 * byte 13: LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT 20343 */ 20344 capacity = BE_64(capacity16_buf[0]); 20345 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 20346 lbpb_exp = (BE_64(capacity16_buf[1]) >> 16) & 0x0f; 20347 20348 pbsize = lbasize << lbpb_exp; 20349 20350 /* 20351 * Done with capacity16_buf 20352 */ 20353 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20354 20355 /* 20356 * if the reported capacity is set to all 0xf's, then 20357 * this disk is too large. This could only happen with 20358 * a device that supports LBAs larger than 64 bits which 20359 * are not defined by any current T10 standards. 20360 */ 20361 if (capacity == 0xffffffffffffffff) { 20362 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20363 "disk is too large"); 20364 return (EIO); 20365 } 20366 break; /* Success! */ 20367 case EIO: 20368 switch (ucmd_buf.uscsi_status) { 20369 case STATUS_RESERVATION_CONFLICT: 20370 status = EACCES; 20371 break; 20372 case STATUS_CHECK: 20373 /* 20374 * Check condition; look for ASC/ASCQ of 0x04/0x01 20375 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 20376 */ 20377 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20378 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 20379 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 20380 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20381 return (EAGAIN); 20382 } 20383 break; 20384 default: 20385 break; 20386 } 20387 /* FALLTHRU */ 20388 default: 20389 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20390 return (status); 20391 } 20392 20393 /* 20394 * Some ATAPI CD-ROM drives report inaccurate LBA size values 20395 * (2352 and 0 are common) so for these devices always force the value 20396 * to 2048 as required by the ATAPI specs. 20397 */ 20398 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 20399 lbasize = 2048; 20400 } 20401 20402 /* 20403 * Get the maximum LBA value from the READ CAPACITY 16 data. 20404 * Here we assume that the Partial Medium Indicator (PMI) bit 20405 * was cleared when issuing the command. This means that the LBA 20406 * returned from the device is the LBA of the last logical block 20407 * on the logical unit. The actual logical block count will be 20408 * this value plus one. 20409 */ 20410 capacity += 1; 20411 20412 /* 20413 * Currently, for removable media, the capacity is saved in terms 20414 * of un->un_sys_blocksize, so scale the capacity value to reflect this. 
20415 */
20416 if (un->un_f_has_removable_media)
20417 capacity *= (lbasize / un->un_sys_blocksize);
20418
20419 *capp = capacity;
20420 *lbap = lbasize;
20421 *psp = pbsize;
20422
20423 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
20424 "capacity:0x%llx lbasize:0x%x, pbsize: 0x%x\n",
20425 capacity, lbasize, pbsize);
20426
20427 if ((capacity == 0) || (lbasize == 0) || (pbsize == 0)) {
20428 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20429 "sd_send_scsi_READ_CAPACITY_16 received invalid value "
20430 "capacity %llu lbasize %d pbsize %d", capacity, lbasize, pbsize);
20431 return (EIO);
20432 }
20433
20434 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20435 return (0);
20436 }
20437
20438
20439 /*
20440 * Function: sd_send_scsi_START_STOP_UNIT
20441 *
20442 * Description: Issue a scsi START STOP UNIT command to the target.
20443 *
20444 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20445 * structure for this target.
20446 * pc_flag - SD_POWER_CONDITION
20447 * SD_START_STOP
20448 * flag - SD_TARGET_START
20449 * SD_TARGET_STOP
20450 * SD_TARGET_EJECT
20451 * SD_TARGET_CLOSE
20452 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20453 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20454 * to use the USCSI "direct" chain and bypass the normal
20455 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
20456 * command is issued as part of an error recovery action.
20457 *
20458 * Return Code: 0 - Success
20459 * EIO - IO error
20460 * EACCES - Reservation conflict detected
20461 * ENXIO - Not Ready, medium not present
20462 * errno return code from sd_ssc_send()
20463 *
20464 * Context: Can sleep.
20465 */
20466
20467 static int
20468 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, int flag,
20469 int path_flag)
20470 {
20471 struct scsi_extended_sense sense_buf;
20472 union scsi_cdb cdb;
20473 struct uscsi_cmd ucmd_buf;
20474 int status;
20475 struct sd_lun *un;
20476
20477 ASSERT(ssc != NULL);
20478 un = ssc->ssc_un;
20479 ASSERT(un != NULL);
20480 ASSERT(!mutex_owned(SD_MUTEX(un)));
20481
20482 SD_TRACE(SD_LOG_IO, un,
20483 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un);
20484
20485 if (un->un_f_check_start_stop &&
20486 (pc_flag == SD_START_STOP) &&
20487 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) &&
20488 (un->un_f_start_stop_supported != TRUE)) {
20489 return (0);
20490 }
20491
20492 /*
20493 * If we are performing an eject operation and
20494 * we receive any command other than SD_TARGET_EJECT,
20495 * we should immediately return.
20496 */
20497 if (flag != SD_TARGET_EJECT) {
20498 mutex_enter(SD_MUTEX(un));
20499 if (un->un_f_ejecting == TRUE) {
20500 mutex_exit(SD_MUTEX(un));
20501 return (EAGAIN);
20502 }
20503 mutex_exit(SD_MUTEX(un));
20504 }
20505
20506 bzero(&cdb, sizeof (cdb));
20507 bzero(&ucmd_buf, sizeof (ucmd_buf));
20508 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20509
20510 cdb.scc_cmd = SCMD_START_STOP;
20511 cdb.cdb_opaque[4] = (pc_flag == SD_POWER_CONDITION) ?
20512 (uchar_t)(flag << 4) : (uchar_t)flag; 20513 20514 ucmd_buf.uscsi_cdb = (char *)&cdb; 20515 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20516 ucmd_buf.uscsi_bufaddr = NULL; 20517 ucmd_buf.uscsi_buflen = 0; 20518 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20519 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20520 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20521 ucmd_buf.uscsi_timeout = 200; 20522 20523 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20524 UIO_SYSSPACE, path_flag); 20525 20526 switch (status) { 20527 case 0: 20528 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20529 break; /* Success! */ 20530 case EIO: 20531 switch (ucmd_buf.uscsi_status) { 20532 case STATUS_RESERVATION_CONFLICT: 20533 status = EACCES; 20534 break; 20535 case STATUS_CHECK: 20536 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 20537 switch (scsi_sense_key( 20538 (uint8_t *)&sense_buf)) { 20539 case KEY_ILLEGAL_REQUEST: 20540 status = ENOTSUP; 20541 break; 20542 case KEY_NOT_READY: 20543 if (scsi_sense_asc( 20544 (uint8_t *)&sense_buf) 20545 == 0x3A) { 20546 status = ENXIO; 20547 } 20548 break; 20549 default: 20550 break; 20551 } 20552 } 20553 break; 20554 default: 20555 break; 20556 } 20557 break; 20558 default: 20559 break; 20560 } 20561 20562 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 20563 20564 return (status); 20565 } 20566 20567 20568 /* 20569 * Function: sd_start_stop_unit_callback 20570 * 20571 * Description: timeout(9F) callback to begin recovery process for a 20572 * device that has spun down. 20573 * 20574 * Arguments: arg - pointer to associated softstate struct. 20575 * 20576 * Context: Executes in a timeout(9F) thread context 20577 */ 20578 20579 static void 20580 sd_start_stop_unit_callback(void *arg) 20581 { 20582 struct sd_lun *un = arg; 20583 ASSERT(un != NULL); 20584 ASSERT(!mutex_owned(SD_MUTEX(un))); 20585 20586 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 20587 20588 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 20589 } 20590 20591 20592 /* 20593 * Function: sd_start_stop_unit_task 20594 * 20595 * Description: Recovery procedure when a drive is spun down. 20596 * 20597 * Arguments: arg - pointer to associated softstate struct. 20598 * 20599 * Context: Executes in a taskq() thread context 20600 */ 20601 20602 static void 20603 sd_start_stop_unit_task(void *arg) 20604 { 20605 struct sd_lun *un = arg; 20606 sd_ssc_t *ssc; 20607 int power_level; 20608 int rval; 20609 20610 ASSERT(un != NULL); 20611 ASSERT(!mutex_owned(SD_MUTEX(un))); 20612 20613 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 20614 20615 /* 20616 * Some unformatted drives report not ready error, no need to 20617 * restart if format has been initiated. 20618 */ 20619 mutex_enter(SD_MUTEX(un)); 20620 if (un->un_f_format_in_progress == TRUE) { 20621 mutex_exit(SD_MUTEX(un)); 20622 return; 20623 } 20624 mutex_exit(SD_MUTEX(un)); 20625 20626 ssc = sd_ssc_init(un); 20627 /* 20628 * When a START STOP command is issued from here, it is part of a 20629 * failure recovery operation and must be issued before any other 20630 * commands, including any pending retries. Thus it must be sent 20631 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 20632 * succeeds or not, we will start I/O after the attempt. 20633 * If power condition is supported and the current power level 20634 * is capable of performing I/O, we should set the power condition 20635 * to that level. Otherwise, set the power condition to ACTIVE. 
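*
* (For example, if sd_pwr_pc.ran_perf[] reports no random-I/O
* performance at the current level, that level cannot perform I/O and
* the code below falls back to SD_SPINDLE_ACTIVE.)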
20636 */
20637 if (un->un_f_power_condition_supported) {
20638 mutex_enter(SD_MUTEX(un));
20639 ASSERT(SD_PM_IS_LEVEL_VALID(un, un->un_power_level));
20640 power_level = sd_pwr_pc.ran_perf[un->un_power_level]
20641 > 0 ? un->un_power_level : SD_SPINDLE_ACTIVE;
20642 mutex_exit(SD_MUTEX(un));
20643 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION,
20644 sd_pl2pc[power_level], SD_PATH_DIRECT_PRIORITY);
20645 } else {
20646 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
20647 SD_TARGET_START, SD_PATH_DIRECT_PRIORITY);
20648 }
20649
20650 if (rval != 0)
20651 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
20652 sd_ssc_fini(ssc);
20653 /*
20654 * The above call blocks until the START_STOP_UNIT command completes.
20655 * Now that it has completed, we must re-try the original IO that
20656 * received the NOT READY condition in the first place. There are
20657 * three possible conditions here:
20658 *
20659 * (1) The original IO is on un_retry_bp.
20660 * (2) The original IO is on the regular wait queue, and un_retry_bp
20661 * is NULL.
20662 * (3) The original IO is on the regular wait queue, and un_retry_bp
20663 * points to some other, unrelated bp.
20664 *
20665 * For each case, we must call sd_start_cmds() with un_retry_bp
20666 * as the argument. If un_retry_bp is NULL, this will initiate
20667 * processing of the regular wait queue. If un_retry_bp is not NULL,
20668 * then this will process the bp on un_retry_bp. That may or may not
20669 * be the original IO, but that does not matter: the important thing
20670 * is to keep the IO processing going at this point.
20671 *
20672 * Note: This is a very specific error recovery sequence associated
20673 * with a drive that is not spun up. We attempt a START_STOP_UNIT and
20674 * serialize the I/O with completion of the spin-up.
20675 */
20676 mutex_enter(SD_MUTEX(un));
20677 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
20678 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n",
20679 un, un->un_retry_bp);
20680 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */
20681 sd_start_cmds(un, un->un_retry_bp);
20682 mutex_exit(SD_MUTEX(un));
20683
20684 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n");
20685 }
20686
20687
20688 /*
20689 * Function: sd_send_scsi_INQUIRY
20690 *
20691 * Description: Issue the scsi INQUIRY command.
20692 *
20693 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20694 * structure for this target.
20695 * bufaddr - buffer in which to receive the INQUIRY data
20696 * buflen - size (in bytes) of the INQUIRY data buffer
20697 * evpd - EVPD bit, placed in byte 1 of the CDB
20698 * page_code - VPD page code, placed in byte 2 of the CDB
20699 * residp - ptr to receive the residual count; may be NULL
20700 *
20701 * Return Code: 0 - Success
20702 * errno return code from sd_ssc_send()
20703 *
20704 * Context: Can sleep. Does not return until command is completed.
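*
* A sketch of a typical VPD page fetch (illustrative only; page 0x80
* is the unit serial number page, and the buffer size is arbitrary):
*
*	uchar_t inq80[255];
*	size_t resid;
*
*	rval = sd_send_scsi_INQUIRY(ssc, inq80, sizeof (inq80),
*	    0x01, 0x80, &resid);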
20705 */
20706
20707 static int
20708 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen,
20709 uchar_t evpd, uchar_t page_code, size_t *residp)
20710 {
20711 union scsi_cdb cdb;
20712 struct uscsi_cmd ucmd_buf;
20713 int status;
20714 struct sd_lun *un;
20715
20716 ASSERT(ssc != NULL);
20717 un = ssc->ssc_un;
20718 ASSERT(un != NULL);
20719 ASSERT(!mutex_owned(SD_MUTEX(un)));
20720 ASSERT(bufaddr != NULL);
20721
20722 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
20723
20724 bzero(&cdb, sizeof (cdb));
20725 bzero(&ucmd_buf, sizeof (ucmd_buf));
20726 bzero(bufaddr, buflen);
20727
20728 cdb.scc_cmd = SCMD_INQUIRY;
20729 cdb.cdb_opaque[1] = evpd;
20730 cdb.cdb_opaque[2] = page_code;
20731 FORMG0COUNT(&cdb, buflen);
20732
20733 ucmd_buf.uscsi_cdb = (char *)&cdb;
20734 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20735 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
20736 ucmd_buf.uscsi_buflen = buflen;
20737 ucmd_buf.uscsi_rqbuf = NULL;
20738 ucmd_buf.uscsi_rqlen = 0;
20739 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
20740 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */
20741
20742 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20743 UIO_SYSSPACE, SD_PATH_DIRECT);
20744
20745 /*
20746 * Only handle status == 0 here; the upper-level caller
20747 * will apply a different assessment based on the context.
20748 */
20749 if (status == 0)
20750 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20751
20752 if ((status == 0) && (residp != NULL)) {
20753 *residp = ucmd_buf.uscsi_resid;
20754 }
20755
20756 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
20757
20758 return (status);
20759 }
20760
20761
20762 /*
20763 * Function: sd_send_scsi_TEST_UNIT_READY
20764 *
20765 * Description: Issue the scsi TEST UNIT READY command.
20766 * This routine can be told to set the flag USCSI_DIAGNOSE to
20767 * prevent retrying failed commands. Use this when the intent
20768 * is either to check for device readiness, to clear a Unit
20769 * Attention, or to clear any outstanding sense data.
20770 * However under specific conditions the expected behavior
20771 * is for retries to bring a device ready, so use the flag
20772 * with caution.
20773 *
20774 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20775 * structure for this target.
20776 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present
20777 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
20778 * 0: don't check for media present; do retries on cmd.
20779 *
20780 * Return Code: 0 - Success
20781 * EIO - IO error
20782 * EACCES - Reservation conflict detected
20783 * ENXIO - Not Ready, medium not present
20784 * errno return code from sd_ssc_send()
20785 *
20786 * Context: Can sleep. Does not return until command is completed.
20787 */
20788
20789 static int
20790 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag)
20791 {
20792 struct scsi_extended_sense sense_buf;
20793 union scsi_cdb cdb;
20794 struct uscsi_cmd ucmd_buf;
20795 int status;
20796 struct sd_lun *un;
20797
20798 ASSERT(ssc != NULL);
20799 un = ssc->ssc_un;
20800 ASSERT(un != NULL);
20801 ASSERT(!mutex_owned(SD_MUTEX(un)));
20802
20803 SD_TRACE(SD_LOG_IO, un,
20804 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
20805
20806 /*
20807 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
20808 * timeouts when they receive a TUR and the queue is not empty. Check
20809 * the configuration flag set during attach (indicating the drive has
20810 * this firmware bug) and un_ncmds_in_transport before issuing the
20811 * TUR. If there are
20812 * pending commands, return success; this is a bit arbitrary but is OK
20813 * for non-removables (i.e. the eliteI disks) and non-clustering
20814 * configurations.
20815 */
20816 if (un->un_f_cfg_tur_check == TRUE) {
20817 mutex_enter(SD_MUTEX(un));
20818 if (un->un_ncmds_in_transport != 0) {
20819 mutex_exit(SD_MUTEX(un));
20820 return (0);
20821 }
20822 mutex_exit(SD_MUTEX(un));
20823 }
20824
20825 bzero(&cdb, sizeof (cdb));
20826 bzero(&ucmd_buf, sizeof (ucmd_buf));
20827 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20828
20829 cdb.scc_cmd = SCMD_TEST_UNIT_READY;
20830
20831 ucmd_buf.uscsi_cdb = (char *)&cdb;
20832 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20833 ucmd_buf.uscsi_bufaddr = NULL;
20834 ucmd_buf.uscsi_buflen = 0;
20835 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20836 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20837 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
20838
20839 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */
20840 if ((flag & SD_DONT_RETRY_TUR) != 0) {
20841 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE;
20842 }
20843 ucmd_buf.uscsi_timeout = 60;
20844
20845 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20846 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT :
20847 SD_PATH_STANDARD));
20848
20849 switch (status) {
20850 case 0:
20851 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20852 break; /* Success! */
20853 case EIO:
20854 switch (ucmd_buf.uscsi_status) {
20855 case STATUS_RESERVATION_CONFLICT:
20856 status = EACCES;
20857 break;
20858 case STATUS_CHECK:
20859 if ((flag & SD_CHECK_FOR_MEDIA) == 0) {
20860 break;
20861 }
20862 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20863 (scsi_sense_key((uint8_t *)&sense_buf) ==
20864 KEY_NOT_READY) &&
20865 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) {
20866 status = ENXIO;
20867 }
20868 break;
20869 default:
20870 break;
20871 }
20872 break;
20873 default:
20874 break;
20875 }
20876
20877 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n");
20878
20879 return (status);
20880 }
20881
20882 /*
20883 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN
20884 *
20885 * Description: Issue the scsi PERSISTENT RESERVE IN command.
20886 *
20887 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20888 * structure for this target.
20889 *
20890 * Return Code: 0 - Success
20891 * EACCES
20892 * ENOTSUP
20893 * errno return code from sd_ssc_send()
20894 *
20895 * Context: Can sleep. Does not return until command is completed.
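*
* Note: usr_cmd is written unchanged into byte 1 of the CDB, so it
* must be a PERSISTENT RESERVE IN service action (SD_READ_KEYS or
* SD_READ_RESV); the ASSERT below enforces this, and the choice
* determines the format of the data returned in data_bufp.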
20896 */ 20897 20898 static int 20899 sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd, 20900 uint16_t data_len, uchar_t *data_bufp) 20901 { 20902 struct scsi_extended_sense sense_buf; 20903 union scsi_cdb cdb; 20904 struct uscsi_cmd ucmd_buf; 20905 int status; 20906 int no_caller_buf = FALSE; 20907 struct sd_lun *un; 20908 20909 ASSERT(ssc != NULL); 20910 un = ssc->ssc_un; 20911 ASSERT(un != NULL); 20912 ASSERT(!mutex_owned(SD_MUTEX(un))); 20913 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 20914 20915 SD_TRACE(SD_LOG_IO, un, 20916 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 20917 20918 bzero(&cdb, sizeof (cdb)); 20919 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20920 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20921 if (data_bufp == NULL) { 20922 /* Allocate a default buf if the caller did not give one */ 20923 ASSERT(data_len == 0); 20924 data_len = MHIOC_RESV_KEY_SIZE; 20925 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 20926 no_caller_buf = TRUE; 20927 } 20928 20929 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 20930 cdb.cdb_opaque[1] = usr_cmd; 20931 FORMG1COUNT(&cdb, data_len); 20932 20933 ucmd_buf.uscsi_cdb = (char *)&cdb; 20934 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20935 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 20936 ucmd_buf.uscsi_buflen = data_len; 20937 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20938 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20939 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20940 ucmd_buf.uscsi_timeout = 60; 20941 20942 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20943 UIO_SYSSPACE, SD_PATH_STANDARD); 20944 20945 switch (status) { 20946 case 0: 20947 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20948 20949 break; /* Success! */ 20950 case EIO: 20951 switch (ucmd_buf.uscsi_status) { 20952 case STATUS_RESERVATION_CONFLICT: 20953 status = EACCES; 20954 break; 20955 case STATUS_CHECK: 20956 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20957 (scsi_sense_key((uint8_t *)&sense_buf) == 20958 KEY_ILLEGAL_REQUEST)) { 20959 status = ENOTSUP; 20960 } 20961 break; 20962 default: 20963 break; 20964 } 20965 break; 20966 default: 20967 break; 20968 } 20969 20970 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 20971 20972 if (no_caller_buf == TRUE) { 20973 kmem_free(data_bufp, data_len); 20974 } 20975 20976 return (status); 20977 } 20978 20979 20980 /* 20981 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 20982 * 20983 * Description: This routine is the driver entry point for handling CD-ROM 20984 * multi-host persistent reservation requests (MHIOCGRP_INKEYS, 20985 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the 20986 * device. 20987 * 20988 * Arguments: ssc - ssc contains un - pointer to soft state struct 20989 * for the target. 20990 * usr_cmd SCSI-3 reservation facility command (one of 20991 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 20992 * SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_CLEAR) 20993 * usr_bufp - user provided pointer register, reserve descriptor or 20994 * preempt and abort structure (mhioc_register_t, 20995 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 20996 * 20997 * Return Code: 0 - Success 20998 * EACCES 20999 * ENOTSUP 21000 * errno return code from sd_ssc_send() 21001 * 21002 * Context: Can sleep. Does not return until command is completed. 
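*
* A sketch of a registration request (illustrative only; new_key is a
* caller-supplied array of MHIOC_RESV_KEY_SIZE bytes, normally taken
* from the MHIOCGRP_REGISTER ioctl argument):
*
*	mhioc_register_t reg;
*
*	bzero(&reg, sizeof (reg));
*	bcopy(new_key, reg.newkey.key, MHIOC_RESV_KEY_SIZE);
*	rval = sd_send_scsi_PERSISTENT_RESERVE_OUT(ssc,
*	    SD_SCSI3_REGISTER, (uchar_t *)&reg);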
21003 */ 21004 21005 static int 21006 sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd, 21007 uchar_t *usr_bufp) 21008 { 21009 struct scsi_extended_sense sense_buf; 21010 union scsi_cdb cdb; 21011 struct uscsi_cmd ucmd_buf; 21012 int status; 21013 uchar_t data_len = sizeof (sd_prout_t); 21014 sd_prout_t *prp; 21015 struct sd_lun *un; 21016 21017 ASSERT(ssc != NULL); 21018 un = ssc->ssc_un; 21019 ASSERT(un != NULL); 21020 ASSERT(!mutex_owned(SD_MUTEX(un))); 21021 ASSERT(data_len == 24); /* required by scsi spec */ 21022 21023 SD_TRACE(SD_LOG_IO, un, 21024 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 21025 21026 if (usr_bufp == NULL) { 21027 return (EINVAL); 21028 } 21029 21030 bzero(&cdb, sizeof (cdb)); 21031 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21032 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21033 prp = kmem_zalloc(data_len, KM_SLEEP); 21034 21035 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 21036 cdb.cdb_opaque[1] = usr_cmd; 21037 FORMG1COUNT(&cdb, data_len); 21038 21039 ucmd_buf.uscsi_cdb = (char *)&cdb; 21040 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 21041 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 21042 ucmd_buf.uscsi_buflen = data_len; 21043 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21044 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21045 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 21046 ucmd_buf.uscsi_timeout = 60; 21047 21048 switch (usr_cmd) { 21049 case SD_SCSI3_REGISTER: { 21050 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 21051 21052 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 21053 bcopy(ptr->newkey.key, prp->service_key, 21054 MHIOC_RESV_KEY_SIZE); 21055 prp->aptpl = ptr->aptpl; 21056 break; 21057 } 21058 case SD_SCSI3_CLEAR: { 21059 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 21060 21061 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 21062 break; 21063 } 21064 case SD_SCSI3_RESERVE: 21065 case SD_SCSI3_RELEASE: { 21066 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 21067 21068 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 21069 prp->scope_address = BE_32(ptr->scope_specific_addr); 21070 cdb.cdb_opaque[2] = ptr->type; 21071 break; 21072 } 21073 case SD_SCSI3_PREEMPTANDABORT: { 21074 mhioc_preemptandabort_t *ptr = 21075 (mhioc_preemptandabort_t *)usr_bufp; 21076 21077 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 21078 bcopy(ptr->victim_key.key, prp->service_key, 21079 MHIOC_RESV_KEY_SIZE); 21080 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 21081 cdb.cdb_opaque[2] = ptr->resvdesc.type; 21082 ucmd_buf.uscsi_flags |= USCSI_HEAD; 21083 break; 21084 } 21085 case SD_SCSI3_REGISTERANDIGNOREKEY: 21086 { 21087 mhioc_registerandignorekey_t *ptr; 21088 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 21089 bcopy(ptr->newkey.key, 21090 prp->service_key, MHIOC_RESV_KEY_SIZE); 21091 prp->aptpl = ptr->aptpl; 21092 break; 21093 } 21094 default: 21095 ASSERT(FALSE); 21096 break; 21097 } 21098 21099 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21100 UIO_SYSSPACE, SD_PATH_STANDARD); 21101 21102 switch (status) { 21103 case 0: 21104 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21105 break; /* Success! 
*/ 21106 case EIO: 21107 switch (ucmd_buf.uscsi_status) { 21108 case STATUS_RESERVATION_CONFLICT: 21109 status = EACCES; 21110 break; 21111 case STATUS_CHECK: 21112 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 21113 (scsi_sense_key((uint8_t *)&sense_buf) == 21114 KEY_ILLEGAL_REQUEST)) { 21115 status = ENOTSUP; 21116 } 21117 break; 21118 default: 21119 break; 21120 } 21121 break; 21122 default: 21123 break; 21124 } 21125 21126 kmem_free(prp, data_len); 21127 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 21128 return (status); 21129 } 21130 21131 21132 /* 21133 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 21134 * 21135 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 21136 * 21137 * Arguments: un - pointer to the target's soft state struct 21138 * dkc - pointer to the callback structure 21139 * 21140 * Return Code: 0 - success 21141 * errno-type error code 21142 * 21143 * Context: kernel thread context only. 21144 * 21145 * _______________________________________________________________ 21146 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 21147 * |FLUSH_VOLATILE| | operation | 21148 * |______________|______________|_________________________________| 21149 * | 0 | NULL | Synchronous flush on both | 21150 * | | | volatile and non-volatile cache | 21151 * |______________|______________|_________________________________| 21152 * | 1 | NULL | Synchronous flush on volatile | 21153 * | | | cache; disk drivers may suppress| 21154 * | | | flush if disk table indicates | 21155 * | | | non-volatile cache | 21156 * |______________|______________|_________________________________| 21157 * | 0 | !NULL | Asynchronous flush on both | 21158 * | | | volatile and non-volatile cache;| 21159 * |______________|______________|_________________________________| 21160 * | 1 | !NULL | Asynchronous flush on volatile | 21161 * | | | cache; disk drivers may suppress| 21162 * | | | flush if disk table indicates | 21163 * | | | non-volatile cache | 21164 * |______________|______________|_________________________________| 21165 * 21166 */ 21167 21168 static int 21169 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 21170 { 21171 struct sd_uscsi_info *uip; 21172 struct uscsi_cmd *uscmd; 21173 union scsi_cdb *cdb; 21174 struct buf *bp; 21175 int rval = 0; 21176 int is_async; 21177 21178 SD_TRACE(SD_LOG_IO, un, 21179 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 21180 21181 ASSERT(un != NULL); 21182 ASSERT(!mutex_owned(SD_MUTEX(un))); 21183 21184 if (dkc == NULL || dkc->dkc_callback == NULL) { 21185 is_async = FALSE; 21186 } else { 21187 is_async = TRUE; 21188 } 21189 21190 mutex_enter(SD_MUTEX(un)); 21191 /* check whether cache flush should be suppressed */ 21192 if (un->un_f_suppress_cache_flush == TRUE) { 21193 mutex_exit(SD_MUTEX(un)); 21194 /* 21195 * suppress the cache flush if the device is told to do 21196 * so by sd.conf or disk table 21197 */ 21198 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 21199 skip the cache flush since suppress_cache_flush is %d!\n", 21200 un->un_f_suppress_cache_flush); 21201 21202 if (is_async == TRUE) { 21203 /* invoke callback for asynchronous flush */ 21204 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 21205 } 21206 return (rval); 21207 } 21208 mutex_exit(SD_MUTEX(un)); 21209 21210 /* 21211 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 21212 * set properly 21213 */ 21214 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 21215 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 21216 21217 
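	/*
	 * un_f_sync_nv_supported is read under SD_MUTEX below because the
	 * completion path, sd_send_scsi_SYNCHRONIZE_CACHE_biodone(), may
	 * clear it concurrently when a device rejects the SYNC_NV bit.
	 */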
mutex_enter(SD_MUTEX(un)); 21218 if (dkc != NULL && un->un_f_sync_nv_supported && 21219 (dkc->dkc_flag & FLUSH_VOLATILE)) { 21220 /* 21221 * if the device supports SYNC_NV bit, turn on 21222 * the SYNC_NV bit to only flush volatile cache 21223 */ 21224 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 21225 } 21226 mutex_exit(SD_MUTEX(un)); 21227 21228 /* 21229 * First get some memory for the uscsi_cmd struct and cdb 21230 * and initialize for SYNCHRONIZE_CACHE cmd. 21231 */ 21232 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 21233 uscmd->uscsi_cdblen = CDB_GROUP1; 21234 uscmd->uscsi_cdb = (caddr_t)cdb; 21235 uscmd->uscsi_bufaddr = NULL; 21236 uscmd->uscsi_buflen = 0; 21237 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 21238 uscmd->uscsi_rqlen = SENSE_LENGTH; 21239 uscmd->uscsi_rqresid = SENSE_LENGTH; 21240 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 21241 uscmd->uscsi_timeout = sd_io_time; 21242 21243 /* 21244 * Allocate an sd_uscsi_info struct and fill it with the info 21245 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 21246 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 21247 * since we allocate the buf here in this function, we do not 21248 * need to preserve the prior contents of b_private. 21249 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 21250 */ 21251 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 21252 uip->ui_flags = SD_PATH_DIRECT; 21253 uip->ui_cmdp = uscmd; 21254 21255 bp = getrbuf(KM_SLEEP); 21256 bp->b_private = uip; 21257 21258 /* 21259 * Setup buffer to carry uscsi request. 21260 */ 21261 bp->b_flags = B_BUSY; 21262 bp->b_bcount = 0; 21263 bp->b_blkno = 0; 21264 21265 if (is_async == TRUE) { 21266 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 21267 uip->ui_dkc = *dkc; 21268 } 21269 21270 bp->b_edev = SD_GET_DEV(un); 21271 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 21272 21273 /* 21274 * Unset un_f_sync_cache_required flag 21275 */ 21276 mutex_enter(SD_MUTEX(un)); 21277 un->un_f_sync_cache_required = FALSE; 21278 mutex_exit(SD_MUTEX(un)); 21279 21280 (void) sd_uscsi_strategy(bp); 21281 21282 /* 21283 * If synchronous request, wait for completion 21284 * If async just return and let b_iodone callback 21285 * cleanup. 21286 * NOTE: On return, u_ncmds_in_driver will be decremented, 21287 * but it was also incremented in sd_uscsi_strategy(), so 21288 * we should be ok. 21289 */ 21290 if (is_async == FALSE) { 21291 (void) biowait(bp); 21292 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 21293 } 21294 21295 return (rval); 21296 } 21297 21298 21299 static int 21300 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 21301 { 21302 struct sd_uscsi_info *uip; 21303 struct uscsi_cmd *uscmd; 21304 uint8_t *sense_buf; 21305 struct sd_lun *un; 21306 int status; 21307 union scsi_cdb *cdb; 21308 21309 uip = (struct sd_uscsi_info *)(bp->b_private); 21310 ASSERT(uip != NULL); 21311 21312 uscmd = uip->ui_cmdp; 21313 ASSERT(uscmd != NULL); 21314 21315 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 21316 ASSERT(sense_buf != NULL); 21317 21318 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 21319 ASSERT(un != NULL); 21320 21321 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 21322 21323 status = geterror(bp); 21324 switch (status) { 21325 case 0: 21326 break; /* Success! 
*/ 21327 case EIO: 21328 switch (uscmd->uscsi_status) { 21329 case STATUS_RESERVATION_CONFLICT: 21330 /* Ignore reservation conflict */ 21331 status = 0; 21332 goto done; 21333 21334 case STATUS_CHECK: 21335 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 21336 (scsi_sense_key(sense_buf) == 21337 KEY_ILLEGAL_REQUEST)) { 21338 /* Ignore Illegal Request error */ 21339 if (cdb->cdb_un.tag&SD_SYNC_NV_BIT) { 21340 mutex_enter(SD_MUTEX(un)); 21341 un->un_f_sync_nv_supported = FALSE; 21342 mutex_exit(SD_MUTEX(un)); 21343 status = 0; 21344 SD_TRACE(SD_LOG_IO, un, 21345 "un_f_sync_nv_supported \ 21346 is set to false.\n"); 21347 goto done; 21348 } 21349 21350 mutex_enter(SD_MUTEX(un)); 21351 un->un_f_sync_cache_supported = FALSE; 21352 mutex_exit(SD_MUTEX(un)); 21353 SD_TRACE(SD_LOG_IO, un, 21354 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \ 21355 un_f_sync_cache_supported set to false \ 21356 with asc = %x, ascq = %x\n", 21357 scsi_sense_asc(sense_buf), 21358 scsi_sense_ascq(sense_buf)); 21359 status = ENOTSUP; 21360 goto done; 21361 } 21362 break; 21363 default: 21364 break; 21365 } 21366 /* FALLTHRU */ 21367 default: 21368 /* 21369 * Turn on the un_f_sync_cache_required flag 21370 * since the SYNC CACHE command failed 21371 */ 21372 mutex_enter(SD_MUTEX(un)); 21373 un->un_f_sync_cache_required = TRUE; 21374 mutex_exit(SD_MUTEX(un)); 21375 21376 /* 21377 * Don't log an error message if this device 21378 * has removable media. 21379 */ 21380 if (!un->un_f_has_removable_media) { 21381 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 21382 "SYNCHRONIZE CACHE command failed (%d)\n", status); 21383 } 21384 break; 21385 } 21386 21387 done: 21388 if (uip->ui_dkc.dkc_callback != NULL) { 21389 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 21390 } 21391 21392 ASSERT((bp->b_flags & B_REMAPPED) == 0); 21393 freerbuf(bp); 21394 kmem_free(uip, sizeof (struct sd_uscsi_info)); 21395 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 21396 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 21397 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 21398 21399 return (status); 21400 } 21401 21402 21403 /* 21404 * Function: sd_send_scsi_GET_CONFIGURATION 21405 * 21406 * Description: Issues the get configuration command to the device. 21407 * Called from sd_check_for_writable_cd & sd_get_media_info 21408 * caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN 21409 * Arguments: ssc 21410 * ucmdbuf 21411 * rqbuf 21412 * rqbuflen 21413 * bufaddr 21414 * buflen 21415 * path_flag 21416 * 21417 * Return Code: 0 - Success 21418 * errno return code from sd_ssc_send() 21419 * 21420 * Context: Can sleep. Does not return until command is completed. 21421 * 21422 */ 21423 21424 static int 21425 sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf, 21426 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 21427 int path_flag) 21428 { 21429 char cdb[CDB_GROUP1]; 21430 int status; 21431 struct sd_lun *un; 21432 21433 ASSERT(ssc != NULL); 21434 un = ssc->ssc_un; 21435 ASSERT(un != NULL); 21436 ASSERT(!mutex_owned(SD_MUTEX(un))); 21437 ASSERT(bufaddr != NULL); 21438 ASSERT(ucmdbuf != NULL); 21439 ASSERT(rqbuf != NULL); 21440 21441 SD_TRACE(SD_LOG_IO, un, 21442 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 21443 21444 bzero(cdb, sizeof (cdb)); 21445 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 21446 bzero(rqbuf, rqbuflen); 21447 bzero(bufaddr, buflen); 21448 21449 /* 21450 * Set up cdb field for the get configuration command. 
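* Per MMC, byte 1 (0x02) sets the RT field so that the feature header
* and only the single feature descriptor named in bytes 2-3 are
* returned; byte 8 is the allocation length (SD_PROFILE_HEADER_LEN
* here).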
21451 */ 21452 cdb[0] = SCMD_GET_CONFIGURATION; 21453 cdb[1] = 0x02; /* Requested Type */ 21454 cdb[8] = SD_PROFILE_HEADER_LEN; 21455 ucmdbuf->uscsi_cdb = cdb; 21456 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 21457 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 21458 ucmdbuf->uscsi_buflen = buflen; 21459 ucmdbuf->uscsi_timeout = sd_io_time; 21460 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 21461 ucmdbuf->uscsi_rqlen = rqbuflen; 21462 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 21463 21464 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 21465 UIO_SYSSPACE, path_flag); 21466 21467 switch (status) { 21468 case 0: 21469 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21470 break; /* Success! */ 21471 case EIO: 21472 switch (ucmdbuf->uscsi_status) { 21473 case STATUS_RESERVATION_CONFLICT: 21474 status = EACCES; 21475 break; 21476 default: 21477 break; 21478 } 21479 break; 21480 default: 21481 break; 21482 } 21483 21484 if (status == 0) { 21485 SD_DUMP_MEMORY(un, SD_LOG_IO, 21486 "sd_send_scsi_GET_CONFIGURATION: data", 21487 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 21488 } 21489 21490 SD_TRACE(SD_LOG_IO, un, 21491 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 21492 21493 return (status); 21494 } 21495 21496 /* 21497 * Function: sd_send_scsi_feature_GET_CONFIGURATION 21498 * 21499 * Description: Issues the get configuration command to the device to 21500 * retrieve a specific feature. Called from 21501 * sd_check_for_writable_cd & sd_set_mmc_caps. 21502 * Arguments: ssc 21503 * ucmdbuf 21504 * rqbuf 21505 * rqbuflen 21506 * bufaddr 21507 * buflen 21508 * feature 21509 * 21510 * Return Code: 0 - Success 21511 * errno return code from sd_ssc_send() 21512 * 21513 * Context: Can sleep. Does not return until command is completed. 21514 * 21515 */ 21516 static int 21517 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf, 21518 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 21519 char feature, int path_flag) 21520 { 21521 char cdb[CDB_GROUP1]; 21522 int status; 21523 struct sd_lun *un; 21524 21525 ASSERT(ssc != NULL); 21526 un = ssc->ssc_un; 21527 ASSERT(un != NULL); 21528 ASSERT(!mutex_owned(SD_MUTEX(un))); 21529 ASSERT(bufaddr != NULL); 21530 ASSERT(ucmdbuf != NULL); 21531 ASSERT(rqbuf != NULL); 21532 21533 SD_TRACE(SD_LOG_IO, un, 21534 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 21535 21536 bzero(cdb, sizeof (cdb)); 21537 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 21538 bzero(rqbuf, rqbuflen); 21539 bzero(bufaddr, buflen); 21540 21541 /* 21542 * Set up cdb field for the get configuration command. 21543 */ 21544 cdb[0] = SCMD_GET_CONFIGURATION; 21545 cdb[1] = 0x02; /* Requested Type */ 21546 cdb[3] = feature; 21547 cdb[8] = buflen; 21548 ucmdbuf->uscsi_cdb = cdb; 21549 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 21550 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 21551 ucmdbuf->uscsi_buflen = buflen; 21552 ucmdbuf->uscsi_timeout = sd_io_time; 21553 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 21554 ucmdbuf->uscsi_rqlen = rqbuflen; 21555 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 21556 21557 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 21558 UIO_SYSSPACE, path_flag); 21559 21560 switch (status) { 21561 case 0: 21562 21563 break; /* Success! 
*/ 21564 case EIO: 21565 switch (ucmdbuf->uscsi_status) { 21566 case STATUS_RESERVATION_CONFLICT: 21567 status = EACCES; 21568 break; 21569 default: 21570 break; 21571 } 21572 break; 21573 default: 21574 break; 21575 } 21576 21577 if (status == 0) { 21578 SD_DUMP_MEMORY(un, SD_LOG_IO, 21579 "sd_send_scsi_feature_GET_CONFIGURATION: data", 21580 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 21581 } 21582 21583 SD_TRACE(SD_LOG_IO, un, 21584 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 21585 21586 return (status); 21587 } 21588 21589 21590 /* 21591 * Function: sd_send_scsi_MODE_SENSE 21592 * 21593 * Description: Utility function for issuing a scsi MODE SENSE command. 21594 * Note: This routine uses a consistent implementation for Group0, 21595 * Group1, and Group2 commands across all platforms. ATAPI devices 21596 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 21597 * 21598 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21599 * structure for this target. 21600 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 21601 * CDB_GROUP[1|2] (10 byte). 21602 * bufaddr - buffer for page data retrieved from the target. 21603 * buflen - size of page to be retrieved. 21604 * page_code - page code of data to be retrieved from the target. 21605 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 21606 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 21607 * to use the USCSI "direct" chain and bypass the normal 21608 * command waitq. 21609 * 21610 * Return Code: 0 - Success 21611 * errno return code from sd_ssc_send() 21612 * 21613 * Context: Can sleep. Does not return until command is completed. 21614 */ 21615 21616 static int 21617 sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 21618 size_t buflen, uchar_t page_code, int path_flag) 21619 { 21620 struct scsi_extended_sense sense_buf; 21621 union scsi_cdb cdb; 21622 struct uscsi_cmd ucmd_buf; 21623 int status; 21624 int headlen; 21625 struct sd_lun *un; 21626 21627 ASSERT(ssc != NULL); 21628 un = ssc->ssc_un; 21629 ASSERT(un != NULL); 21630 ASSERT(!mutex_owned(SD_MUTEX(un))); 21631 ASSERT(bufaddr != NULL); 21632 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 21633 (cdbsize == CDB_GROUP2)); 21634 21635 SD_TRACE(SD_LOG_IO, un, 21636 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 21637 21638 bzero(&cdb, sizeof (cdb)); 21639 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21640 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21641 bzero(bufaddr, buflen); 21642 21643 if (cdbsize == CDB_GROUP0) { 21644 cdb.scc_cmd = SCMD_MODE_SENSE; 21645 cdb.cdb_opaque[2] = page_code; 21646 FORMG0COUNT(&cdb, buflen); 21647 headlen = MODE_HEADER_LENGTH; 21648 } else { 21649 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 21650 cdb.cdb_opaque[2] = page_code; 21651 FORMG1COUNT(&cdb, buflen); 21652 headlen = MODE_HEADER_LENGTH_GRP2; 21653 } 21654 21655 ASSERT(headlen <= buflen); 21656 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21657 21658 ucmd_buf.uscsi_cdb = (char *)&cdb; 21659 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21660 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21661 ucmd_buf.uscsi_buflen = buflen; 21662 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21663 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21664 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 21665 ucmd_buf.uscsi_timeout = 60; 21666 21667 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21668 UIO_SYSSPACE, path_flag); 21669 21670 switch (status) { 21671 case 0: 21672 /* 21673 * sr_check_wp() uses 
the 0x3f page code and checks the header of the
21674 * mode page to determine if the target device is write-protected.
21675 * But some USB devices return 0 bytes for the 0x3f page code. In
21676 * that case, make sure that at least the mode page header is
21677 * returned.
21678 */
21679 if (buflen - ucmd_buf.uscsi_resid < headlen) {
21680 status = EIO;
21681 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
21682 "mode page header is not returned");
21683 }
21684 break; /* Success! */
21685 case EIO:
21686 switch (ucmd_buf.uscsi_status) {
21687 case STATUS_RESERVATION_CONFLICT:
21688 status = EACCES;
21689 break;
21690 default:
21691 break;
21692 }
21693 break;
21694 default:
21695 break;
21696 }
21697
21698 if (status == 0) {
21699 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
21700 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
21701 }
21702 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");
21703
21704 return (status);
21705 }
21706
21707
21708 /*
21709 * Function: sd_send_scsi_MODE_SELECT
21710 *
21711 * Description: Utility function for issuing a scsi MODE SELECT command.
21712 * Note: This routine uses a consistent implementation for Group0,
21713 * Group1, and Group2 commands across all platforms. ATAPI devices
21714 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
21715 *
21716 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21717 * structure for this target.
21718 * cdbsize - size of the CDB to be used (CDB_GROUP0 (6 byte) or
21719 * CDB_GROUP[1|2] (10 byte)).
21720 * bufaddr - buffer containing the page data to send to the target.
21721 * buflen - size of the page data to be transferred.
21722 * save_page - boolean to determine whether the SP bit should be set.
21723 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21724 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21725 * to use the USCSI "direct" chain and bypass the normal
21726 * command waitq.
21727 *
21728 * Return Code: 0 - Success
21729 * errno return code from sd_ssc_send()
21730 *
21731 * Context: Can sleep. Does not return until command is completed.
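*
* A sketch of the usual read-modify-write pattern (illustrative only;
* real callers size the buffer from the mode page header):
*
*	(void) sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, buf, buflen,
*	    page_code, SD_PATH_DIRECT);
*	(modify the page data in buf)
*	(void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, buf, buflen,
*	    SD_SAVE_PAGE, SD_PATH_DIRECT);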
21732 */
21733
21734 static int
21735 sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
21736 size_t buflen, uchar_t save_page, int path_flag)
21737 {
21738 struct scsi_extended_sense sense_buf;
21739 union scsi_cdb cdb;
21740 struct uscsi_cmd ucmd_buf;
21741 int status;
21742 struct sd_lun *un;
21743
21744 ASSERT(ssc != NULL);
21745 un = ssc->ssc_un;
21746 ASSERT(un != NULL);
21747 ASSERT(!mutex_owned(SD_MUTEX(un)));
21748 ASSERT(bufaddr != NULL);
21749 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
21750 (cdbsize == CDB_GROUP2));
21751
21752 SD_TRACE(SD_LOG_IO, un,
21753 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);
21754
21755 bzero(&cdb, sizeof (cdb));
21756 bzero(&ucmd_buf, sizeof (ucmd_buf));
21757 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21758
21759 /* Set the PF bit for many third party drives */
21760 cdb.cdb_opaque[1] = 0x10;
21761
21762 /* Set the savepage(SP) bit if given */
21763 if (save_page == SD_SAVE_PAGE) {
21764 cdb.cdb_opaque[1] |= 0x01;
21765 }
21766
21767 if (cdbsize == CDB_GROUP0) {
21768 cdb.scc_cmd = SCMD_MODE_SELECT;
21769 FORMG0COUNT(&cdb, buflen);
21770 } else {
21771 cdb.scc_cmd = SCMD_MODE_SELECT_G1;
21772 FORMG1COUNT(&cdb, buflen);
21773 }
21774
21775 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
21776
21777 ucmd_buf.uscsi_cdb = (char *)&cdb;
21778 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
21779 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
21780 ucmd_buf.uscsi_buflen = buflen;
21781 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21782 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21783 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
21784 ucmd_buf.uscsi_timeout = 60;
21785
21786 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21787 UIO_SYSSPACE, path_flag);
21788
21789 switch (status) {
21790 case 0:
21791 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21792 break; /* Success! */
21793 case EIO:
21794 switch (ucmd_buf.uscsi_status) {
21795 case STATUS_RESERVATION_CONFLICT:
21796 status = EACCES;
21797 break;
21798 default:
21799 break;
21800 }
21801 break;
21802 default:
21803 break;
21804 }
21805
21806 if (status == 0) {
21807 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
21808 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
21809 }
21810 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");
21811
21812 return (status);
21813 }
21814
21815
21816 /*
21817 * Function: sd_send_scsi_RDWR
21818 *
21819 * Description: Issue a scsi READ or WRITE command with the given parameters.
21820 *
21821 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21822 * structure for this target.
21823 * cmd: SCMD_READ or SCMD_WRITE
21824 * bufaddr: Address of caller's buffer for the RDWR data
21825 * buflen: Length of caller's buffer for the RDWR data.
21826 * start_block: Block number for the start of the RDWR operation.
21827 * (Assumes target-native block size.)
21830 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21831 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21832 * to use the USCSI "direct" chain and bypass the normal
21833 * command waitq.
21834 *
21835 * Return Code: 0 - Success
21836 * errno return code from sd_ssc_send()
21837 *
21838 * Context: Can sleep. Does not return until command is completed.
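*
* Note: the CDB group is chosen from start_block below: Group 0
* (6-byte) when the LBA fits in 21 bits and the device is not ATAPI,
* Group 1 (10-byte) for LBAs up to 32 bits, and Group 4 (16-byte)
* beyond that.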
21839 */ 21840 21841 static int 21842 sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 21843 size_t buflen, daddr_t start_block, int path_flag) 21844 { 21845 struct scsi_extended_sense sense_buf; 21846 union scsi_cdb cdb; 21847 struct uscsi_cmd ucmd_buf; 21848 uint32_t block_count; 21849 int status; 21850 int cdbsize; 21851 uchar_t flag; 21852 struct sd_lun *un; 21853 21854 ASSERT(ssc != NULL); 21855 un = ssc->ssc_un; 21856 ASSERT(un != NULL); 21857 ASSERT(!mutex_owned(SD_MUTEX(un))); 21858 ASSERT(bufaddr != NULL); 21859 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 21860 21861 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 21862 21863 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 21864 return (EINVAL); 21865 } 21866 21867 mutex_enter(SD_MUTEX(un)); 21868 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 21869 mutex_exit(SD_MUTEX(un)); 21870 21871 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 21872 21873 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 21874 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 21875 bufaddr, buflen, start_block, block_count); 21876 21877 bzero(&cdb, sizeof (cdb)); 21878 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21879 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21880 21881 /* Compute CDB size to use */ 21882 if (start_block > 0xffffffff) 21883 cdbsize = CDB_GROUP4; 21884 else if ((start_block & 0xFFE00000) || 21885 (un->un_f_cfg_is_atapi == TRUE)) 21886 cdbsize = CDB_GROUP1; 21887 else 21888 cdbsize = CDB_GROUP0; 21889 21890 switch (cdbsize) { 21891 case CDB_GROUP0: /* 6-byte CDBs */ 21892 cdb.scc_cmd = cmd; 21893 FORMG0ADDR(&cdb, start_block); 21894 FORMG0COUNT(&cdb, block_count); 21895 break; 21896 case CDB_GROUP1: /* 10-byte CDBs */ 21897 cdb.scc_cmd = cmd | SCMD_GROUP1; 21898 FORMG1ADDR(&cdb, start_block); 21899 FORMG1COUNT(&cdb, block_count); 21900 break; 21901 case CDB_GROUP4: /* 16-byte CDBs */ 21902 cdb.scc_cmd = cmd | SCMD_GROUP4; 21903 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 21904 FORMG4COUNT(&cdb, block_count); 21905 break; 21906 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 21907 default: 21908 /* All others reserved */ 21909 return (EINVAL); 21910 } 21911 21912 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 21913 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21914 21915 ucmd_buf.uscsi_cdb = (char *)&cdb; 21916 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21917 ucmd_buf.uscsi_bufaddr = bufaddr; 21918 ucmd_buf.uscsi_buflen = buflen; 21919 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21920 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21921 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 21922 ucmd_buf.uscsi_timeout = 60; 21923 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21924 UIO_SYSSPACE, path_flag); 21925 21926 switch (status) { 21927 case 0: 21928 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21929 break; /* Success! */ 21930 case EIO: 21931 switch (ucmd_buf.uscsi_status) { 21932 case STATUS_RESERVATION_CONFLICT: 21933 status = EACCES; 21934 break; 21935 default: 21936 break; 21937 } 21938 break; 21939 default: 21940 break; 21941 } 21942 21943 if (status == 0) { 21944 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 21945 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21946 } 21947 21948 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 21949 21950 return (status); 21951 } 21952 21953 21954 /* 21955 * Function: sd_send_scsi_LOG_SENSE 21956 * 21957 * Description: Issue a scsi LOG_SENSE command with the given parameters. 
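 *
 *		The page_code and page_control arguments select the log page
 *		and the page control field, param_ptr is the parameter
 *		pointer, bufaddr/buflen describe the caller's return buffer,
 *		and path_flag is as for the other sd_send_scsi_* helpers. In
 *		the Group 1 CDB built below, byte 2 carries
 *		(page_control << 6) | page_code, bytes 5-6 carry param_ptr,
 *		and the allocation length is set with FORMG1COUNT().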
21958 * 21959 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21960 * structure for this target. 21961 * 21962 * Return Code: 0 - Success 21963 * errno return code from sd_ssc_send() 21964 * 21965 * Context: Can sleep. Does not return until command is completed. 21966 */ 21967 21968 static int 21969 sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen, 21970 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, int path_flag) 21971 { 21972 struct scsi_extended_sense sense_buf; 21973 union scsi_cdb cdb; 21974 struct uscsi_cmd ucmd_buf; 21975 int status; 21976 struct sd_lun *un; 21977 21978 ASSERT(ssc != NULL); 21979 un = ssc->ssc_un; 21980 ASSERT(un != NULL); 21981 ASSERT(!mutex_owned(SD_MUTEX(un))); 21982 21983 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 21984 21985 bzero(&cdb, sizeof (cdb)); 21986 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21987 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21988 21989 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 21990 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 21991 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 21992 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 21993 FORMG1COUNT(&cdb, buflen); 21994 21995 ucmd_buf.uscsi_cdb = (char *)&cdb; 21996 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 21997 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21998 ucmd_buf.uscsi_buflen = buflen; 21999 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 22000 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 22001 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 22002 ucmd_buf.uscsi_timeout = 60; 22003 22004 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 22005 UIO_SYSSPACE, path_flag); 22006 22007 switch (status) { 22008 case 0: 22009 break; 22010 case EIO: 22011 switch (ucmd_buf.uscsi_status) { 22012 case STATUS_RESERVATION_CONFLICT: 22013 status = EACCES; 22014 break; 22015 case STATUS_CHECK: 22016 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 22017 (scsi_sense_key((uint8_t *)&sense_buf) == 22018 KEY_ILLEGAL_REQUEST) && 22019 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 22020 /* 22021 * ASC 0x24: INVALID FIELD IN CDB 22022 */ 22023 switch (page_code) { 22024 case START_STOP_CYCLE_PAGE: 22025 /* 22026 * The start stop cycle counter is 22027 * implemented as page 0x31 in earlier 22028 * generation disks. In new generation 22029 * disks the start stop cycle counter is 22030 * implemented as page 0xE. To properly 22031 * handle this case if an attempt for 22032 * log page 0xE is made and fails we 22033 * will try again using page 0x31. 22034 * 22035 * Network storage BU committed to 22036 * maintain the page 0x31 for this 22037 * purpose and will not have any other 22038 * page implemented with page code 0x31 22039 * until all disks transition to the 22040 * standard page. 
22041 */ 22042 mutex_enter(SD_MUTEX(un)); 22043 un->un_start_stop_cycle_page = 22044 START_STOP_CYCLE_VU_PAGE; 22045 cdb.cdb_opaque[2] = 22046 (char)(page_control << 6) | 22047 un->un_start_stop_cycle_page; 22048 mutex_exit(SD_MUTEX(un)); 22049 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22050 status = sd_ssc_send( 22051 ssc, &ucmd_buf, FKIOCTL, 22052 UIO_SYSSPACE, path_flag); 22053 22054 break; 22055 case TEMPERATURE_PAGE: 22056 status = ENOTTY; 22057 break; 22058 default: 22059 break; 22060 } 22061 } 22062 break; 22063 default: 22064 break; 22065 } 22066 break; 22067 default: 22068 break; 22069 } 22070 22071 if (status == 0) { 22072 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 22073 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 22074 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 22075 } 22076 22077 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 22078 22079 return (status); 22080 } 22081 22082 22083 /* 22084 * Function: sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION 22085 * 22086 * Description: Issue the scsi GET EVENT STATUS NOTIFICATION command. 22087 * 22088 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 22089 * structure for this target. 22090 * bufaddr 22091 * buflen 22092 * class_req 22093 * 22094 * Return Code: 0 - Success 22095 * errno return code from sd_ssc_send() 22096 * 22097 * Context: Can sleep. Does not return until command is completed. 22098 */ 22099 22100 static int 22101 sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc, uchar_t *bufaddr, 22102 size_t buflen, uchar_t class_req) 22103 { 22104 union scsi_cdb cdb; 22105 struct uscsi_cmd ucmd_buf; 22106 int status; 22107 struct sd_lun *un; 22108 22109 ASSERT(ssc != NULL); 22110 un = ssc->ssc_un; 22111 ASSERT(un != NULL); 22112 ASSERT(!mutex_owned(SD_MUTEX(un))); 22113 ASSERT(bufaddr != NULL); 22114 22115 SD_TRACE(SD_LOG_IO, un, 22116 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: entry: un:0x%p\n", un); 22117 22118 bzero(&cdb, sizeof (cdb)); 22119 bzero(&ucmd_buf, sizeof (ucmd_buf)); 22120 bzero(bufaddr, buflen); 22121 22122 cdb.scc_cmd = SCMD_GET_EVENT_STATUS_NOTIFICATION; 22123 cdb.cdb_opaque[1] = 1; /* polled */ 22124 cdb.cdb_opaque[4] = class_req; 22125 FORMG1COUNT(&cdb, buflen); 22126 22127 ucmd_buf.uscsi_cdb = (char *)&cdb; 22128 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 22129 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 22130 ucmd_buf.uscsi_buflen = buflen; 22131 ucmd_buf.uscsi_rqbuf = NULL; 22132 ucmd_buf.uscsi_rqlen = 0; 22133 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 22134 ucmd_buf.uscsi_timeout = 60; 22135 22136 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 22137 UIO_SYSSPACE, SD_PATH_DIRECT); 22138 22139 /* 22140 * Only handle status == 0, the upper-level caller 22141 * will put different assessment based on the context. 22142 */ 22143 if (status == 0) { 22144 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 22145 22146 if (ucmd_buf.uscsi_resid != 0) { 22147 status = EIO; 22148 } 22149 } 22150 22151 SD_TRACE(SD_LOG_IO, un, 22152 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: exit\n"); 22153 22154 return (status); 22155 } 22156 22157 22158 static boolean_t 22159 sd_gesn_media_data_valid(uchar_t *data) 22160 { 22161 uint16_t len; 22162 22163 len = (data[1] << 8) | data[0]; 22164 return ((len >= 6) && 22165 ((data[2] & SD_GESN_HEADER_NEA) == 0) && 22166 ((data[2] & SD_GESN_HEADER_CLASS) == SD_GESN_MEDIA_CLASS) && 22167 ((data[3] & (1 << SD_GESN_MEDIA_CLASS)) != 0)); 22168 } 22169 22170 22171 /* 22172 * Function: sdioctl 22173 * 22174 * Description: Driver's ioctl(9e) entry point function. 
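 *
 *		For illustration only (hypothetical device path), a userland
 *		consumer might block until media is inserted with:
 *
 *			int fd = open("/dev/rdsk/c0t2d0s2",
 *			    O_RDONLY | O_NDELAY);
 *			enum dkio_state state = DKIO_NONE;
 *			do {
 *				if (ioctl(fd, DKIOCSTATE, &state) != 0)
 *					break;
 *			} while (state != DKIO_INSERTED);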
22175 * 22176 * Arguments: dev - device number 22177 * cmd - ioctl operation to be performed 22178 * arg - user argument, contains data to be set or reference 22179 * parameter for get 22180 * flag - bit flag, indicating open settings, 32/64 bit type 22181 * cred_p - user credential pointer 22182 * rval_p - calling process return value (OPT) 22183 * 22184 * Return Code: EINVAL 22185 * ENOTTY 22186 * ENXIO 22187 * EIO 22188 * EFAULT 22189 * ENOTSUP 22190 * EPERM 22191 * 22192 * Context: Called from the device switch at normal priority. 22193 */ 22194 22195 static int 22196 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 22197 { 22198 struct sd_lun *un = NULL; 22199 int err = 0; 22200 int i = 0; 22201 cred_t *cr; 22202 int tmprval = EINVAL; 22203 boolean_t is_valid; 22204 sd_ssc_t *ssc; 22205 22206 /* 22207 * All device accesses go thru sdstrategy where we check on suspend 22208 * status 22209 */ 22210 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22211 return (ENXIO); 22212 } 22213 22214 ASSERT(!mutex_owned(SD_MUTEX(un))); 22215 22216 /* Initialize sd_ssc_t for internal uscsi commands */ 22217 ssc = sd_ssc_init(un); 22218 22219 is_valid = SD_IS_VALID_LABEL(un); 22220 22221 /* 22222 * Moved this wait from sd_uscsi_strategy to here for 22223 * reasons of deadlock prevention. Internal driver commands, 22224 * specifically those to change a devices power level, result 22225 * in a call to sd_uscsi_strategy. 22226 */ 22227 mutex_enter(SD_MUTEX(un)); 22228 while ((un->un_state == SD_STATE_SUSPENDED) || 22229 (un->un_state == SD_STATE_PM_CHANGING)) { 22230 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 22231 } 22232 /* 22233 * Twiddling the counter here protects commands from now 22234 * through to the top of sd_uscsi_strategy. Without the 22235 * counter inc. a power down, for example, could get in 22236 * after the above check for state is made and before 22237 * execution gets to the top of sd_uscsi_strategy. 22238 * That would cause problems. 
22239 */ 22240 un->un_ncmds_in_driver++; 22241 22242 if (!is_valid && 22243 (flag & (FNDELAY | FNONBLOCK))) { 22244 switch (cmd) { 22245 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 22246 case DKIOCGVTOC: 22247 case DKIOCGEXTVTOC: 22248 case DKIOCGAPART: 22249 case DKIOCPARTINFO: 22250 case DKIOCEXTPARTINFO: 22251 case DKIOCSGEOM: 22252 case DKIOCSAPART: 22253 case DKIOCGETEFI: 22254 case DKIOCPARTITION: 22255 case DKIOCSVTOC: 22256 case DKIOCSEXTVTOC: 22257 case DKIOCSETEFI: 22258 case DKIOCGMBOOT: 22259 case DKIOCSMBOOT: 22260 case DKIOCG_PHYGEOM: 22261 case DKIOCG_VIRTGEOM: 22262 #if defined(__i386) || defined(__amd64) 22263 case DKIOCSETEXTPART: 22264 #endif 22265 /* let cmlb handle it */ 22266 goto skip_ready_valid; 22267 22268 case CDROMPAUSE: 22269 case CDROMRESUME: 22270 case CDROMPLAYMSF: 22271 case CDROMPLAYTRKIND: 22272 case CDROMREADTOCHDR: 22273 case CDROMREADTOCENTRY: 22274 case CDROMSTOP: 22275 case CDROMSTART: 22276 case CDROMVOLCTRL: 22277 case CDROMSUBCHNL: 22278 case CDROMREADMODE2: 22279 case CDROMREADMODE1: 22280 case CDROMREADOFFSET: 22281 case CDROMSBLKMODE: 22282 case CDROMGBLKMODE: 22283 case CDROMGDRVSPEED: 22284 case CDROMSDRVSPEED: 22285 case CDROMCDDA: 22286 case CDROMCDXA: 22287 case CDROMSUBCODE: 22288 if (!ISCD(un)) { 22289 un->un_ncmds_in_driver--; 22290 ASSERT(un->un_ncmds_in_driver >= 0); 22291 mutex_exit(SD_MUTEX(un)); 22292 err = ENOTTY; 22293 goto done_without_assess; 22294 } 22295 break; 22296 case FDEJECT: 22297 case DKIOCEJECT: 22298 case CDROMEJECT: 22299 if (!un->un_f_eject_media_supported) { 22300 un->un_ncmds_in_driver--; 22301 ASSERT(un->un_ncmds_in_driver >= 0); 22302 mutex_exit(SD_MUTEX(un)); 22303 err = ENOTTY; 22304 goto done_without_assess; 22305 } 22306 break; 22307 case DKIOCFLUSHWRITECACHE: 22308 mutex_exit(SD_MUTEX(un)); 22309 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 22310 if (err != 0) { 22311 mutex_enter(SD_MUTEX(un)); 22312 un->un_ncmds_in_driver--; 22313 ASSERT(un->un_ncmds_in_driver >= 0); 22314 mutex_exit(SD_MUTEX(un)); 22315 err = EIO; 22316 goto done_quick_assess; 22317 } 22318 mutex_enter(SD_MUTEX(un)); 22319 /* FALLTHROUGH */ 22320 case DKIOCREMOVABLE: 22321 case DKIOCHOTPLUGGABLE: 22322 case DKIOCINFO: 22323 case DKIOCGMEDIAINFO: 22324 case DKIOCGMEDIAINFOEXT: 22325 case DKIOCSOLIDSTATE: 22326 case MHIOCENFAILFAST: 22327 case MHIOCSTATUS: 22328 case MHIOCTKOWN: 22329 case MHIOCRELEASE: 22330 case MHIOCGRP_INKEYS: 22331 case MHIOCGRP_INRESV: 22332 case MHIOCGRP_REGISTER: 22333 case MHIOCGRP_CLEAR: 22334 case MHIOCGRP_RESERVE: 22335 case MHIOCGRP_PREEMPTANDABORT: 22336 case MHIOCGRP_REGISTERANDIGNOREKEY: 22337 case CDROMCLOSETRAY: 22338 case USCSICMD: 22339 goto skip_ready_valid; 22340 default: 22341 break; 22342 } 22343 22344 mutex_exit(SD_MUTEX(un)); 22345 err = sd_ready_and_valid(ssc, SDPART(dev)); 22346 mutex_enter(SD_MUTEX(un)); 22347 22348 if (err != SD_READY_VALID) { 22349 switch (cmd) { 22350 case DKIOCSTATE: 22351 case CDROMGDRVSPEED: 22352 case CDROMSDRVSPEED: 22353 case FDEJECT: /* for eject command */ 22354 case DKIOCEJECT: 22355 case CDROMEJECT: 22356 case DKIOCREMOVABLE: 22357 case DKIOCHOTPLUGGABLE: 22358 break; 22359 default: 22360 if (un->un_f_has_removable_media) { 22361 err = ENXIO; 22362 } else { 22363 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 22364 if (err == SD_RESERVED_BY_OTHERS) { 22365 err = EACCES; 22366 } else { 22367 err = EIO; 22368 } 22369 } 22370 un->un_ncmds_in_driver--; 22371 ASSERT(un->un_ncmds_in_driver >= 0); 22372 mutex_exit(SD_MUTEX(un)); 22373 22374 goto done_without_assess; 22375 } 22376 
} 22377 } 22378 22379 skip_ready_valid: 22380 mutex_exit(SD_MUTEX(un)); 22381 22382 switch (cmd) { 22383 case DKIOCINFO: 22384 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 22385 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 22386 break; 22387 22388 case DKIOCGMEDIAINFO: 22389 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 22390 err = sd_get_media_info(dev, (caddr_t)arg, flag); 22391 break; 22392 22393 case DKIOCGMEDIAINFOEXT: 22394 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFOEXT\n"); 22395 err = sd_get_media_info_ext(dev, (caddr_t)arg, flag); 22396 break; 22397 22398 case DKIOCGGEOM: 22399 case DKIOCGVTOC: 22400 case DKIOCGEXTVTOC: 22401 case DKIOCGAPART: 22402 case DKIOCPARTINFO: 22403 case DKIOCEXTPARTINFO: 22404 case DKIOCSGEOM: 22405 case DKIOCSAPART: 22406 case DKIOCGETEFI: 22407 case DKIOCPARTITION: 22408 case DKIOCSVTOC: 22409 case DKIOCSEXTVTOC: 22410 case DKIOCSETEFI: 22411 case DKIOCGMBOOT: 22412 case DKIOCSMBOOT: 22413 case DKIOCG_PHYGEOM: 22414 case DKIOCG_VIRTGEOM: 22415 #if defined(__i386) || defined(__amd64) 22416 case DKIOCSETEXTPART: 22417 #endif 22418 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 22419 22420 /* TUR should spin up */ 22421 22422 if (un->un_f_has_removable_media) 22423 err = sd_send_scsi_TEST_UNIT_READY(ssc, 22424 SD_CHECK_FOR_MEDIA); 22425 22426 else 22427 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 22428 22429 if (err != 0) 22430 goto done_with_assess; 22431 22432 err = cmlb_ioctl(un->un_cmlbhandle, dev, 22433 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 22434 22435 if ((err == 0) && 22436 ((cmd == DKIOCSETEFI) || 22437 ((un->un_f_pkstats_enabled) && 22438 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC || 22439 cmd == DKIOCSEXTVTOC)))) { 22440 22441 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 22442 (void *)SD_PATH_DIRECT); 22443 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 22444 sd_set_pstats(un); 22445 SD_TRACE(SD_LOG_IO_PARTITION, un, 22446 "sd_ioctl: un:0x%p pstats created and " 22447 "set\n", un); 22448 } 22449 } 22450 22451 if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) || 22452 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 22453 22454 mutex_enter(SD_MUTEX(un)); 22455 if (un->un_f_devid_supported && 22456 (un->un_f_opt_fab_devid == TRUE)) { 22457 if (un->un_devid == NULL) { 22458 sd_register_devid(ssc, SD_DEVINFO(un), 22459 SD_TARGET_IS_UNRESERVED); 22460 } else { 22461 /* 22462 * The device id for this disk 22463 * has been fabricated. The 22464 * device id must be preserved 22465 * by writing it back out to 22466 * disk. 
22467 */ 22468 if (sd_write_deviceid(ssc) != 0) { 22469 ddi_devid_free(un->un_devid); 22470 un->un_devid = NULL; 22471 } 22472 } 22473 } 22474 mutex_exit(SD_MUTEX(un)); 22475 } 22476 22477 break; 22478 22479 case DKIOCLOCK: 22480 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 22481 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 22482 SD_PATH_STANDARD); 22483 goto done_with_assess; 22484 22485 case DKIOCUNLOCK: 22486 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 22487 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 22488 SD_PATH_STANDARD); 22489 goto done_with_assess; 22490 22491 case DKIOCSTATE: { 22492 enum dkio_state state; 22493 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 22494 22495 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 22496 err = EFAULT; 22497 } else { 22498 err = sd_check_media(dev, state); 22499 if (err == 0) { 22500 if (ddi_copyout(&un->un_mediastate, (void *)arg, 22501 sizeof (int), flag) != 0) 22502 err = EFAULT; 22503 } 22504 } 22505 break; 22506 } 22507 22508 case DKIOCREMOVABLE: 22509 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 22510 i = un->un_f_has_removable_media ? 1 : 0; 22511 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22512 err = EFAULT; 22513 } else { 22514 err = 0; 22515 } 22516 break; 22517 22518 case DKIOCSOLIDSTATE: 22519 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSOLIDSTATE\n"); 22520 i = un->un_f_is_solid_state ? 1 : 0; 22521 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22522 err = EFAULT; 22523 } else { 22524 err = 0; 22525 } 22526 break; 22527 22528 case DKIOCHOTPLUGGABLE: 22529 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 22530 i = un->un_f_is_hotpluggable ? 1 : 0; 22531 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22532 err = EFAULT; 22533 } else { 22534 err = 0; 22535 } 22536 break; 22537 22538 case DKIOCREADONLY: 22539 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREADONLY\n"); 22540 i = 0; 22541 if ((ISCD(un) && !un->un_f_mmc_writable_media) || 22542 (sr_check_wp(dev) != 0)) { 22543 i = 1; 22544 } 22545 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22546 err = EFAULT; 22547 } else { 22548 err = 0; 22549 } 22550 break; 22551 22552 case DKIOCGTEMPERATURE: 22553 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 22554 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 22555 break; 22556 22557 case MHIOCENFAILFAST: 22558 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 22559 if ((err = drv_priv(cred_p)) == 0) { 22560 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 22561 } 22562 break; 22563 22564 case MHIOCTKOWN: 22565 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 22566 if ((err = drv_priv(cred_p)) == 0) { 22567 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 22568 } 22569 break; 22570 22571 case MHIOCRELEASE: 22572 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 22573 if ((err = drv_priv(cred_p)) == 0) { 22574 err = sd_mhdioc_release(dev); 22575 } 22576 break; 22577 22578 case MHIOCSTATUS: 22579 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 22580 if ((err = drv_priv(cred_p)) == 0) { 22581 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) { 22582 case 0: 22583 err = 0; 22584 break; 22585 case EACCES: 22586 *rval_p = 1; 22587 err = 0; 22588 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22589 break; 22590 default: 22591 err = EIO; 22592 goto done_with_assess; 22593 } 22594 } 22595 break; 22596 22597 case MHIOCQRESERVE: 22598 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 22599 if ((err = drv_priv(cred_p)) == 0) { 22600 err = sd_reserve_release(dev, SD_RESERVE); 22601 } 22602 break; 22603 22604 
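	/*
	 * The MHIOCGRP_* ioctls below implement SCSI-3 persistent
	 * reservations: each copies in a small mhioc_* structure and maps it
	 * onto the matching PERSISTENT RESERVE OUT service action via
	 * sd_send_scsi_PERSISTENT_RESERVE_OUT(), and each is rejected with
	 * ENOTSUP while the target is operating under SCSI-2 reservations.
	 * A userland registration might look like this (sketch only; field
	 * names per sys/mhd.h, key value hypothetical):
	 *
	 *	mhioc_register_t reg;
	 *	bzero(&reg, sizeof (reg));
	 *	bcopy(mykey, reg.newkey.key, MHIOC_RESV_KEY_SIZE);
	 *	if (ioctl(fd, MHIOCGRP_REGISTER, &reg) != 0)
	 *		perror("MHIOCGRP_REGISTER");
	 */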
case MHIOCREREGISTERDEVID: 22605 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 22606 if (drv_priv(cred_p) == EPERM) { 22607 err = EPERM; 22608 } else if (!un->un_f_devid_supported) { 22609 err = ENOTTY; 22610 } else { 22611 err = sd_mhdioc_register_devid(dev); 22612 } 22613 break; 22614 22615 case MHIOCGRP_INKEYS: 22616 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 22617 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 22618 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22619 err = ENOTSUP; 22620 } else { 22621 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 22622 flag); 22623 } 22624 } 22625 break; 22626 22627 case MHIOCGRP_INRESV: 22628 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 22629 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 22630 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22631 err = ENOTSUP; 22632 } else { 22633 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 22634 } 22635 } 22636 break; 22637 22638 case MHIOCGRP_REGISTER: 22639 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 22640 if ((err = drv_priv(cred_p)) != EPERM) { 22641 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22642 err = ENOTSUP; 22643 } else if (arg != NULL) { 22644 mhioc_register_t reg; 22645 if (ddi_copyin((void *)arg, ®, 22646 sizeof (mhioc_register_t), flag) != 0) { 22647 err = EFAULT; 22648 } else { 22649 err = 22650 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22651 ssc, SD_SCSI3_REGISTER, 22652 (uchar_t *)®); 22653 if (err != 0) 22654 goto done_with_assess; 22655 } 22656 } 22657 } 22658 break; 22659 22660 case MHIOCGRP_CLEAR: 22661 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_CLEAR\n"); 22662 if ((err = drv_priv(cred_p)) != EPERM) { 22663 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22664 err = ENOTSUP; 22665 } else if (arg != NULL) { 22666 mhioc_register_t reg; 22667 if (ddi_copyin((void *)arg, ®, 22668 sizeof (mhioc_register_t), flag) != 0) { 22669 err = EFAULT; 22670 } else { 22671 err = 22672 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22673 ssc, SD_SCSI3_CLEAR, 22674 (uchar_t *)®); 22675 if (err != 0) 22676 goto done_with_assess; 22677 } 22678 } 22679 } 22680 break; 22681 22682 case MHIOCGRP_RESERVE: 22683 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 22684 if ((err = drv_priv(cred_p)) != EPERM) { 22685 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22686 err = ENOTSUP; 22687 } else if (arg != NULL) { 22688 mhioc_resv_desc_t resv_desc; 22689 if (ddi_copyin((void *)arg, &resv_desc, 22690 sizeof (mhioc_resv_desc_t), flag) != 0) { 22691 err = EFAULT; 22692 } else { 22693 err = 22694 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22695 ssc, SD_SCSI3_RESERVE, 22696 (uchar_t *)&resv_desc); 22697 if (err != 0) 22698 goto done_with_assess; 22699 } 22700 } 22701 } 22702 break; 22703 22704 case MHIOCGRP_PREEMPTANDABORT: 22705 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 22706 if ((err = drv_priv(cred_p)) != EPERM) { 22707 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22708 err = ENOTSUP; 22709 } else if (arg != NULL) { 22710 mhioc_preemptandabort_t preempt_abort; 22711 if (ddi_copyin((void *)arg, &preempt_abort, 22712 sizeof (mhioc_preemptandabort_t), 22713 flag) != 0) { 22714 err = EFAULT; 22715 } else { 22716 err = 22717 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22718 ssc, SD_SCSI3_PREEMPTANDABORT, 22719 (uchar_t *)&preempt_abort); 22720 if (err != 0) 22721 goto done_with_assess; 22722 } 22723 } 22724 } 22725 break; 22726 22727 case MHIOCGRP_REGISTERANDIGNOREKEY: 22728 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 22729 if 
((err = drv_priv(cred_p)) != EPERM) { 22730 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22731 err = ENOTSUP; 22732 } else if (arg != NULL) { 22733 mhioc_registerandignorekey_t r_and_i; 22734 if (ddi_copyin((void *)arg, (void *)&r_and_i, 22735 sizeof (mhioc_registerandignorekey_t), 22736 flag) != 0) { 22737 err = EFAULT; 22738 } else { 22739 err = 22740 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22741 ssc, SD_SCSI3_REGISTERANDIGNOREKEY, 22742 (uchar_t *)&r_and_i); 22743 if (err != 0) 22744 goto done_with_assess; 22745 } 22746 } 22747 } 22748 break; 22749 22750 case USCSICMD: 22751 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 22752 cr = ddi_get_cred(); 22753 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 22754 err = EPERM; 22755 } else { 22756 enum uio_seg uioseg; 22757 22758 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : 22759 UIO_USERSPACE; 22760 if (un->un_f_format_in_progress == TRUE) { 22761 err = EAGAIN; 22762 break; 22763 } 22764 22765 err = sd_ssc_send(ssc, 22766 (struct uscsi_cmd *)arg, 22767 flag, uioseg, SD_PATH_STANDARD); 22768 if (err != 0) 22769 goto done_with_assess; 22770 else 22771 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 22772 } 22773 break; 22774 22775 case CDROMPAUSE: 22776 case CDROMRESUME: 22777 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 22778 if (!ISCD(un)) { 22779 err = ENOTTY; 22780 } else { 22781 err = sr_pause_resume(dev, cmd); 22782 } 22783 break; 22784 22785 case CDROMPLAYMSF: 22786 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 22787 if (!ISCD(un)) { 22788 err = ENOTTY; 22789 } else { 22790 err = sr_play_msf(dev, (caddr_t)arg, flag); 22791 } 22792 break; 22793 22794 case CDROMPLAYTRKIND: 22795 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 22796 #if defined(__i386) || defined(__amd64) 22797 /* 22798 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 22799 */ 22800 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 22801 #else 22802 if (!ISCD(un)) { 22803 #endif 22804 err = ENOTTY; 22805 } else { 22806 err = sr_play_trkind(dev, (caddr_t)arg, flag); 22807 } 22808 break; 22809 22810 case CDROMREADTOCHDR: 22811 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 22812 if (!ISCD(un)) { 22813 err = ENOTTY; 22814 } else { 22815 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 22816 } 22817 break; 22818 22819 case CDROMREADTOCENTRY: 22820 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 22821 if (!ISCD(un)) { 22822 err = ENOTTY; 22823 } else { 22824 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 22825 } 22826 break; 22827 22828 case CDROMSTOP: 22829 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 22830 if (!ISCD(un)) { 22831 err = ENOTTY; 22832 } else { 22833 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 22834 SD_TARGET_STOP, SD_PATH_STANDARD); 22835 goto done_with_assess; 22836 } 22837 break; 22838 22839 case CDROMSTART: 22840 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 22841 if (!ISCD(un)) { 22842 err = ENOTTY; 22843 } else { 22844 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 22845 SD_TARGET_START, SD_PATH_STANDARD); 22846 goto done_with_assess; 22847 } 22848 break; 22849 22850 case CDROMCLOSETRAY: 22851 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 22852 if (!ISCD(un)) { 22853 err = ENOTTY; 22854 } else { 22855 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 22856 SD_TARGET_CLOSE, SD_PATH_STANDARD); 22857 goto done_with_assess; 22858 } 22859 break; 22860 22861 case FDEJECT: /* for eject command */ 22862 case DKIOCEJECT: 22863 case CDROMEJECT: 22864 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 22865 if 
(!un->un_f_eject_media_supported) {
			err = ENOTTY;
		} else {
			err = sr_eject(dev);
		}
		break;

	case CDROMVOLCTRL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSUBCHNL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_subchannel(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE2:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_cfg_is_atapi == TRUE) {
			/*
			 * If the drive supports READ CD, use that instead of
			 * switching the LBA size via a MODE SELECT
			 * Block Descriptor.
			 */
			err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
		} else {
			err = sr_read_mode2(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE1:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_mode1(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADOFFSET:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_sony_session_offset(dev, (caddr_t)arg,
			    flag);
		}
		break;

	case CDROMSBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
		/*
		 * There is no means of changing the block size in the case
		 * of ATAPI drives, so return ENOTTY if the drive type is
		 * ATAPI.
		 */
		if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {

			/*
			 * MMC devices do not support changing the
			 * logical block size.
			 *
			 * Note: EINVAL is being returned instead of ENOTTY to
			 * maintain consistency with the original mmc
			 * driver update.
			 */
			err = EINVAL;
		} else {
			mutex_enter(SD_MUTEX(un));
			if ((!(un->un_exclopen & (1 << SDPART(dev)))) ||
			    (un->un_ncmds_in_transport > 0)) {
				mutex_exit(SD_MUTEX(un));
				err = EINVAL;
			} else {
				mutex_exit(SD_MUTEX(un));
				err = sr_change_blkmode(dev, cmd, arg, flag);
			}
		}
		break;

	case CDROMGBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if ((un->un_f_cfg_is_atapi != FALSE) &&
		    (un->un_f_blockcount_is_valid != FALSE)) {
			/*
			 * Drive is an ATAPI drive, so return the target block
			 * size for ATAPI drives, since we cannot change the
			 * blocksize on ATAPI drives. Used primarily to detect
			 * if an ATAPI cdrom is present.
			 */
			if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
			    sizeof (int), flag) != 0) {
				err = EFAULT;
			} else {
				err = 0;
			}

		} else {
			/*
			 * Drive supports changing block sizes via a Mode
			 * Select.
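			 * The requested size is carried in the block
			 * descriptor of the MODE SELECT parameter list (see
			 * the CDROMREADMODE2 note above); sr_change_blkmode()
			 * rewrites that descriptor with the new block length.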
			 */
			err = sr_change_blkmode(dev, cmd, arg, flag);
		}
		break;

	case CDROMGDRVSPEED:
	case CDROMSDRVSPEED:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {
			/*
			 * Note: In the future the driver implementation for
			 * getting and setting cd speed should entail:
			 * 1) If non-mmc try the Toshiba mode page
			 *    (sr_change_speed)
			 * 2) If mmc but no support for Real Time Streaming try
			 *    the SET CD SPEED (0xBB) command
			 *    (sr_atapi_change_speed)
			 * 3) If mmc and support for Real Time Streaming
			 *    try the GET PERFORMANCE and SET STREAMING
			 *    commands (not yet implemented, 4380808)
			 */
			/*
			 * As per recent MMC spec, CD-ROM speed is variable
			 * and changes with LBA. Since there is no such
			 * thing as drive speed now, fail this ioctl.
			 *
			 * Note: EINVAL is returned for consistency with the
			 * original implementation, which included support
			 * for getting the drive speed of mmc devices but
			 * not setting the drive speed. Thus EINVAL would be
			 * returned if a set request was made for an mmc
			 * device. We no longer support get or set speed for
			 * mmc, but need to remain consistent with regard
			 * to the error code returned.
			 */
			err = EINVAL;
		} else if (un->un_f_cfg_is_atapi == TRUE) {
			err = sr_atapi_change_speed(dev, cmd, arg, flag);
		} else {
			err = sr_change_speed(dev, cmd, arg, flag);
		}
		break;

	case CDROMCDDA:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_cdda(dev, (void *)arg, flag);
		}
		break;

	case CDROMCDXA:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_cdxa(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSUBCODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_all_subcodes(dev, (caddr_t)arg, flag);
		}
		break;


#ifdef SDDEBUG
	/* RESET/ABORTS testing ioctls */
	case DKIOCRESET: {
		int	reset_level;

		if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) {
			err = EFAULT;
		} else {
			SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: "
			    "reset_level = 0x%x\n", reset_level);
			if (scsi_reset(SD_ADDRESS(un), reset_level)) {
				err = 0;
			} else {
				err = EIO;
			}
		}
		break;
	}

	case DKIOCABORT:
		SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n");
		if (scsi_abort(SD_ADDRESS(un), NULL)) {
			err = 0;
		} else {
			err = EIO;
		}
		break;
#endif

#ifdef SD_FAULT_INJECTION
	/* SDIOC FaultInjection testing ioctls */
	case SDIOCSTART:
	case SDIOCSTOP:
	case SDIOCINSERTPKT:
	case SDIOCINSERTXB:
	case SDIOCINSERTUN:
	case SDIOCINSERTARQ:
	case SDIOCPUSH:
	case SDIOCRETRIEVE:
	case SDIOCRUN:
		SD_INFO(SD_LOG_SDTEST, un, "sdioctl: "
		    "SDIOC detected cmd:0x%X:\n", cmd);
		/* call error generator */
		sd_faultinjection_ioctl(cmd, arg, un);
		err = 0;
		break;

#endif /* SD_FAULT_INJECTION */

	case DKIOCFLUSHWRITECACHE:
	{
		struct dk_callback *dkc = (struct
dk_callback *)arg; 23106 23107 mutex_enter(SD_MUTEX(un)); 23108 if (!un->un_f_sync_cache_supported || 23109 !un->un_f_write_cache_enabled) { 23110 err = un->un_f_sync_cache_supported ? 23111 0 : ENOTSUP; 23112 mutex_exit(SD_MUTEX(un)); 23113 if ((flag & FKIOCTL) && dkc != NULL && 23114 dkc->dkc_callback != NULL) { 23115 (*dkc->dkc_callback)(dkc->dkc_cookie, 23116 err); 23117 /* 23118 * Did callback and reported error. 23119 * Since we did a callback, ioctl 23120 * should return 0. 23121 */ 23122 err = 0; 23123 } 23124 break; 23125 } 23126 mutex_exit(SD_MUTEX(un)); 23127 23128 if ((flag & FKIOCTL) && dkc != NULL && 23129 dkc->dkc_callback != NULL) { 23130 /* async SYNC CACHE request */ 23131 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 23132 } else { 23133 /* synchronous SYNC CACHE request */ 23134 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 23135 } 23136 } 23137 break; 23138 23139 case DKIOCGETWCE: { 23140 23141 int wce; 23142 23143 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) { 23144 break; 23145 } 23146 23147 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 23148 err = EFAULT; 23149 } 23150 break; 23151 } 23152 23153 case DKIOCSETWCE: { 23154 23155 int wce, sync_supported; 23156 int cur_wce = 0; 23157 23158 if (!un->un_f_cache_mode_changeable) { 23159 err = EINVAL; 23160 break; 23161 } 23162 23163 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 23164 err = EFAULT; 23165 break; 23166 } 23167 23168 /* 23169 * Synchronize multiple threads trying to enable 23170 * or disable the cache via the un_f_wcc_cv 23171 * condition variable. 23172 */ 23173 mutex_enter(SD_MUTEX(un)); 23174 23175 /* 23176 * Don't allow the cache to be enabled if the 23177 * config file has it disabled. 23178 */ 23179 if (un->un_f_opt_disable_cache && wce) { 23180 mutex_exit(SD_MUTEX(un)); 23181 err = EINVAL; 23182 break; 23183 } 23184 23185 /* 23186 * Wait for write cache change in progress 23187 * bit to be clear before proceeding. 23188 */ 23189 while (un->un_f_wcc_inprog) 23190 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 23191 23192 un->un_f_wcc_inprog = 1; 23193 23194 mutex_exit(SD_MUTEX(un)); 23195 23196 /* 23197 * Get the current write cache state 23198 */ 23199 if ((err = sd_get_write_cache_enabled(ssc, &cur_wce)) != 0) { 23200 mutex_enter(SD_MUTEX(un)); 23201 un->un_f_wcc_inprog = 0; 23202 cv_broadcast(&un->un_wcc_cv); 23203 mutex_exit(SD_MUTEX(un)); 23204 break; 23205 } 23206 23207 mutex_enter(SD_MUTEX(un)); 23208 un->un_f_write_cache_enabled = (cur_wce != 0); 23209 23210 if (un->un_f_write_cache_enabled && wce == 0) { 23211 /* 23212 * Disable the write cache. Don't clear 23213 * un_f_write_cache_enabled until after 23214 * the mode select and flush are complete. 23215 */ 23216 sync_supported = un->un_f_sync_cache_supported; 23217 23218 /* 23219 * If cache flush is suppressed, we assume that the 23220 * controller firmware will take care of managing the 23221 * write cache for us: no need to explicitly 23222 * disable it. 
23223 */ 23224 if (!un->un_f_suppress_cache_flush) { 23225 mutex_exit(SD_MUTEX(un)); 23226 if ((err = sd_cache_control(ssc, 23227 SD_CACHE_NOCHANGE, 23228 SD_CACHE_DISABLE)) == 0 && 23229 sync_supported) { 23230 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 23231 NULL); 23232 } 23233 } else { 23234 mutex_exit(SD_MUTEX(un)); 23235 } 23236 23237 mutex_enter(SD_MUTEX(un)); 23238 if (err == 0) { 23239 un->un_f_write_cache_enabled = 0; 23240 } 23241 23242 } else if (!un->un_f_write_cache_enabled && wce != 0) { 23243 /* 23244 * Set un_f_write_cache_enabled first, so there is 23245 * no window where the cache is enabled, but the 23246 * bit says it isn't. 23247 */ 23248 un->un_f_write_cache_enabled = 1; 23249 23250 /* 23251 * If cache flush is suppressed, we assume that the 23252 * controller firmware will take care of managing the 23253 * write cache for us: no need to explicitly 23254 * enable it. 23255 */ 23256 if (!un->un_f_suppress_cache_flush) { 23257 mutex_exit(SD_MUTEX(un)); 23258 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE, 23259 SD_CACHE_ENABLE); 23260 } else { 23261 mutex_exit(SD_MUTEX(un)); 23262 } 23263 23264 mutex_enter(SD_MUTEX(un)); 23265 23266 if (err) { 23267 un->un_f_write_cache_enabled = 0; 23268 } 23269 } 23270 23271 un->un_f_wcc_inprog = 0; 23272 cv_broadcast(&un->un_wcc_cv); 23273 mutex_exit(SD_MUTEX(un)); 23274 break; 23275 } 23276 23277 default: 23278 err = ENOTTY; 23279 break; 23280 } 23281 mutex_enter(SD_MUTEX(un)); 23282 un->un_ncmds_in_driver--; 23283 ASSERT(un->un_ncmds_in_driver >= 0); 23284 mutex_exit(SD_MUTEX(un)); 23285 23286 23287 done_without_assess: 23288 sd_ssc_fini(ssc); 23289 23290 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 23291 return (err); 23292 23293 done_with_assess: 23294 mutex_enter(SD_MUTEX(un)); 23295 un->un_ncmds_in_driver--; 23296 ASSERT(un->un_ncmds_in_driver >= 0); 23297 mutex_exit(SD_MUTEX(un)); 23298 23299 done_quick_assess: 23300 if (err != 0) 23301 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23302 /* Uninitialize sd_ssc_t pointer */ 23303 sd_ssc_fini(ssc); 23304 23305 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 23306 return (err); 23307 } 23308 23309 23310 /* 23311 * Function: sd_dkio_ctrl_info 23312 * 23313 * Description: This routine is the driver entry point for handling controller 23314 * information ioctl requests (DKIOCINFO). 23315 * 23316 * Arguments: dev - the device number 23317 * arg - pointer to user provided dk_cinfo structure 23318 * specifying the controller type and attributes. 23319 * flag - this argument is a pass through to ddi_copyxxx() 23320 * directly from the mode argument of ioctl(). 
23321 * 23322 * Return Code: 0 23323 * EFAULT 23324 * ENXIO 23325 */ 23326 23327 static int 23328 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 23329 { 23330 struct sd_lun *un = NULL; 23331 struct dk_cinfo *info; 23332 dev_info_t *pdip; 23333 int lun, tgt; 23334 23335 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23336 return (ENXIO); 23337 } 23338 23339 info = (struct dk_cinfo *) 23340 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 23341 23342 switch (un->un_ctype) { 23343 case CTYPE_CDROM: 23344 info->dki_ctype = DKC_CDROM; 23345 break; 23346 default: 23347 info->dki_ctype = DKC_SCSI_CCS; 23348 break; 23349 } 23350 pdip = ddi_get_parent(SD_DEVINFO(un)); 23351 info->dki_cnum = ddi_get_instance(pdip); 23352 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 23353 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 23354 } else { 23355 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 23356 DK_DEVLEN - 1); 23357 } 23358 23359 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 23360 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 23361 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 23362 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 23363 23364 /* Unit Information */ 23365 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 23366 info->dki_slave = ((tgt << 3) | lun); 23367 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 23368 DK_DEVLEN - 1); 23369 info->dki_flags = DKI_FMTVOL; 23370 info->dki_partition = SDPART(dev); 23371 23372 /* Max Transfer size of this device in blocks */ 23373 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 23374 info->dki_addr = 0; 23375 info->dki_space = 0; 23376 info->dki_prio = 0; 23377 info->dki_vec = 0; 23378 23379 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 23380 kmem_free(info, sizeof (struct dk_cinfo)); 23381 return (EFAULT); 23382 } else { 23383 kmem_free(info, sizeof (struct dk_cinfo)); 23384 return (0); 23385 } 23386 } 23387 23388 /* 23389 * Function: sd_get_media_info_com 23390 * 23391 * Description: This routine returns the information required to populate 23392 * the fields for the dk_minfo/dk_minfo_ext structures. 
23393 * 23394 * Arguments: dev - the device number 23395 * dki_media_type - media_type 23396 * dki_lbsize - logical block size 23397 * dki_capacity - capacity in blocks 23398 * dki_pbsize - physical block size (if requested) 23399 * 23400 * Return Code: 0 23401 * EACCESS 23402 * EFAULT 23403 * ENXIO 23404 * EIO 23405 */ 23406 static int 23407 sd_get_media_info_com(dev_t dev, uint_t *dki_media_type, uint_t *dki_lbsize, 23408 diskaddr_t *dki_capacity, uint_t *dki_pbsize) 23409 { 23410 struct sd_lun *un = NULL; 23411 struct uscsi_cmd com; 23412 struct scsi_inquiry *sinq; 23413 u_longlong_t media_capacity; 23414 uint64_t capacity; 23415 uint_t lbasize; 23416 uint_t pbsize; 23417 uchar_t *out_data; 23418 uchar_t *rqbuf; 23419 int rval = 0; 23420 int rtn; 23421 sd_ssc_t *ssc; 23422 23423 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 23424 (un->un_state == SD_STATE_OFFLINE)) { 23425 return (ENXIO); 23426 } 23427 23428 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info_com: entry\n"); 23429 23430 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 23431 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 23432 ssc = sd_ssc_init(un); 23433 23434 /* Issue a TUR to determine if the drive is ready with media present */ 23435 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA); 23436 if (rval == ENXIO) { 23437 goto done; 23438 } else if (rval != 0) { 23439 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23440 } 23441 23442 /* Now get configuration data */ 23443 if (ISCD(un)) { 23444 *dki_media_type = DK_CDROM; 23445 23446 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 23447 if (un->un_f_mmc_cap == TRUE) { 23448 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, 23449 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 23450 SD_PATH_STANDARD); 23451 23452 if (rtn) { 23453 /* 23454 * We ignore all failures for CD and need to 23455 * put the assessment before processing code 23456 * to avoid missing assessment for FMA. 
23457 */ 23458 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23459 /* 23460 * Failed for other than an illegal request 23461 * or command not supported 23462 */ 23463 if ((com.uscsi_status == STATUS_CHECK) && 23464 (com.uscsi_rqstatus == STATUS_GOOD)) { 23465 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 23466 (rqbuf[12] != 0x20)) { 23467 rval = EIO; 23468 goto no_assessment; 23469 } 23470 } 23471 } else { 23472 /* 23473 * The GET CONFIGURATION command succeeded 23474 * so set the media type according to the 23475 * returned data 23476 */ 23477 *dki_media_type = out_data[6]; 23478 *dki_media_type <<= 8; 23479 *dki_media_type |= out_data[7]; 23480 } 23481 } 23482 } else { 23483 /* 23484 * The profile list is not available, so we attempt to identify 23485 * the media type based on the inquiry data 23486 */ 23487 sinq = un->un_sd->sd_inq; 23488 if ((sinq->inq_dtype == DTYPE_DIRECT) || 23489 (sinq->inq_dtype == DTYPE_OPTICAL)) { 23490 /* This is a direct access device or optical disk */ 23491 *dki_media_type = DK_FIXED_DISK; 23492 23493 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 23494 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 23495 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 23496 *dki_media_type = DK_ZIP; 23497 } else if ( 23498 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 23499 *dki_media_type = DK_JAZ; 23500 } 23501 } 23502 } else { 23503 /* 23504 * Not a CD, direct access or optical disk so return 23505 * unknown media 23506 */ 23507 *dki_media_type = DK_UNKNOWN; 23508 } 23509 } 23510 23511 /* 23512 * Now read the capacity so we can provide the lbasize, 23513 * pbsize and capacity. 23514 */ 23515 if (dki_pbsize && un->un_f_descr_format_supported) { 23516 rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize, 23517 &pbsize, SD_PATH_DIRECT); 23518 23519 /* 23520 * Override the physical blocksize if the instance already 23521 * has a larger value. 23522 */ 23523 pbsize = MAX(pbsize, un->un_phy_blocksize); 23524 } 23525 23526 if (dki_pbsize == NULL || rval != 0 || 23527 !un->un_f_descr_format_supported) { 23528 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 23529 SD_PATH_DIRECT); 23530 23531 switch (rval) { 23532 case 0: 23533 if (un->un_f_enable_rmw && 23534 un->un_phy_blocksize != 0) { 23535 pbsize = un->un_phy_blocksize; 23536 } else { 23537 pbsize = lbasize; 23538 } 23539 media_capacity = capacity; 23540 23541 /* 23542 * sd_send_scsi_READ_CAPACITY() reports capacity in 23543 * un->un_sys_blocksize chunks. So we need to convert 23544 * it into cap.lbsize chunks. 23545 */ 23546 if (un->un_f_has_removable_media) { 23547 media_capacity *= un->un_sys_blocksize; 23548 media_capacity /= lbasize; 23549 } 23550 break; 23551 case EACCES: 23552 rval = EACCES; 23553 goto done; 23554 default: 23555 rval = EIO; 23556 goto done; 23557 } 23558 } else { 23559 if (un->un_f_enable_rmw && 23560 !ISP2(pbsize % DEV_BSIZE)) { 23561 pbsize = SSD_SECSIZE; 23562 } else if (!ISP2(lbasize % DEV_BSIZE) || 23563 !ISP2(pbsize % DEV_BSIZE)) { 23564 pbsize = lbasize = DEV_BSIZE; 23565 } 23566 media_capacity = capacity; 23567 } 23568 23569 /* 23570 * If lun is expanded dynamically, update the un structure. 
	 */
	mutex_enter(SD_MUTEX(un));
	if ((un->un_f_blockcount_is_valid == TRUE) &&
	    (un->un_f_tgt_blocksize_is_valid == TRUE) &&
	    (capacity > un->un_blockcount)) {
		un->un_f_expnevent = B_FALSE;
		sd_update_block_info(un, lbasize, capacity);
	}
	mutex_exit(SD_MUTEX(un));

	*dki_lbsize = lbasize;
	*dki_capacity = media_capacity;
	if (dki_pbsize)
		*dki_pbsize = pbsize;

done:
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
no_assessment:
	sd_ssc_fini(ssc);
	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
	kmem_free(rqbuf, SENSE_LENGTH);
	return (rval);
}

/*
 * Function: sd_get_media_info
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests for the media type or command set profile used by the
 *		drive to operate on the media (DKIOCGMEDIAINFO).
 *
 * Arguments: dev - the device number
 *		arg - pointer to user provided dk_minfo structure
 *		specifying the media type, logical block size and
 *		drive capacity.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
 *
 * Return Code: returns the value from sd_get_media_info_com
 */
static int
sd_get_media_info(dev_t dev, caddr_t arg, int flag)
{
	struct dk_minfo		mi;
	int			rval;

	rval = sd_get_media_info_com(dev, &mi.dki_media_type,
	    &mi.dki_lbsize, &mi.dki_capacity, NULL);

	if (rval)
		return (rval);
	if (ddi_copyout(&mi, arg, sizeof (struct dk_minfo), flag))
		rval = EFAULT;
	return (rval);
}

/*
 * Function: sd_get_media_info_ext
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests for the media type or command set profile used by the
 *		drive to operate on the media (DKIOCGMEDIAINFOEXT). The
 *		difference between this ioctl and DKIOCGMEDIAINFO is that the
 *		return value of this ioctl contains both the logical block
 *		size and the physical block size.
 *
 * Arguments: dev - the device number
 *		arg - pointer to user provided dk_minfo_ext structure
 *		specifying the media type, logical block size,
 *		physical block size and disk capacity.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
 *
 * Return Code: returns the value from sd_get_media_info_com
 */
static int
sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag)
{
	struct dk_minfo_ext	mie;
	int			rval = 0;

	rval = sd_get_media_info_com(dev, &mie.dki_media_type,
	    &mie.dki_lbsize, &mie.dki_capacity, &mie.dki_pbsize);

	if (rval)
		return (rval);
	if (ddi_copyout(&mie, arg, sizeof (struct dk_minfo_ext), flag))
		rval = EFAULT;
	return (rval);
}

/*
 * Function: sd_watch_request_submit
 *
 * Description: Call scsi_watch_request_submit or scsi_mmc_watch_request_submit
 *		depending on which is supported by the device.
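 *
 *		MMC devices that support GESN polling (un_f_mmc_gesn_polling)
 *		are watched via scsi_mmc_watch_request_submit(), which polls
 *		with GET EVENT STATUS NOTIFICATION; all other devices fall
 *		back to scsi_watch_request_submit(), which polls with TEST
 *		UNIT READY. Both paths share sd_media_watch_cb() and use the
 *		same device number as the callback cookie.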
23674 */ 23675 static opaque_t 23676 sd_watch_request_submit(struct sd_lun *un) 23677 { 23678 dev_t dev; 23679 23680 /* All submissions are unified to use same device number */ 23681 dev = sd_make_device(SD_DEVINFO(un)); 23682 23683 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) { 23684 return (scsi_mmc_watch_request_submit(SD_SCSI_DEVP(un), 23685 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 23686 (caddr_t)dev)); 23687 } else { 23688 return (scsi_watch_request_submit(SD_SCSI_DEVP(un), 23689 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 23690 (caddr_t)dev)); 23691 } 23692 } 23693 23694 23695 /* 23696 * Function: sd_check_media 23697 * 23698 * Description: This utility routine implements the functionality for the 23699 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 23700 * driver state changes from that specified by the user 23701 * (inserted or ejected). For example, if the user specifies 23702 * DKIO_EJECTED and the current media state is inserted this 23703 * routine will immediately return DKIO_INSERTED. However, if the 23704 * current media state is not inserted the user thread will be 23705 * blocked until the drive state changes. If DKIO_NONE is specified 23706 * the user thread will block until a drive state change occurs. 23707 * 23708 * Arguments: dev - the device number 23709 * state - user pointer to a dkio_state, updated with the current 23710 * drive state at return. 23711 * 23712 * Return Code: ENXIO 23713 * EIO 23714 * EAGAIN 23715 * EINTR 23716 */ 23717 23718 static int 23719 sd_check_media(dev_t dev, enum dkio_state state) 23720 { 23721 struct sd_lun *un = NULL; 23722 enum dkio_state prev_state; 23723 opaque_t token = NULL; 23724 int rval = 0; 23725 sd_ssc_t *ssc; 23726 23727 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23728 return (ENXIO); 23729 } 23730 23731 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 23732 23733 ssc = sd_ssc_init(un); 23734 23735 mutex_enter(SD_MUTEX(un)); 23736 23737 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 23738 "state=%x, mediastate=%x\n", state, un->un_mediastate); 23739 23740 prev_state = un->un_mediastate; 23741 23742 /* is there anything to do? */ 23743 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 23744 /* 23745 * submit the request to the scsi_watch service; 23746 * scsi_media_watch_cb() does the real work 23747 */ 23748 mutex_exit(SD_MUTEX(un)); 23749 23750 /* 23751 * This change handles the case where a scsi watch request is 23752 * added to a device that is powered down. To accomplish this 23753 * we power up the device before adding the scsi watch request, 23754 * since the scsi watch sends a TUR directly to the device 23755 * which the device cannot handle if it is powered down. 23756 */ 23757 if (sd_pm_entry(un) != DDI_SUCCESS) { 23758 mutex_enter(SD_MUTEX(un)); 23759 goto done; 23760 } 23761 23762 token = sd_watch_request_submit(un); 23763 23764 sd_pm_exit(un); 23765 23766 mutex_enter(SD_MUTEX(un)); 23767 if (token == NULL) { 23768 rval = EAGAIN; 23769 goto done; 23770 } 23771 23772 /* 23773 * This is a special case IOCTL that doesn't return 23774 * until the media state changes. Routine sdpower 23775 * knows about and handles this so don't count it 23776 * as an active cmd in the driver, which would 23777 * keep the device busy to the pm framework. 23778 * If the count isn't decremented the device can't 23779 * be powered down. 
		 */
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);

		/*
		 * If a prior request had been made, this will be the same
		 * token, as scsi_watch was designed that way.
		 */
		un->un_swr_token = token;
		un->un_specified_mediastate = state;

		/*
		 * Now wait for the media change. We will not be signalled
		 * unless mediastate == state, but it is still better to test
		 * for this condition, since there is a 2 sec cv_broadcast
		 * delay when mediastate == DKIO_INSERTED.
		 */
		SD_TRACE(SD_LOG_COMMON, un,
		    "sd_check_media: waiting for media state change\n");
		while (un->un_mediastate == state) {
			if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) {
				SD_TRACE(SD_LOG_COMMON, un,
				    "sd_check_media: waiting for media state "
				    "was interrupted\n");
				un->un_ncmds_in_driver++;
				rval = EINTR;
				goto done;
			}
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_check_media: received signal, state=%x\n",
			    un->un_mediastate);
		}
		/*
		 * Inc the counter to indicate the device once again
		 * has an active outstanding cmd.
		 */
		un->un_ncmds_in_driver++;
	}

	/* invalidate geometry */
	if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) {
		sr_ejected(un);
	}

	if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) {
		uint64_t	capacity;
		uint_t		lbasize;

		SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n");
		mutex_exit(SD_MUTEX(un));
		/*
		 * Since the following routines use SD_PATH_DIRECT, we must
		 * call PM directly before the upcoming disk accesses. This
		 * may cause the disk to be powered up and spun up.
		 */

		if (sd_pm_entry(un) == DDI_SUCCESS) {
			rval = sd_send_scsi_READ_CAPACITY(ssc,
			    &capacity, &lbasize, SD_PATH_DIRECT);
			if (rval != 0) {
				sd_pm_exit(un);
				if (rval == EIO)
					sd_ssc_assessment(ssc,
					    SD_FMT_STATUS_CHECK);
				else
					sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				mutex_enter(SD_MUTEX(un));
				goto done;
			}
		} else {
			rval = EIO;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}
		mutex_enter(SD_MUTEX(un));

		sd_update_block_info(un, lbasize, capacity);

		/*
		 * Check if the media in the device is writable or not.
		 */
		if (ISCD(un)) {
			sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
		}

		mutex_exit(SD_MUTEX(un));
		cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
		if ((cmlb_validate(un->un_cmlbhandle, 0,
		    (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) {
			sd_set_pstats(un);
			SD_TRACE(SD_LOG_IO_PARTITION, un,
			    "sd_check_media: un:0x%p pstats created and "
			    "set\n", un);
		}

		rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
		    SD_PATH_DIRECT);

		sd_pm_exit(un);

		if (rval != 0) {
			if (rval == EIO)
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			else
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		mutex_enter(SD_MUTEX(un));
	}
done:
	sd_ssc_fini(ssc);
	un->un_f_watcht_stopped = FALSE;
	if (token != NULL && un->un_swr_token != NULL) {
		/*
		 * Use of this local token and the mutex ensures that we avoid
		 * some race conditions associated with terminating the
		 * scsi watch.
23897 */ 23898 token = un->un_swr_token; 23899 mutex_exit(SD_MUTEX(un)); 23900 (void) scsi_watch_request_terminate(token, 23901 SCSI_WATCH_TERMINATE_WAIT); 23902 if (scsi_watch_get_ref_count(token) == 0) { 23903 mutex_enter(SD_MUTEX(un)); 23904 un->un_swr_token = (opaque_t)NULL; 23905 } else { 23906 mutex_enter(SD_MUTEX(un)); 23907 } 23908 } 23909 23910 /* 23911 * Update the capacity kstat value, if no media previously 23912 * (capacity kstat is 0) and a media has been inserted 23913 * (un_f_blockcount_is_valid == TRUE) 23914 */ 23915 if (un->un_errstats) { 23916 struct sd_errstats *stp = NULL; 23917 23918 stp = (struct sd_errstats *)un->un_errstats->ks_data; 23919 if ((stp->sd_capacity.value.ui64 == 0) && 23920 (un->un_f_blockcount_is_valid == TRUE)) { 23921 stp->sd_capacity.value.ui64 = 23922 (uint64_t)((uint64_t)un->un_blockcount * 23923 un->un_sys_blocksize); 23924 } 23925 } 23926 mutex_exit(SD_MUTEX(un)); 23927 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 23928 return (rval); 23929 } 23930 23931 23932 /* 23933 * Function: sd_delayed_cv_broadcast 23934 * 23935 * Description: Delayed cv_broadcast to allow for target to recover from media 23936 * insertion. 23937 * 23938 * Arguments: arg - driver soft state (unit) structure 23939 */ 23940 23941 static void 23942 sd_delayed_cv_broadcast(void *arg) 23943 { 23944 struct sd_lun *un = arg; 23945 23946 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 23947 23948 mutex_enter(SD_MUTEX(un)); 23949 un->un_dcvb_timeid = NULL; 23950 cv_broadcast(&un->un_state_cv); 23951 mutex_exit(SD_MUTEX(un)); 23952 } 23953 23954 23955 /* 23956 * Function: sd_media_watch_cb 23957 * 23958 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 23959 * routine processes the TUR sense data and updates the driver 23960 * state if a transition has occurred. The user thread 23961 * (sd_check_media) is then signalled. 
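 *
 *		A minimal userland sketch of the DKIOCSTATE usage that this
 *		callback supports (the device path and error handling are
 *		illustrative only):
 *
 *			enum dkio_state state = DKIO_NONE;
 *			int fd = open("/dev/rdsk/c1t6d0s2",
 *			    O_RDONLY | O_NDELAY);
 *			while (ioctl(fd, DKIOCSTATE, &state) == 0)
 *				;
 *
 *		Each call returns, with the new state in "state", only once
 *		the media state differs from the value passed in.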
 *
 * Arguments: arg - the device 'dev_t' is used for context to discriminate
 *		among multiple watches that share this callback function
 *	    resultp - scsi watch facility result packet containing scsi
 *		packet, status byte and sense data
 *
 * Return Code: 0 for success, -1 for failure
 */

static int
sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
	struct sd_lun *un;
	struct scsi_status *statusp = resultp->statusp;
	uint8_t *sensep = (uint8_t *)resultp->sensep;
	enum dkio_state state = DKIO_NONE;
	dev_t dev = (dev_t)arg;
	uchar_t actual_sense_length;
	uint8_t skey, asc, ascq;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (-1);
	}
	actual_sense_length = resultp->actual_sense_length;

	mutex_enter(SD_MUTEX(un));
	SD_TRACE(SD_LOG_COMMON, un,
	    "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
	    *((char *)statusp), (void *)sensep, actual_sense_length);

	if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
		un->un_mediastate = DKIO_DEV_GONE;
		cv_broadcast(&un->un_state_cv);
		mutex_exit(SD_MUTEX(un));

		return (0);
	}

	if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) {
		if (sd_gesn_media_data_valid(resultp->mmc_data)) {
			if ((resultp->mmc_data[5] &
			    SD_GESN_MEDIA_EVENT_STATUS_PRESENT) != 0) {
				state = DKIO_INSERTED;
			} else {
				state = DKIO_EJECTED;
			}
			if ((resultp->mmc_data[4] & SD_GESN_MEDIA_EVENT_CODE) ==
			    SD_GESN_MEDIA_EVENT_EJECTREQUEST) {
				sd_log_eject_request_event(un, KM_NOSLEEP);
			}
		}
	} else if (sensep != NULL) {
		/*
		 * If there was a check condition then sensep points to valid
		 * sense data. If status was not a check condition but a
		 * reservation or busy status then the new state is DKIO_NONE.
		 */
		skey = scsi_sense_key(sensep);
		asc = scsi_sense_asc(sensep);
		ascq = scsi_sense_ascq(sensep);

		SD_INFO(SD_LOG_COMMON, un,
		    "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
		    skey, asc, ascq);
		/* This routine only uses up to 13 bytes of sense data. */
		if (actual_sense_length >= 13) {
			if (skey == KEY_UNIT_ATTENTION) {
				if (asc == 0x28) {
					state = DKIO_INSERTED;
				}
			} else if (skey == KEY_NOT_READY) {
				/*
				 * Sense data of 02/06/00 means that the
				 * drive could not read the media (No
				 * reference position found). In this case
				 * to prevent a hang on the DKIOCSTATE IOCTL
				 * we set the media state to DKIO_INSERTED.
				 */
				if (asc == 0x06 && ascq == 0x00)
					state = DKIO_INSERTED;

				/*
				 * Sense data of 02/04/02 means that the host
				 * should send a start command. Explicitly
				 * leave the media state as is (inserted),
				 * since the media is present and the host
				 * has stopped the device for PM reasons. The
				 * next real read/write to the media will
				 * bring the device back to the proper state
				 * for media access.
				 */
				if (asc == 0x3a) {
					state = DKIO_EJECTED;
				} else {
					/*
					 * If the drive is busy with an
					 * operation or long write, keep the
					 * media in an inserted state.
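					 *
					 * (ASC 0x04 with ASCQ 0x02, 0x07 or
					 * 0x08 is the "logical unit not
					 * ready" family: initializing command
					 * required, operation in progress,
					 * and long write in progress,
					 * respectively, per the SPC
					 * additional sense code tables.)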
24061 */ 24062 24063 if ((asc == 0x04) && 24064 ((ascq == 0x02) || 24065 (ascq == 0x07) || 24066 (ascq == 0x08))) { 24067 state = DKIO_INSERTED; 24068 } 24069 } 24070 } else if (skey == KEY_NO_SENSE) { 24071 if ((asc == 0x00) && (ascq == 0x00)) { 24072 /* 24073 * Sense Data 00/00/00 does not provide 24074 * any information about the state of 24075 * the media. Ignore it. 24076 */ 24077 mutex_exit(SD_MUTEX(un)); 24078 return (0); 24079 } 24080 } 24081 } 24082 } else if ((*((char *)statusp) == STATUS_GOOD) && 24083 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 24084 state = DKIO_INSERTED; 24085 } 24086 24087 SD_TRACE(SD_LOG_COMMON, un, 24088 "sd_media_watch_cb: state=%x, specified=%x\n", 24089 state, un->un_specified_mediastate); 24090 24091 /* 24092 * now signal the waiting thread if this is *not* the specified state; 24093 * delay the signal if the state is DKIO_INSERTED to allow the target 24094 * to recover 24095 */ 24096 if (state != un->un_specified_mediastate) { 24097 un->un_mediastate = state; 24098 if (state == DKIO_INSERTED) { 24099 /* 24100 * delay the signal to give the drive a chance 24101 * to do what it apparently needs to do 24102 */ 24103 SD_TRACE(SD_LOG_COMMON, un, 24104 "sd_media_watch_cb: delayed cv_broadcast\n"); 24105 if (un->un_dcvb_timeid == NULL) { 24106 un->un_dcvb_timeid = 24107 timeout(sd_delayed_cv_broadcast, un, 24108 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 24109 } 24110 } else { 24111 SD_TRACE(SD_LOG_COMMON, un, 24112 "sd_media_watch_cb: immediate cv_broadcast\n"); 24113 cv_broadcast(&un->un_state_cv); 24114 } 24115 } 24116 mutex_exit(SD_MUTEX(un)); 24117 return (0); 24118 } 24119 24120 24121 /* 24122 * Function: sd_dkio_get_temp 24123 * 24124 * Description: This routine is the driver entry point for handling ioctl 24125 * requests to get the disk temperature. 24126 * 24127 * Arguments: dev - the device number 24128 * arg - pointer to user provided dk_temperature structure. 24129 * flag - this argument is a pass through to ddi_copyxxx() 24130 * directly from the mode argument of ioctl(). 24131 * 24132 * Return Code: 0 24133 * EFAULT 24134 * ENXIO 24135 * EAGAIN 24136 */ 24137 24138 static int 24139 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 24140 { 24141 struct sd_lun *un = NULL; 24142 struct dk_temperature *dktemp = NULL; 24143 uchar_t *temperature_page; 24144 int rval = 0; 24145 int path_flag = SD_PATH_STANDARD; 24146 sd_ssc_t *ssc; 24147 24148 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24149 return (ENXIO); 24150 } 24151 24152 ssc = sd_ssc_init(un); 24153 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 24154 24155 /* copyin the disk temp argument to get the user flags */ 24156 if (ddi_copyin((void *)arg, dktemp, 24157 sizeof (struct dk_temperature), flag) != 0) { 24158 rval = EFAULT; 24159 goto done; 24160 } 24161 24162 /* Initialize the temperature to invalid. */ 24163 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 24164 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 24165 24166 /* 24167 * Note: Investigate removing the "bypass pm" semantic. 24168 * Can we just bypass PM always? 24169 */ 24170 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 24171 path_flag = SD_PATH_DIRECT; 24172 ASSERT(!mutex_owned(&un->un_pm_mutex)); 24173 mutex_enter(&un->un_pm_mutex); 24174 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 24175 /* 24176 * If DKT_BYPASS_PM is set, and the drive happens to be 24177 * in low power mode, we can not wake it up, Need to 24178 * return EAGAIN. 
24179 */ 24180 mutex_exit(&un->un_pm_mutex); 24181 rval = EAGAIN; 24182 goto done; 24183 } else { 24184 /* 24185 * Indicate to PM the device is busy. This is required 24186 * to avoid a race - i.e. the ioctl is issuing a 24187 * command and the pm framework brings down the device 24188 * to low power mode (possible power cut-off on some 24189 * platforms). 24190 */ 24191 mutex_exit(&un->un_pm_mutex); 24192 if (sd_pm_entry(un) != DDI_SUCCESS) { 24193 rval = EAGAIN; 24194 goto done; 24195 } 24196 } 24197 } 24198 24199 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 24200 24201 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page, 24202 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag); 24203 if (rval != 0) 24204 goto done2; 24205 24206 /* 24207 * For the current temperature verify that the parameter length is 0x02 24208 * and the parameter code is 0x00 24209 */ 24210 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 24211 (temperature_page[5] == 0x00)) { 24212 if (temperature_page[9] == 0xFF) { 24213 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 24214 } else { 24215 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 24216 } 24217 } 24218 24219 /* 24220 * For the reference temperature verify that the parameter 24221 * length is 0x02 and the parameter code is 0x01 24222 */ 24223 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 24224 (temperature_page[11] == 0x01)) { 24225 if (temperature_page[15] == 0xFF) { 24226 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 24227 } else { 24228 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 24229 } 24230 } 24231 24232 /* Do the copyout regardless of the temperature commands status. */ 24233 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 24234 flag) != 0) { 24235 rval = EFAULT; 24236 goto done1; 24237 } 24238 24239 done2: 24240 if (rval != 0) { 24241 if (rval == EIO) 24242 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 24243 else 24244 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24245 } 24246 done1: 24247 if (path_flag == SD_PATH_DIRECT) { 24248 sd_pm_exit(un); 24249 } 24250 24251 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 24252 done: 24253 sd_ssc_fini(ssc); 24254 if (dktemp != NULL) { 24255 kmem_free(dktemp, sizeof (struct dk_temperature)); 24256 } 24257 24258 return (rval); 24259 } 24260 24261 24262 /* 24263 * Function: sd_log_page_supported 24264 * 24265 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 24266 * supported log pages. 24267 * 24268 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 24269 * structure for this target. 24270 * log_page - 24271 * 24272 * Return Code: -1 - on error (log sense is optional and may not be supported). 24273 * 0 - log page not found. 24274 * 1 - log page found. 24275 */ 24276 24277 static int 24278 sd_log_page_supported(sd_ssc_t *ssc, int log_page) 24279 { 24280 uchar_t *log_page_data; 24281 int i; 24282 int match = 0; 24283 int log_size; 24284 int status = 0; 24285 struct sd_lun *un; 24286 24287 ASSERT(ssc != NULL); 24288 un = ssc->ssc_un; 24289 ASSERT(un != NULL); 24290 24291 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 24292 24293 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0, 24294 SD_PATH_DIRECT); 24295 24296 if (status != 0) { 24297 if (status == EIO) { 24298 /* 24299 * Some disks do not support log sense, we 24300 * should ignore this kind of error(sense key is 24301 * 0x5 - illegal request). 
24302 */ 24303 uint8_t *sensep; 24304 int senlen; 24305 24306 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 24307 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 24308 ssc->ssc_uscsi_cmd->uscsi_rqresid); 24309 24310 if (senlen > 0 && 24311 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 24312 sd_ssc_assessment(ssc, 24313 SD_FMT_IGNORE_COMPROMISE); 24314 } else { 24315 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 24316 } 24317 } else { 24318 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24319 } 24320 24321 SD_ERROR(SD_LOG_COMMON, un, 24322 "sd_log_page_supported: failed log page retrieval\n"); 24323 kmem_free(log_page_data, 0xFF); 24324 return (-1); 24325 } 24326 24327 log_size = log_page_data[3]; 24328 24329 /* 24330 * The list of supported log pages start from the fourth byte. Check 24331 * until we run out of log pages or a match is found. 24332 */ 24333 for (i = 4; (i < (log_size + 4)) && !match; i++) { 24334 if (log_page_data[i] == log_page) { 24335 match++; 24336 } 24337 } 24338 kmem_free(log_page_data, 0xFF); 24339 return (match); 24340 } 24341 24342 24343 /* 24344 * Function: sd_mhdioc_failfast 24345 * 24346 * Description: This routine is the driver entry point for handling ioctl 24347 * requests to enable/disable the multihost failfast option. 24348 * (MHIOCENFAILFAST) 24349 * 24350 * Arguments: dev - the device number 24351 * arg - user specified probing interval. 24352 * flag - this argument is a pass through to ddi_copyxxx() 24353 * directly from the mode argument of ioctl(). 24354 * 24355 * Return Code: 0 24356 * EFAULT 24357 * ENXIO 24358 */ 24359 24360 static int 24361 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 24362 { 24363 struct sd_lun *un = NULL; 24364 int mh_time; 24365 int rval = 0; 24366 24367 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24368 return (ENXIO); 24369 } 24370 24371 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 24372 return (EFAULT); 24373 24374 if (mh_time) { 24375 mutex_enter(SD_MUTEX(un)); 24376 un->un_resvd_status |= SD_FAILFAST; 24377 mutex_exit(SD_MUTEX(un)); 24378 /* 24379 * If mh_time is INT_MAX, then this ioctl is being used for 24380 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 24381 */ 24382 if (mh_time != INT_MAX) { 24383 rval = sd_check_mhd(dev, mh_time); 24384 } 24385 } else { 24386 (void) sd_check_mhd(dev, 0); 24387 mutex_enter(SD_MUTEX(un)); 24388 un->un_resvd_status &= ~SD_FAILFAST; 24389 mutex_exit(SD_MUTEX(un)); 24390 } 24391 return (rval); 24392 } 24393 24394 24395 /* 24396 * Function: sd_mhdioc_takeown 24397 * 24398 * Description: This routine is the driver entry point for handling ioctl 24399 * requests to forcefully acquire exclusive access rights to the 24400 * multihost disk (MHIOCTKOWN). 24401 * 24402 * Arguments: dev - the device number 24403 * arg - user provided structure specifying the delay 24404 * parameters in milliseconds 24405 * flag - this argument is a pass through to ddi_copyxxx() 24406 * directly from the mode argument of ioctl(). 
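 *
 *		A minimal userland sketch of the take-ownership sequence
 *		(the device path and the 2000 msec probe interval are
 *		illustrative only; a zeroed mhioctkown selects the default
 *		delays):
 *
 *			struct mhioctkown t;
 *			int mh_time = 2000;
 *			bzero(&t, sizeof (t));
 *			fd = open("/dev/rdsk/c1t4d0s2", O_RDWR | O_NDELAY);
 *			if (ioctl(fd, MHIOCTKOWN, &t) == 0)
 *				(void) ioctl(fd, MHIOCENFAILFAST, &mh_time);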
24407 * 24408 * Return Code: 0 24409 * EFAULT 24410 * ENXIO 24411 */ 24412 24413 static int 24414 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 24415 { 24416 struct sd_lun *un = NULL; 24417 struct mhioctkown *tkown = NULL; 24418 int rval = 0; 24419 24420 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24421 return (ENXIO); 24422 } 24423 24424 if (arg != NULL) { 24425 tkown = (struct mhioctkown *) 24426 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 24427 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 24428 if (rval != 0) { 24429 rval = EFAULT; 24430 goto error; 24431 } 24432 } 24433 24434 rval = sd_take_ownership(dev, tkown); 24435 mutex_enter(SD_MUTEX(un)); 24436 if (rval == 0) { 24437 un->un_resvd_status |= SD_RESERVE; 24438 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 24439 sd_reinstate_resv_delay = 24440 tkown->reinstate_resv_delay * 1000; 24441 } else { 24442 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 24443 } 24444 /* 24445 * Give the scsi_watch routine interval set by 24446 * the MHIOCENFAILFAST ioctl precedence here. 24447 */ 24448 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 24449 mutex_exit(SD_MUTEX(un)); 24450 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 24451 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24452 "sd_mhdioc_takeown : %d\n", 24453 sd_reinstate_resv_delay); 24454 } else { 24455 mutex_exit(SD_MUTEX(un)); 24456 } 24457 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 24458 sd_mhd_reset_notify_cb, (caddr_t)un); 24459 } else { 24460 un->un_resvd_status &= ~SD_RESERVE; 24461 mutex_exit(SD_MUTEX(un)); 24462 } 24463 24464 error: 24465 if (tkown != NULL) { 24466 kmem_free(tkown, sizeof (struct mhioctkown)); 24467 } 24468 return (rval); 24469 } 24470 24471 24472 /* 24473 * Function: sd_mhdioc_release 24474 * 24475 * Description: This routine is the driver entry point for handling ioctl 24476 * requests to release exclusive access rights to the multihost 24477 * disk (MHIOCRELEASE). 24478 * 24479 * Arguments: dev - the device number 24480 * 24481 * Return Code: 0 24482 * ENXIO 24483 */ 24484 24485 static int 24486 sd_mhdioc_release(dev_t dev) 24487 { 24488 struct sd_lun *un = NULL; 24489 timeout_id_t resvd_timeid_save; 24490 int resvd_status_save; 24491 int rval = 0; 24492 24493 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24494 return (ENXIO); 24495 } 24496 24497 mutex_enter(SD_MUTEX(un)); 24498 resvd_status_save = un->un_resvd_status; 24499 un->un_resvd_status &= 24500 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 24501 if (un->un_resvd_timeid) { 24502 resvd_timeid_save = un->un_resvd_timeid; 24503 un->un_resvd_timeid = NULL; 24504 mutex_exit(SD_MUTEX(un)); 24505 (void) untimeout(resvd_timeid_save); 24506 } else { 24507 mutex_exit(SD_MUTEX(un)); 24508 } 24509 24510 /* 24511 * destroy any pending timeout thread that may be attempting to 24512 * reinstate reservation on this device. 
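	 * (sd_rmv_resv_reclaim_req() also waits, on srq_inprocess_cv, for
	 * any reclaim already in progress on this device to complete.)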
24513 */ 24514 sd_rmv_resv_reclaim_req(dev); 24515 24516 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 24517 mutex_enter(SD_MUTEX(un)); 24518 if ((un->un_mhd_token) && 24519 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 24520 mutex_exit(SD_MUTEX(un)); 24521 (void) sd_check_mhd(dev, 0); 24522 } else { 24523 mutex_exit(SD_MUTEX(un)); 24524 } 24525 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 24526 sd_mhd_reset_notify_cb, (caddr_t)un); 24527 } else { 24528 /* 24529 * sd_mhd_watch_cb will restart the resvd recover timeout thread 24530 */ 24531 mutex_enter(SD_MUTEX(un)); 24532 un->un_resvd_status = resvd_status_save; 24533 mutex_exit(SD_MUTEX(un)); 24534 } 24535 return (rval); 24536 } 24537 24538 24539 /* 24540 * Function: sd_mhdioc_register_devid 24541 * 24542 * Description: This routine is the driver entry point for handling ioctl 24543 * requests to register the device id (MHIOCREREGISTERDEVID). 24544 * 24545 * Note: The implementation for this ioctl has been updated to 24546 * be consistent with the original PSARC case (1999/357) 24547 * (4375899, 4241671, 4220005) 24548 * 24549 * Arguments: dev - the device number 24550 * 24551 * Return Code: 0 24552 * ENXIO 24553 */ 24554 24555 static int 24556 sd_mhdioc_register_devid(dev_t dev) 24557 { 24558 struct sd_lun *un = NULL; 24559 int rval = 0; 24560 sd_ssc_t *ssc; 24561 24562 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24563 return (ENXIO); 24564 } 24565 24566 ASSERT(!mutex_owned(SD_MUTEX(un))); 24567 24568 mutex_enter(SD_MUTEX(un)); 24569 24570 /* If a devid already exists, de-register it */ 24571 if (un->un_devid != NULL) { 24572 ddi_devid_unregister(SD_DEVINFO(un)); 24573 /* 24574 * After unregister devid, needs to free devid memory 24575 */ 24576 ddi_devid_free(un->un_devid); 24577 un->un_devid = NULL; 24578 } 24579 24580 /* Check for reservation conflict */ 24581 mutex_exit(SD_MUTEX(un)); 24582 ssc = sd_ssc_init(un); 24583 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 24584 mutex_enter(SD_MUTEX(un)); 24585 24586 switch (rval) { 24587 case 0: 24588 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 24589 break; 24590 case EACCES: 24591 break; 24592 default: 24593 rval = EIO; 24594 } 24595 24596 mutex_exit(SD_MUTEX(un)); 24597 if (rval != 0) { 24598 if (rval == EIO) 24599 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 24600 else 24601 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24602 } 24603 sd_ssc_fini(ssc); 24604 return (rval); 24605 } 24606 24607 24608 /* 24609 * Function: sd_mhdioc_inkeys 24610 * 24611 * Description: This routine is the driver entry point for handling ioctl 24612 * requests to issue the SCSI-3 Persistent In Read Keys command 24613 * to the device (MHIOCGRP_INKEYS). 24614 * 24615 * Arguments: dev - the device number 24616 * arg - user provided in_keys structure 24617 * flag - this argument is a pass through to ddi_copyxxx() 24618 * directly from the mode argument of ioctl(). 
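 *
 *		A minimal userland sketch (the list size of 4 is
 *		illustrative; the key-list field names are assumed from
 *		sys/mhd.h):
 *
 *			mhioc_resv_key_t keys[4];
 *			mhioc_key_list_t kl;
 *			mhioc_inkeys_t ik;
 *			kl.listsize = 4;
 *			kl.list = keys;
 *			ik.li = &kl;
 *			if (ioctl(fd, MHIOCGRP_INKEYS, &ik) == 0)
 *				...
 *
 *		On success the driver fills in ik.generation and, it is
 *		assumed, kl.listlen with the number of registered keys,
 *		which may exceed listsize.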
 *
 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
 *		ENXIO
 *		EFAULT
 */

static int
sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un;
	mhioc_inkeys_t	inkeys;
	int		rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct mhioc_inkeys32	inkeys32;

		if (ddi_copyin(arg, &inkeys32,
		    sizeof (struct mhioc_inkeys32), flag) != 0) {
			return (EFAULT);
		}
		inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		inkeys32.generation = inkeys.generation;
		if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}
	rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
	if (rval != 0) {
		return (rval);
	}
	if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}

#endif /* _MULTI_DATAMODEL */

	return (rval);
}


/*
 * Function: sd_mhdioc_inresv
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to issue the SCSI-3 Persistent In Read Reservations
 *		command to the device (MHIOCGRP_INRESV).
 *
 * Arguments: dev - the device number
 *	    arg - user provided in_resv structure
 *	    flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
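 *
 *		Usage parallels MHIOCGRP_INKEYS above, with a mhioc_inresvs_t
 *		whose li field points to a mhioc_resv_desc_list_t in place of
 *		the key list.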
24703 * 24704 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 24705 * ENXIO 24706 * EFAULT 24707 */ 24708 24709 static int 24710 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 24711 { 24712 struct sd_lun *un; 24713 mhioc_inresvs_t inresvs; 24714 int rval = 0; 24715 24716 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24717 return (ENXIO); 24718 } 24719 24720 #ifdef _MULTI_DATAMODEL 24721 24722 switch (ddi_model_convert_from(flag & FMODELS)) { 24723 case DDI_MODEL_ILP32: { 24724 struct mhioc_inresvs32 inresvs32; 24725 24726 if (ddi_copyin(arg, &inresvs32, 24727 sizeof (struct mhioc_inresvs32), flag) != 0) { 24728 return (EFAULT); 24729 } 24730 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 24731 if ((rval = sd_persistent_reservation_in_read_resv(un, 24732 &inresvs, flag)) != 0) { 24733 return (rval); 24734 } 24735 inresvs32.generation = inresvs.generation; 24736 if (ddi_copyout(&inresvs32, arg, 24737 sizeof (struct mhioc_inresvs32), flag) != 0) { 24738 return (EFAULT); 24739 } 24740 break; 24741 } 24742 case DDI_MODEL_NONE: 24743 if (ddi_copyin(arg, &inresvs, 24744 sizeof (mhioc_inresvs_t), flag) != 0) { 24745 return (EFAULT); 24746 } 24747 if ((rval = sd_persistent_reservation_in_read_resv(un, 24748 &inresvs, flag)) != 0) { 24749 return (rval); 24750 } 24751 if (ddi_copyout(&inresvs, arg, 24752 sizeof (mhioc_inresvs_t), flag) != 0) { 24753 return (EFAULT); 24754 } 24755 break; 24756 } 24757 24758 #else /* ! _MULTI_DATAMODEL */ 24759 24760 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 24761 return (EFAULT); 24762 } 24763 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 24764 if (rval != 0) { 24765 return (rval); 24766 } 24767 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 24768 return (EFAULT); 24769 } 24770 24771 #endif /* ! _MULTI_DATAMODEL */ 24772 24773 return (rval); 24774 } 24775 24776 24777 /* 24778 * The following routines support the clustering functionality described below 24779 * and implement lost reservation reclaim functionality. 24780 * 24781 * Clustering 24782 * ---------- 24783 * The clustering code uses two different, independent forms of SCSI 24784 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 24785 * Persistent Group Reservations. For any particular disk, it will use either 24786 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 24787 * 24788 * SCSI-2 24789 * The cluster software takes ownership of a multi-hosted disk by issuing the 24790 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 24791 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 24792 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 24793 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 24794 * driver. The meaning of failfast is that if the driver (on this host) ever 24795 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 24796 * it should immediately panic the host. The motivation for this ioctl is that 24797 * if this host does encounter reservation conflict, the underlying cause is 24798 * that some other host of the cluster has decided that this host is no longer 24799 * in the cluster and has seized control of the disks for itself. Since this 24800 * host is no longer in the cluster, it ought to panic itself. 
The
 * MHIOCENFAILFAST ioctl does two things:
 *	(a) it sets a flag that will cause any returned RESERVATION_CONFLICT
 *	    error to panic the host
 *	(b) it sets up a periodic timer to test whether this host still has
 *	    "access" (in that no other host has reserved the device): if the
 *	    periodic timer gets RESERVATION_CONFLICT, the host is panicked.
 *	    The purpose of that periodic timer is to handle scenarios where
 *	    the host is otherwise temporarily quiescent, temporarily doing
 *	    no real i/o.
 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another
 * host, by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve
 * for the device itself.
 *
 * SCSI-3 PGR
 * A direct semantic implementation of the SCSI-3 Persistent Reservation
 * facility is supported through the shared multihost disk ioctls
 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
 * MHIOCGRP_PREEMPTANDABORT, MHIOCGRP_CLEAR).
 *
 * Reservation Reclaim:
 * --------------------
 * To support the lost reservation reclaim operations this driver creates a
 * single thread to handle reinstating reservations on all devices that have
 * lost reservations. sd_resv_reclaim_requests are logged for all devices
 * that have lost reservations when the scsi watch facility calls back
 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the
 * requests to regain the lost reservations.
 */

/*
 * Function: sd_check_mhd()
 *
 * Description: This function sets up and submits a scsi watch request or
 *		terminates an existing watch request. This routine is used in
 *		support of reservation reclaim.
 *
 * Arguments: dev - the device 'dev_t' is used for context to discriminate
 *		among multiple watches that share the callback function
 *	    interval - the number of milliseconds specifying the watch
 *		interval for issuing TEST UNIT READY commands. If
 *		set to 0 the watch should be terminated. If the
 *		interval is set to 0 and if the device is required
 *		to hold reservation while disabling failfast, the
 *		watch is restarted with an interval of
 *		reinstate_resv_delay.
 *
 * Return Code: 0 - Successful submit/terminate of scsi watch request
 *		ENXIO - Indicates an invalid device was specified
 *		EAGAIN - Unable to submit the scsi watch request
 */

static int
sd_check_mhd(dev_t dev, int interval)
{
	struct sd_lun	*un;
	opaque_t	token;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* is this a watch termination request? */
	if (interval == 0) {
		mutex_enter(SD_MUTEX(un));
		/* if there is an existing watch task then terminate it */
		if (un->un_mhd_token) {
			token = un->un_mhd_token;
			un->un_mhd_token = NULL;
			mutex_exit(SD_MUTEX(un));
			(void) scsi_watch_request_terminate(token,
			    SCSI_WATCH_TERMINATE_ALL_WAIT);
			mutex_enter(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Note: If we return here we don't check for the
			 * failfast case. This is the original legacy
			 * implementation, but perhaps we should be checking
			 * the failfast case.
24879 */ 24880 return (0); 24881 } 24882 /* 24883 * If the device is required to hold reservation while 24884 * disabling failfast, we need to restart the scsi_watch 24885 * routine with an interval of reinstate_resv_delay. 24886 */ 24887 if (un->un_resvd_status & SD_RESERVE) { 24888 interval = sd_reinstate_resv_delay/1000; 24889 } else { 24890 /* no failfast so bail */ 24891 mutex_exit(SD_MUTEX(un)); 24892 return (0); 24893 } 24894 mutex_exit(SD_MUTEX(un)); 24895 } 24896 24897 /* 24898 * adjust minimum time interval to 1 second, 24899 * and convert from msecs to usecs 24900 */ 24901 if (interval > 0 && interval < 1000) { 24902 interval = 1000; 24903 } 24904 interval *= 1000; 24905 24906 /* 24907 * submit the request to the scsi_watch service 24908 */ 24909 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 24910 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 24911 if (token == NULL) { 24912 return (EAGAIN); 24913 } 24914 24915 /* 24916 * save token for termination later on 24917 */ 24918 mutex_enter(SD_MUTEX(un)); 24919 un->un_mhd_token = token; 24920 mutex_exit(SD_MUTEX(un)); 24921 return (0); 24922 } 24923 24924 24925 /* 24926 * Function: sd_mhd_watch_cb() 24927 * 24928 * Description: This function is the call back function used by the scsi watch 24929 * facility. The scsi watch facility sends the "Test Unit Ready" 24930 * and processes the status. If applicable (i.e. a "Unit Attention" 24931 * status and automatic "Request Sense" not used) the scsi watch 24932 * facility will send a "Request Sense" and retrieve the sense data 24933 * to be passed to this callback function. In either case the 24934 * automatic "Request Sense" or the facility submitting one, this 24935 * callback is passed the status and sense data. 24936 * 24937 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24938 * among multiple watches that share this callback function 24939 * resultp - scsi watch facility result packet containing scsi 24940 * packet, status byte and sense data 24941 * 24942 * Return Code: 0 - continue the watch task 24943 * non-zero - terminate the watch task 24944 */ 24945 24946 static int 24947 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 24948 { 24949 struct sd_lun *un; 24950 struct scsi_status *statusp; 24951 uint8_t *sensep; 24952 struct scsi_pkt *pkt; 24953 uchar_t actual_sense_length; 24954 dev_t dev = (dev_t)arg; 24955 24956 ASSERT(resultp != NULL); 24957 statusp = resultp->statusp; 24958 sensep = (uint8_t *)resultp->sensep; 24959 pkt = resultp->pkt; 24960 actual_sense_length = resultp->actual_sense_length; 24961 24962 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24963 return (ENXIO); 24964 } 24965 24966 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24967 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 24968 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 24969 24970 /* Begin processing of the status and/or sense data */ 24971 if (pkt->pkt_reason != CMD_CMPLT) { 24972 /* Handle the incomplete packet */ 24973 sd_mhd_watch_incomplete(un, pkt); 24974 return (0); 24975 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 24976 if (*((unsigned char *)statusp) 24977 == STATUS_RESERVATION_CONFLICT) { 24978 /* 24979 * Handle a reservation conflict by panicking if 24980 * configured for failfast or by logging the conflict 24981 * and updating the reservation status 24982 */ 24983 mutex_enter(SD_MUTEX(un)); 24984 if ((un->un_resvd_status & SD_FAILFAST) && 24985 (sd_failfast_enable)) { 24986 
sd_panic_for_res_conflict(un); 24987 /*NOTREACHED*/ 24988 } 24989 SD_INFO(SD_LOG_IOCTL_MHD, un, 24990 "sd_mhd_watch_cb: Reservation Conflict\n"); 24991 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 24992 mutex_exit(SD_MUTEX(un)); 24993 } 24994 } 24995 24996 if (sensep != NULL) { 24997 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 24998 mutex_enter(SD_MUTEX(un)); 24999 if ((scsi_sense_asc(sensep) == 25000 SD_SCSI_RESET_SENSE_CODE) && 25001 (un->un_resvd_status & SD_RESERVE)) { 25002 /* 25003 * The additional sense code indicates a power 25004 * on or bus device reset has occurred; update 25005 * the reservation status. 25006 */ 25007 un->un_resvd_status |= 25008 (SD_LOST_RESERVE | SD_WANT_RESERVE); 25009 SD_INFO(SD_LOG_IOCTL_MHD, un, 25010 "sd_mhd_watch_cb: Lost Reservation\n"); 25011 } 25012 } else { 25013 return (0); 25014 } 25015 } else { 25016 mutex_enter(SD_MUTEX(un)); 25017 } 25018 25019 if ((un->un_resvd_status & SD_RESERVE) && 25020 (un->un_resvd_status & SD_LOST_RESERVE)) { 25021 if (un->un_resvd_status & SD_WANT_RESERVE) { 25022 /* 25023 * A reset occurred in between the last probe and this 25024 * one so if a timeout is pending cancel it. 25025 */ 25026 if (un->un_resvd_timeid) { 25027 timeout_id_t temp_id = un->un_resvd_timeid; 25028 un->un_resvd_timeid = NULL; 25029 mutex_exit(SD_MUTEX(un)); 25030 (void) untimeout(temp_id); 25031 mutex_enter(SD_MUTEX(un)); 25032 } 25033 un->un_resvd_status &= ~SD_WANT_RESERVE; 25034 } 25035 if (un->un_resvd_timeid == 0) { 25036 /* Schedule a timeout to handle the lost reservation */ 25037 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 25038 (void *)dev, 25039 drv_usectohz(sd_reinstate_resv_delay)); 25040 } 25041 } 25042 mutex_exit(SD_MUTEX(un)); 25043 return (0); 25044 } 25045 25046 25047 /* 25048 * Function: sd_mhd_watch_incomplete() 25049 * 25050 * Description: This function is used to find out why a scsi pkt sent by the 25051 * scsi watch facility was not completed. Under some scenarios this 25052 * routine will return. Otherwise it will send a bus reset to see 25053 * if the drive is still online. 25054 * 25055 * Arguments: un - driver soft state (unit) structure 25056 * pkt - incomplete scsi pkt 25057 */ 25058 25059 static void 25060 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 25061 { 25062 int be_chatty; 25063 int perr; 25064 25065 ASSERT(pkt != NULL); 25066 ASSERT(un != NULL); 25067 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 25068 perr = (pkt->pkt_statistics & STAT_PERR); 25069 25070 mutex_enter(SD_MUTEX(un)); 25071 if (un->un_state == SD_STATE_DUMPING) { 25072 mutex_exit(SD_MUTEX(un)); 25073 return; 25074 } 25075 25076 switch (pkt->pkt_reason) { 25077 case CMD_UNX_BUS_FREE: 25078 /* 25079 * If we had a parity error that caused the target to drop BSY*, 25080 * don't be chatty about it. 25081 */ 25082 if (perr && be_chatty) { 25083 be_chatty = 0; 25084 } 25085 break; 25086 case CMD_TAG_REJECT: 25087 /* 25088 * The SCSI-2 spec states that a tag reject will be sent by the 25089 * target if tagged queuing is not supported. A tag reject may 25090 * also be sent during certain initialization periods or to 25091 * control internal resources. For the latter case the target 25092 * may also return Queue Full. 25093 * 25094 * If this driver receives a tag reject from a target that is 25095 * going through an init period or controlling internal 25096 * resources tagged queuing will be disabled. 
This is
		 * less than optimal behavior, but the driver is unable to
		 * determine the target state and assumes tagged queuing is
		 * not supported.
		 */
		pkt->pkt_flags = 0;
		un->un_tagflags = 0;

		if (un->un_f_opt_queueing == TRUE) {
			un->un_throttle = min(un->un_throttle, 3);
		} else {
			un->un_throttle = 1;
		}
		mutex_exit(SD_MUTEX(un));
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
		mutex_enter(SD_MUTEX(un));
		break;
	case CMD_INCOMPLETE:
		/*
		 * The transport stopped with an abnormal state; fall through
		 * and reset the target and/or bus unless selection did not
		 * complete (indicated by STATE_GOT_BUS), in which case we
		 * don't want to go through a target/bus reset.
		 */
		if (pkt->pkt_state == STATE_GOT_BUS) {
			break;
		}
		/*FALLTHROUGH*/

	case CMD_TIMEOUT:
	default:
		/*
		 * The lun may still be running the command, so a lun reset
		 * should be attempted. If the lun reset fails or cannot be
		 * issued, then try a target reset. Lastly try a bus reset.
		 */
		if ((pkt->pkt_statistics &
		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
			int reset_retval = 0;
			mutex_exit(SD_MUTEX(un));
			if (un->un_f_allow_bus_device_reset == TRUE) {
				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			}
			mutex_enter(SD_MUTEX(un));
		}
		break;
	}

	/* A device/bus reset has occurred; update the reservation status. */
	if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET))) {
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: Lost Reservation\n");
		}
	}

	/*
	 * The disk has been turned off; update the device state.
	 *
	 * Note: Should we be offlining the disk here?
	 */
	if (pkt->pkt_state == STATE_GOT_BUS) {
		SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
		    "Disk not responding to selection\n");
		if (un->un_state != SD_STATE_OFFLINE) {
			New_state(un, SD_STATE_OFFLINE);
		}
	} else if (be_chatty) {
		/*
		 * suppress messages if they are all the same pkt reason;
		 * with TQ, many (up to 256) are returned with the same
		 * pkt_reason
		 */
		if (pkt->pkt_reason != un->un_last_pkt_reason) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: "
			    "SCSI transport failed: reason '%s'\n",
			    scsi_rname(pkt->pkt_reason));
		}
	}
	un->un_last_pkt_reason = pkt->pkt_reason;
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_sname()
 *
 * Description: This is a simple little routine to return a string containing
 *		a printable description of the command status byte for use in
 *		logging.
 *
 * Arguments: status - the command status byte
 *
 * Return Code: char * - string containing status description.
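 *
 *		For example, sd_sname(STATUS_CHECK) returns the string
 *		"check condition"; see the switch below.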
25205 */ 25206 25207 static char * 25208 sd_sname(uchar_t status) 25209 { 25210 switch (status & STATUS_MASK) { 25211 case STATUS_GOOD: 25212 return ("good status"); 25213 case STATUS_CHECK: 25214 return ("check condition"); 25215 case STATUS_MET: 25216 return ("condition met"); 25217 case STATUS_BUSY: 25218 return ("busy"); 25219 case STATUS_INTERMEDIATE: 25220 return ("intermediate"); 25221 case STATUS_INTERMEDIATE_MET: 25222 return ("intermediate - condition met"); 25223 case STATUS_RESERVATION_CONFLICT: 25224 return ("reservation_conflict"); 25225 case STATUS_TERMINATED: 25226 return ("command terminated"); 25227 case STATUS_QFULL: 25228 return ("queue full"); 25229 default: 25230 return ("<unknown status>"); 25231 } 25232 } 25233 25234 25235 /* 25236 * Function: sd_mhd_resvd_recover() 25237 * 25238 * Description: This function adds a reservation entry to the 25239 * sd_resv_reclaim_request list and signals the reservation 25240 * reclaim thread that there is work pending. If the reservation 25241 * reclaim thread has not been previously created this function 25242 * will kick it off. 25243 * 25244 * Arguments: arg - the device 'dev_t' is used for context to discriminate 25245 * among multiple watches that share this callback function 25246 * 25247 * Context: This routine is called by timeout() and is run in interrupt 25248 * context. It must not sleep or call other functions which may 25249 * sleep. 25250 */ 25251 25252 static void 25253 sd_mhd_resvd_recover(void *arg) 25254 { 25255 dev_t dev = (dev_t)arg; 25256 struct sd_lun *un; 25257 struct sd_thr_request *sd_treq = NULL; 25258 struct sd_thr_request *sd_cur = NULL; 25259 struct sd_thr_request *sd_prev = NULL; 25260 int already_there = 0; 25261 25262 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25263 return; 25264 } 25265 25266 mutex_enter(SD_MUTEX(un)); 25267 un->un_resvd_timeid = NULL; 25268 if (un->un_resvd_status & SD_WANT_RESERVE) { 25269 /* 25270 * There was a reset so don't issue the reserve, allow the 25271 * sd_mhd_watch_cb callback function to notice this and 25272 * reschedule the timeout for reservation. 25273 */ 25274 mutex_exit(SD_MUTEX(un)); 25275 return; 25276 } 25277 mutex_exit(SD_MUTEX(un)); 25278 25279 /* 25280 * Add this device to the sd_resv_reclaim_request list and the 25281 * sd_resv_reclaim_thread should take care of the rest. 25282 * 25283 * Note: We can't sleep in this context so if the memory allocation 25284 * fails allow the sd_mhd_watch_cb callback function to notice this and 25285 * reschedule the timeout for reservation. 
(4378460)
	 */
	sd_treq = (struct sd_thr_request *)
	    kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP);
	if (sd_treq == NULL) {
		return;
	}

	sd_treq->sd_thr_req_next = NULL;
	sd_treq->dev = dev;
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_req_head == NULL) {
		sd_tr.srq_thr_req_head = sd_treq;
	} else {
		sd_cur = sd_prev = sd_tr.srq_thr_req_head;
		for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) {
			if (sd_cur->dev == dev) {
				/*
				 * already in queue, so don't log
				 * another request for the device
				 */
				already_there = 1;
				break;
			}
			sd_prev = sd_cur;
		}
		if (!already_there) {
			SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: "
			    "logging request for %lx\n", dev);
			sd_prev->sd_thr_req_next = sd_treq;
		} else {
			kmem_free(sd_treq, sizeof (struct sd_thr_request));
		}
	}

	/*
	 * Create a kernel thread to do the reservation reclaim, freeing this
	 * timeout (interrupt) context; we cannot block here while the
	 * reservation reclaim is carried out.
	 */
	if (sd_tr.srq_resv_reclaim_thread == NULL)
		sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
		    sd_resv_reclaim_thread, NULL,
		    0, &p0, TS_RUN, v.v_maxsyspri - 2);

	/* Tell the reservation reclaim thread that it has work to do */
	cv_signal(&sd_tr.srq_resv_reclaim_cv);
	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
}

/*
 * Function: sd_resv_reclaim_thread()
 *
 * Description: This function implements the reservation reclaim operations.
 *
 * Arguments: none; the requests queued on sd_tr supply the devices whose
 *		reservations are to be reclaimed.
 */

static void
sd_resv_reclaim_thread()
{
	struct sd_lun		*un;
	struct sd_thr_request	*sd_mhreq;

	/* Wait for work */
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_req_head == NULL) {
		cv_wait(&sd_tr.srq_resv_reclaim_cv,
		    &sd_tr.srq_resv_reclaim_mutex);
	}

	/* Loop while we have work */
	while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
		un = ddi_get_soft_state(sd_state,
		    SDUNIT(sd_tr.srq_thr_cur_req->dev));
		if (un == NULL) {
			/*
			 * softstate structure is NULL so just
			 * dequeue the request and continue
			 */
			sd_tr.srq_thr_req_head =
			    sd_tr.srq_thr_cur_req->sd_thr_req_next;
			kmem_free(sd_tr.srq_thr_cur_req,
			    sizeof (struct sd_thr_request));
			continue;
		}

		/* dequeue the request */
		sd_mhreq = sd_tr.srq_thr_cur_req;
		sd_tr.srq_thr_req_head =
		    sd_tr.srq_thr_cur_req->sd_thr_req_next;
		mutex_exit(&sd_tr.srq_resv_reclaim_mutex);

		/*
		 * Reclaim reservation only if SD_RESERVE is still set. There
		 * may have been a call to MHIOCRELEASE before we got here.
		 */
		mutex_enter(SD_MUTEX(un));
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			/*
			 * Note: The SD_LOST_RESERVE flag is cleared before
			 * reclaiming the reservation.
If this is done after the 25388 * call to sd_reserve_release a reservation loss in the 25389 * window between pkt completion of reserve cmd and 25390 * mutex_enter below may not be recognized 25391 */ 25392 un->un_resvd_status &= ~SD_LOST_RESERVE; 25393 mutex_exit(SD_MUTEX(un)); 25394 25395 if (sd_reserve_release(sd_mhreq->dev, 25396 SD_RESERVE) == 0) { 25397 mutex_enter(SD_MUTEX(un)); 25398 un->un_resvd_status |= SD_RESERVE; 25399 mutex_exit(SD_MUTEX(un)); 25400 SD_INFO(SD_LOG_IOCTL_MHD, un, 25401 "sd_resv_reclaim_thread: " 25402 "Reservation Recovered\n"); 25403 } else { 25404 mutex_enter(SD_MUTEX(un)); 25405 un->un_resvd_status |= SD_LOST_RESERVE; 25406 mutex_exit(SD_MUTEX(un)); 25407 SD_INFO(SD_LOG_IOCTL_MHD, un, 25408 "sd_resv_reclaim_thread: Failed " 25409 "Reservation Recovery\n"); 25410 } 25411 } else { 25412 mutex_exit(SD_MUTEX(un)); 25413 } 25414 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25415 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 25416 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25417 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 25418 /* 25419 * wakeup the destroy thread if anyone is waiting on 25420 * us to complete. 25421 */ 25422 cv_signal(&sd_tr.srq_inprocess_cv); 25423 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25424 "sd_resv_reclaim_thread: cv_signalling current request \n"); 25425 } 25426 25427 /* 25428 * cleanup the sd_tr structure now that this thread will not exist 25429 */ 25430 ASSERT(sd_tr.srq_thr_req_head == NULL); 25431 ASSERT(sd_tr.srq_thr_cur_req == NULL); 25432 sd_tr.srq_resv_reclaim_thread = NULL; 25433 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25434 thread_exit(); 25435 } 25436 25437 25438 /* 25439 * Function: sd_rmv_resv_reclaim_req() 25440 * 25441 * Description: This function removes any pending reservation reclaim requests 25442 * for the specified device. 25443 * 25444 * Arguments: dev - the device 'dev_t' 25445 */ 25446 25447 static void 25448 sd_rmv_resv_reclaim_req(dev_t dev) 25449 { 25450 struct sd_thr_request *sd_mhreq; 25451 struct sd_thr_request *sd_prev; 25452 25453 /* Remove a reservation reclaim request from the list */ 25454 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25455 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 25456 /* 25457 * We are attempting to reinstate reservation for 25458 * this device. We wait for sd_reserve_release() 25459 * to return before we return. 25460 */ 25461 cv_wait(&sd_tr.srq_inprocess_cv, 25462 &sd_tr.srq_resv_reclaim_mutex); 25463 } else { 25464 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 25465 if (sd_mhreq && sd_mhreq->dev == dev) { 25466 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 25467 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25468 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25469 return; 25470 } 25471 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 25472 if (sd_mhreq && sd_mhreq->dev == dev) { 25473 break; 25474 } 25475 sd_prev = sd_mhreq; 25476 } 25477 if (sd_mhreq != NULL) { 25478 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 25479 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25480 } 25481 } 25482 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25483 } 25484 25485 25486 /* 25487 * Function: sd_mhd_reset_notify_cb() 25488 * 25489 * Description: This is a call back function for scsi_reset_notify. This 25490 * function updates the softstate reserved status and logs the 25491 * reset. The driver scsi watch facility callback function 25492 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 25493 * will reclaim the reservation. 
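 *		(The notification is registered from sd_mhdioc_takeown() via
 *		scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
 *		sd_mhd_reset_notify_cb, (caddr_t)un); see above.)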
 *
 * Arguments: arg - driver soft state (unit) structure
 */

static void
sd_mhd_reset_notify_cb(caddr_t arg)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	mutex_enter(SD_MUTEX(un));
	if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
		un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
		SD_INFO(SD_LOG_IOCTL_MHD, un,
		    "sd_mhd_reset_notify_cb: Lost Reservation\n");
	}
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_take_ownership()
 *
 * Description: This routine implements an algorithm to achieve a stable
 *		reservation on disks which don't implement priority reserve,
 *		and makes sure that other hosts lose any re-reservation
 *		attempts. The algorithm consists of a loop that keeps issuing
 *		the RESERVE for some period of time (min_ownership_delay,
 *		default 6 seconds). During that loop, it looks to see if
 *		there has been a bus device reset or bus reset (both of which
 *		cause an existing reservation to be lost). If the reservation
 *		is lost, issue RESERVE until a period of min_ownership_delay
 *		with no resets has gone by, or until max_ownership_delay has
 *		expired. This loop ensures that the host really did manage to
 *		reserve the device, in spite of resets. The looping for
 *		min_ownership_delay (default six seconds) is important to
 *		early generation clustering products, Solstice HA 1.x and
 *		Sun Cluster 2.x. Those products use an MHIOCENFAILFAST
 *		periodic timer of two seconds. By having MHIOCTKOWN issue
 *		Reserves in a loop for six seconds, and having
 *		MHIOCENFAILFAST poll every two seconds, the idea is that by
 *		the time the MHIOCTKOWN ioctl returns, the other host (if
 *		any) will have already noticed, via the MHIOCENFAILFAST
 *		polling, that it no longer "owns" the disk and will have
 *		panicked itself. Thus, the host issuing the MHIOCTKOWN is
 *		assured (with timing dependencies) that by the time it
 *		actually starts to use the disk for real work, the old owner
 *		is no longer accessing it.
 *
 *		min_ownership_delay is the minimum amount of time for which
 *		the disk must be reserved continuously devoid of resets
 *		before the MHIOCTKOWN ioctl will return success.
 *
 *		max_ownership_delay indicates the amount of time by which the
 *		take ownership should succeed or time out with an error.
 *
 * Arguments: dev - the device 'dev_t'
 *	    *p - struct containing timing info.
 *
 * Return Code: 0 for success or error code
 */

static int
sd_take_ownership(dev_t dev, struct mhioctkown *p)
{
	struct sd_lun	*un;
	int		rval;
	int		err;
	int		reservation_count = 0;
	int		min_ownership_delay =  6000000; /* in usec */
	int		max_ownership_delay = 30000000; /* in usec */
	clock_t		start_time;	/* starting time of this algorithm */
	clock_t		end_time;	/* time limit for giving up */
	clock_t		ownership_time;	/* time limit for stable ownership */
	clock_t		current_time;
	clock_t		previous_current_time;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * Attempt a device reservation. A priority reservation is requested.
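	 * (With SD_PRIORITY_RESERVE, sd_reserve_release() below will attempt
	 * LUN, target and bus resets to break an existing reservation if the
	 * initial RESERVE returns a reservation conflict.)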
25574 */ 25575 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 25576 != SD_SUCCESS) { 25577 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25578 "sd_take_ownership: return(1)=%d\n", rval); 25579 return (rval); 25580 } 25581 25582 /* Update the softstate reserved status to indicate the reservation */ 25583 mutex_enter(SD_MUTEX(un)); 25584 un->un_resvd_status |= SD_RESERVE; 25585 un->un_resvd_status &= 25586 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 25587 mutex_exit(SD_MUTEX(un)); 25588 25589 if (p != NULL) { 25590 if (p->min_ownership_delay != 0) { 25591 min_ownership_delay = p->min_ownership_delay * 1000; 25592 } 25593 if (p->max_ownership_delay != 0) { 25594 max_ownership_delay = p->max_ownership_delay * 1000; 25595 } 25596 } 25597 SD_INFO(SD_LOG_IOCTL_MHD, un, 25598 "sd_take_ownership: min, max delays: %d, %d\n", 25599 min_ownership_delay, max_ownership_delay); 25600 25601 start_time = ddi_get_lbolt(); 25602 current_time = start_time; 25603 ownership_time = current_time + drv_usectohz(min_ownership_delay); 25604 end_time = start_time + drv_usectohz(max_ownership_delay); 25605 25606 while (current_time - end_time < 0) { 25607 delay(drv_usectohz(500000)); 25608 25609 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 25610 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 25611 mutex_enter(SD_MUTEX(un)); 25612 rval = (un->un_resvd_status & 25613 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 25614 mutex_exit(SD_MUTEX(un)); 25615 break; 25616 } 25617 } 25618 previous_current_time = current_time; 25619 current_time = ddi_get_lbolt(); 25620 mutex_enter(SD_MUTEX(un)); 25621 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 25622 ownership_time = ddi_get_lbolt() + 25623 drv_usectohz(min_ownership_delay); 25624 reservation_count = 0; 25625 } else { 25626 reservation_count++; 25627 } 25628 un->un_resvd_status |= SD_RESERVE; 25629 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 25630 mutex_exit(SD_MUTEX(un)); 25631 25632 SD_INFO(SD_LOG_IOCTL_MHD, un, 25633 "sd_take_ownership: ticks for loop iteration=%ld, " 25634 "reservation=%s\n", (current_time - previous_current_time), 25635 reservation_count ? 
"ok" : "reclaimed"); 25636 25637 if (current_time - ownership_time >= 0 && 25638 reservation_count >= 4) { 25639 rval = 0; /* Achieved a stable ownership */ 25640 break; 25641 } 25642 if (current_time - end_time >= 0) { 25643 rval = EACCES; /* No ownership in max possible time */ 25644 break; 25645 } 25646 } 25647 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25648 "sd_take_ownership: return(2)=%d\n", rval); 25649 return (rval); 25650 } 25651 25652 25653 /* 25654 * Function: sd_reserve_release() 25655 * 25656 * Description: This function builds and sends scsi RESERVE, RELEASE, and 25657 * PRIORITY RESERVE commands based on a user specified command type 25658 * 25659 * Arguments: dev - the device 'dev_t' 25660 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 25661 * SD_RESERVE, SD_RELEASE 25662 * 25663 * Return Code: 0 or Error Code 25664 */ 25665 25666 static int 25667 sd_reserve_release(dev_t dev, int cmd) 25668 { 25669 struct uscsi_cmd *com = NULL; 25670 struct sd_lun *un = NULL; 25671 char cdb[CDB_GROUP0]; 25672 int rval; 25673 25674 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 25675 (cmd == SD_PRIORITY_RESERVE)); 25676 25677 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25678 return (ENXIO); 25679 } 25680 25681 /* instantiate and initialize the command and cdb */ 25682 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25683 bzero(cdb, CDB_GROUP0); 25684 com->uscsi_flags = USCSI_SILENT; 25685 com->uscsi_timeout = un->un_reserve_release_time; 25686 com->uscsi_cdblen = CDB_GROUP0; 25687 com->uscsi_cdb = cdb; 25688 if (cmd == SD_RELEASE) { 25689 cdb[0] = SCMD_RELEASE; 25690 } else { 25691 cdb[0] = SCMD_RESERVE; 25692 } 25693 25694 /* Send the command. */ 25695 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25696 SD_PATH_STANDARD); 25697 25698 /* 25699 * "break" a reservation that is held by another host, by issuing a 25700 * reset if priority reserve is desired, and we could not get the 25701 * device. 25702 */ 25703 if ((cmd == SD_PRIORITY_RESERVE) && 25704 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25705 /* 25706 * First try to reset the LUN. If we cannot, then try a target 25707 * reset, followed by a bus reset if the target reset fails. 25708 */ 25709 int reset_retval = 0; 25710 if (un->un_f_lun_reset_enabled == TRUE) { 25711 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 25712 } 25713 if (reset_retval == 0) { 25714 /* The LUN reset either failed or was not issued */ 25715 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25716 } 25717 if ((reset_retval == 0) && 25718 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 25719 rval = EIO; 25720 kmem_free(com, sizeof (*com)); 25721 return (rval); 25722 } 25723 25724 bzero(com, sizeof (struct uscsi_cmd)); 25725 com->uscsi_flags = USCSI_SILENT; 25726 com->uscsi_cdb = cdb; 25727 com->uscsi_cdblen = CDB_GROUP0; 25728 com->uscsi_timeout = 5; 25729 25730 /* 25731 * Reissue the last reserve command, this time without request 25732 * sense. Assume that it is just a regular reserve command. 25733 */ 25734 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25735 SD_PATH_STANDARD); 25736 } 25737 25738 /* Return an error if still getting a reservation conflict. 
*/ 25739 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25740 rval = EACCES; 25741 } 25742 25743 kmem_free(com, sizeof (*com)); 25744 return (rval); 25745 } 25746 25747 25748 #define SD_NDUMP_RETRIES 12 25749 /* 25750 * System Crash Dump routine 25751 */ 25752 25753 static int 25754 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 25755 { 25756 int instance; 25757 int partition; 25758 int i; 25759 int err; 25760 struct sd_lun *un; 25761 struct scsi_pkt *wr_pktp; 25762 struct buf *wr_bp; 25763 struct buf wr_buf; 25764 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 25765 daddr_t tgt_blkno; /* rmw - blkno for target */ 25766 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 25767 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 25768 size_t io_start_offset; 25769 int doing_rmw = FALSE; 25770 int rval; 25771 ssize_t dma_resid; 25772 daddr_t oblkno; 25773 diskaddr_t nblks = 0; 25774 diskaddr_t start_block; 25775 25776 instance = SDUNIT(dev); 25777 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 25778 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 25779 return (ENXIO); 25780 } 25781 25782 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 25783 25784 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 25785 25786 partition = SDPART(dev); 25787 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 25788 25789 if (!(NOT_DEVBSIZE(un))) { 25790 int secmask = 0; 25791 int blknomask = 0; 25792 25793 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1; 25794 secmask = un->un_tgt_blocksize - 1; 25795 25796 if (blkno & blknomask) { 25797 SD_TRACE(SD_LOG_DUMP, un, 25798 "sddump: dump start block not modulo %d\n", 25799 un->un_tgt_blocksize); 25800 return (EINVAL); 25801 } 25802 25803 if ((nblk * DEV_BSIZE) & secmask) { 25804 SD_TRACE(SD_LOG_DUMP, un, 25805 "sddump: dump length not modulo %d\n", 25806 un->un_tgt_blocksize); 25807 return (EINVAL); 25808 } 25809 25810 } 25811 25812 /* Validate the blocks to dump against the partition size. */ 25813 25814 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 25815 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 25816 25817 if (NOT_DEVBSIZE(un)) { 25818 if ((blkno + nblk) > nblks) { 25819 SD_TRACE(SD_LOG_DUMP, un, 25820 "sddump: dump range larger than partition: " 25821 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 25822 blkno, nblk, nblks); 25823 return (EINVAL); 25824 } 25825 } else { 25826 if (((blkno / (un->un_tgt_blocksize / DEV_BSIZE)) + 25827 (nblk / (un->un_tgt_blocksize / DEV_BSIZE))) > nblks) { 25828 SD_TRACE(SD_LOG_DUMP, un, 25829 "sddump: dump range larger than partition: " 25830 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 25831 blkno, nblk, nblks); 25832 return (EINVAL); 25833 } 25834 } 25835 25836 mutex_enter(&un->un_pm_mutex); 25837 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 25838 struct scsi_pkt *start_pktp; 25839 25840 mutex_exit(&un->un_pm_mutex); 25841 25842 /* 25843 * Use the PM framework to power on the HBA first. 25844 */ 25845 (void) pm_raise_power(SD_DEVINFO(un), 0, 25846 SD_PM_STATE_ACTIVE(un)); 25847 25848 /* 25849 * Dump no longer uses sdpower to power on a device; it's 25850 * done in-line here so it can be performed in polled mode. 25851 */ 25852 25853 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 25854 25855 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 25856 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 25857 25858 if (start_pktp == NULL) { 25859 /* We were not given a SCSI packet, fail.
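 *
 * Added note: scsi_init_pkt() was called above with NULL_FUNC as the
 * callback, so it cannot wait for resources in this polled dump
 * context; a NULL return simply means no packet is available now.
 * The packet built below is a 6-byte START STOP UNIT command, in
 * sketch form (values per the SCMD_ and SD_ definitions used):
 *
 *	pkt_cdbp[0] = SCMD_START_STOP	opcode 0x1b
 *	pkt_cdbp[4] = SD_TARGET_START	start bit set, spin media up
 *	pkt_flags   = FLAG_NOINTR	run it polled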
*/ 25860 return (EIO); 25861 } 25862 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 25863 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 25864 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 25865 start_pktp->pkt_flags = FLAG_NOINTR; 25866 25867 mutex_enter(SD_MUTEX(un)); 25868 SD_FILL_SCSI1_LUN(un, start_pktp); 25869 mutex_exit(SD_MUTEX(un)); 25870 /* 25871 * Scsi_poll returns 0 (success) if the command completes and 25872 * the status block is STATUS_GOOD. 25873 */ 25874 if (sd_scsi_poll(un, start_pktp) != 0) { 25875 scsi_destroy_pkt(start_pktp); 25876 return (EIO); 25877 } 25878 scsi_destroy_pkt(start_pktp); 25879 (void) sd_pm_state_change(un, SD_PM_STATE_ACTIVE(un), 25880 SD_PM_STATE_CHANGE); 25881 } else { 25882 mutex_exit(&un->un_pm_mutex); 25883 } 25884 25885 mutex_enter(SD_MUTEX(un)); 25886 un->un_throttle = 0; 25887 25888 /* 25889 * The first time through, reset the specific target device. 25890 * However, when cpr calls sddump we know that sd is in a 25891 * good state, so no bus reset is required. 25892 * Clear sense data via Request Sense cmd. 25893 * In sddump we don't care about allow_bus_device_reset anymore. 25894 */ 25895 25896 if ((un->un_state != SD_STATE_SUSPENDED) && 25897 (un->un_state != SD_STATE_DUMPING)) { 25898 25899 New_state(un, SD_STATE_DUMPING); 25900 25901 if (un->un_f_is_fibre == FALSE) { 25902 mutex_exit(SD_MUTEX(un)); 25903 /* 25904 * Attempt a bus reset for parallel scsi. 25905 * 25906 * Note: A bus reset is required because on some host 25907 * systems (e.g. E420R) a bus device reset is 25908 * insufficient to reset the state of the target. 25909 * 25910 * Note: Don't issue the reset for fibre-channel, 25911 * because this tends to hang the bus (loop) for 25912 * too long while everyone is logging out and in 25913 * and the deadman timer for dumping will fire 25914 * before the dump is complete. 25915 */ 25916 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 25917 mutex_enter(SD_MUTEX(un)); 25918 Restore_state(un); 25919 mutex_exit(SD_MUTEX(un)); 25920 return (EIO); 25921 } 25922 25923 /* Delay to give the device some recovery time. */ 25924 drv_usecwait(10000); 25925 25926 if (sd_send_polled_RQS(un) == SD_FAILURE) { 25927 SD_INFO(SD_LOG_DUMP, un, 25928 "sddump: sd_send_polled_RQS failed\n"); 25929 } 25930 mutex_enter(SD_MUTEX(un)); 25931 } 25932 } 25933 25934 /* 25935 * Convert the partition-relative block number to a 25936 * disk physical block number. 25937 */ 25938 if (NOT_DEVBSIZE(un)) { 25939 blkno += start_block; 25940 } else { 25941 blkno = blkno / (un->un_tgt_blocksize / DEV_BSIZE); 25942 blkno += start_block; 25943 } 25944 25945 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 25946 25947 25948 /* 25949 * Check if the device has a non-512 block size. 25950 */ 25951 wr_bp = NULL; 25952 if (NOT_DEVBSIZE(un)) { 25953 tgt_byte_offset = blkno * un->un_sys_blocksize; 25954 tgt_byte_count = nblk * un->un_sys_blocksize; 25955 if ((tgt_byte_offset % un->un_tgt_blocksize) || 25956 (tgt_byte_count % un->un_tgt_blocksize)) { 25957 doing_rmw = TRUE; 25958 /* 25959 * Calculate the block number and the number of blocks 25960 * in terms of the media block size. 25961 */ 25962 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 25963 tgt_nblk = 25964 ((tgt_byte_offset + tgt_byte_count + 25965 (un->un_tgt_blocksize - 1)) / 25966 un->un_tgt_blocksize) - tgt_blkno; 25967 25968 /* 25969 * Invoke the routine which is going to do the read part 25970 * of the read-modify-write. 25971 * Note that this routine returns a pointer to 25972 * a valid bp in wr_bp.
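 *
 * Worked example of the setup above (illustrative numbers only):
 * with un_sys_blocksize = 512, un_tgt_blocksize = 4096, blkno = 9
 * and nblk = 2:
 *
 *	tgt_byte_offset = 9 * 512 = 4608
 *	tgt_byte_count  = 2 * 512 = 1024
 *	tgt_blkno       = 4608 / 4096 = 1
 *	tgt_nblk        = ((4608 + 1024 + 4095) / 4096) - 1 = 1
 *
 * so one 4K media block is read back, the 1K of dump data lands at
 * io_start_offset = 4608 - 4096 = 512 within it, and the merged
 * block is written out below.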
25973 */ 25974 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 25975 &wr_bp); 25976 if (err) { 25977 mutex_exit(SD_MUTEX(un)); 25978 return (err); 25979 } 25980 /* 25981 * Offset is being calculated as - 25982 * (original block # * system block size) - 25983 * (new block # * target block size) 25984 */ 25985 io_start_offset = 25986 ((uint64_t)(blkno * un->un_sys_blocksize)) - 25987 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 25988 25989 ASSERT(io_start_offset < un->un_tgt_blocksize); 25990 /* 25991 * Do the modify portion of read modify write. 25992 */ 25993 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 25994 (size_t)nblk * un->un_sys_blocksize); 25995 } else { 25996 doing_rmw = FALSE; 25997 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 25998 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 25999 } 26000 26001 /* Convert blkno and nblk to target blocks */ 26002 blkno = tgt_blkno; 26003 nblk = tgt_nblk; 26004 } else { 26005 wr_bp = &wr_buf; 26006 bzero(wr_bp, sizeof (struct buf)); 26007 wr_bp->b_flags = B_BUSY; 26008 wr_bp->b_un.b_addr = addr; 26009 wr_bp->b_bcount = nblk << DEV_BSHIFT; 26010 wr_bp->b_resid = 0; 26011 } 26012 26013 mutex_exit(SD_MUTEX(un)); 26014 26015 /* 26016 * Obtain a SCSI packet for the write command. 26017 * It should be safe to call the allocator here without 26018 * worrying about being locked for DVMA mapping because 26019 * the address we're passed is already a DVMA mapping 26020 * 26021 * We are also not going to worry about semaphore ownership 26022 * in the dump buffer. Dumping is single threaded at present. 26023 */ 26024 26025 wr_pktp = NULL; 26026 26027 dma_resid = wr_bp->b_bcount; 26028 oblkno = blkno; 26029 26030 if (!(NOT_DEVBSIZE(un))) { 26031 nblk = nblk / (un->un_tgt_blocksize / DEV_BSIZE); 26032 } 26033 26034 while (dma_resid != 0) { 26035 26036 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26037 wr_bp->b_flags &= ~B_ERROR; 26038 26039 if (un->un_partial_dma_supported == 1) { 26040 blkno = oblkno + 26041 ((wr_bp->b_bcount - dma_resid) / 26042 un->un_tgt_blocksize); 26043 nblk = dma_resid / un->un_tgt_blocksize; 26044 26045 if (wr_pktp) { 26046 /* 26047 * Partial DMA transfers after initial transfer 26048 */ 26049 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 26050 blkno, nblk); 26051 } else { 26052 /* Initial transfer */ 26053 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 26054 un->un_pkt_flags, NULL_FUNC, NULL, 26055 blkno, nblk); 26056 } 26057 } else { 26058 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 26059 0, NULL_FUNC, NULL, blkno, nblk); 26060 } 26061 26062 if (rval == 0) { 26063 /* We were given a SCSI packet, continue. 
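 *
 * Added note on the partial-DMA restart arithmetic above: once a
 * transfer has moved (b_bcount - dma_resid) bytes, the next packet
 * is positioned as
 *
 *	blkno = oblkno + (b_bcount - dma_resid) / un_tgt_blocksize;
 *	nblk  = dma_resid / un_tgt_blocksize;
 *
 * i.e. it resumes at the first block not yet transferred and covers
 * only the bytes the HBA could not map the first time.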
*/ 26064 break; 26065 } 26066 26067 if (i == 0) { 26068 if (wr_bp->b_flags & B_ERROR) { 26069 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26070 "no resources for dumping; " 26071 "error code: 0x%x, retrying", 26072 geterror(wr_bp)); 26073 } else { 26074 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26075 "no resources for dumping; retrying"); 26076 } 26077 } else if (i != (SD_NDUMP_RETRIES - 1)) { 26078 if (wr_bp->b_flags & B_ERROR) { 26079 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26080 "no resources for dumping; error code: " 26081 "0x%x, retrying\n", geterror(wr_bp)); 26082 } 26083 } else { 26084 if (wr_bp->b_flags & B_ERROR) { 26085 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26086 "no resources for dumping; " 26087 "error code: 0x%x, retries failed, " 26088 "giving up.\n", geterror(wr_bp)); 26089 } else { 26090 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26091 "no resources for dumping; " 26092 "retries failed, giving up.\n"); 26093 } 26094 mutex_enter(SD_MUTEX(un)); 26095 Restore_state(un); 26096 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 26097 mutex_exit(SD_MUTEX(un)); 26098 scsi_free_consistent_buf(wr_bp); 26099 } else { 26100 mutex_exit(SD_MUTEX(un)); 26101 } 26102 return (EIO); 26103 } 26104 drv_usecwait(10000); 26105 } 26106 26107 if (un->un_partial_dma_supported == 1) { 26108 /* 26109 * save the resid from PARTIAL_DMA 26110 */ 26111 dma_resid = wr_pktp->pkt_resid; 26112 if (dma_resid != 0) 26113 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 26114 wr_pktp->pkt_resid = 0; 26115 } else { 26116 dma_resid = 0; 26117 } 26118 26119 /* SunBug 1222170 */ 26120 wr_pktp->pkt_flags = FLAG_NOINTR; 26121 26122 err = EIO; 26123 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26124 26125 /* 26126 * Scsi_poll returns 0 (success) if the command completes and 26127 * the status block is STATUS_GOOD. We should only check 26128 * errors if this condition is not true. Even then we should 26129 * send our own request sense packet only if we have a check 26130 * condition and auto request sense has not been performed by 26131 * the hba. 26132 */ 26133 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 26134 26135 if ((sd_scsi_poll(un, wr_pktp) == 0) && 26136 (wr_pktp->pkt_resid == 0)) { 26137 err = SD_SUCCESS; 26138 break; 26139 } 26140 26141 /* 26142 * Check CMD_DEV_GONE 1st, give up if device is gone. 
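 *
 * Summary of the recovery ladder implemented below (added for
 * clarity):
 *
 *	CMD_DEV_GONE	give up, the device has left the bus
 *	STATUS_CHECK	polled REQUEST SENSE (unless ARQ already
 *			ran), then retry
 *	STATUS_BUSY	LUN reset if enabled, else target reset,
 *			then polled REQUEST SENSE and retry
 *	other status	sd_reset_target() and retry
 *	at retry SD_NDUMP_RETRIES/2, escalate to a full bus reset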
26143 */ 26144 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 26145 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26146 "Error while dumping state...Device is gone\n"); 26147 break; 26148 } 26149 26150 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 26151 SD_INFO(SD_LOG_DUMP, un, 26152 "sddump: write failed with CHECK, try # %d\n", i); 26153 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 26154 (void) sd_send_polled_RQS(un); 26155 } 26156 26157 continue; 26158 } 26159 26160 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 26161 int reset_retval = 0; 26162 26163 SD_INFO(SD_LOG_DUMP, un, 26164 "sddump: write failed with BUSY, try # %d\n", i); 26165 26166 if (un->un_f_lun_reset_enabled == TRUE) { 26167 reset_retval = scsi_reset(SD_ADDRESS(un), 26168 RESET_LUN); 26169 } 26170 if (reset_retval == 0) { 26171 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 26172 } 26173 (void) sd_send_polled_RQS(un); 26174 26175 } else { 26176 SD_INFO(SD_LOG_DUMP, un, 26177 "sddump: write failed with 0x%x, try # %d\n", 26178 SD_GET_PKT_STATUS(wr_pktp), i); 26179 mutex_enter(SD_MUTEX(un)); 26180 sd_reset_target(un, wr_pktp); 26181 mutex_exit(SD_MUTEX(un)); 26182 } 26183 26184 /* 26185 * If we are not getting anywhere with lun/target resets, 26186 * let's reset the bus. 26187 */ 26188 if (i == SD_NDUMP_RETRIES/2) { 26189 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 26190 (void) sd_send_polled_RQS(un); 26191 } 26192 } 26193 } 26194 26195 scsi_destroy_pkt(wr_pktp); 26196 mutex_enter(SD_MUTEX(un)); 26197 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 26198 mutex_exit(SD_MUTEX(un)); 26199 scsi_free_consistent_buf(wr_bp); 26200 } else { 26201 mutex_exit(SD_MUTEX(un)); 26202 } 26203 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 26204 return (err); 26205 } 26206 26207 /* 26208 * Function: sd_scsi_poll() 26209 * 26210 * Description: This is a wrapper for the scsi_poll call. 26211 * 26212 * Arguments: sd_lun - The unit structure 26213 * scsi_pkt - The scsi packet being sent to the device. 26214 * 26215 * Return Code: 0 - Command completed successfully with good status 26216 * -1 - Command failed. This could indicate a check condition 26217 * or other status value requiring recovery action. 26218 * 26219 * NOTE: This code is only called off sddump(). 26220 */ 26221 26222 static int 26223 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 26224 { 26225 int status; 26226 26227 ASSERT(un != NULL); 26228 ASSERT(!mutex_owned(SD_MUTEX(un))); 26229 ASSERT(pktp != NULL); 26230 26231 status = SD_SUCCESS; 26232 26233 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 26234 pktp->pkt_flags |= un->un_tagflags; 26235 pktp->pkt_flags &= ~FLAG_NODISCON; 26236 } 26237 26238 status = sd_ddi_scsi_poll(pktp); 26239 /* 26240 * Scsi_poll returns 0 (success) if the command completes and the 26241 * status block is STATUS_GOOD. We should only check errors if this 26242 * condition is not true. Even then we should send our own request 26243 * sense packet only if we have a check condition and auto 26244 * request sense has not been performed by the hba. 26245 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 26246 */ 26247 if ((status != SD_SUCCESS) && 26248 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 26249 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 26250 (pktp->pkt_reason != CMD_DEV_GONE)) 26251 (void) sd_send_polled_RQS(un); 26252 26253 return (status); 26254 } 26255 26256 /* 26257 * Function: sd_send_polled_RQS() 26258 * 26259 * Description: This sends the request sense command to a device. 
26260 * 26261 * Arguments: sd_lun - The unit structure 26262 * 26263 * Return Code: 0 - Command completed successfully with good status 26264 * -1 - Command failed. 26265 * 26266 */ 26267 26268 static int 26269 sd_send_polled_RQS(struct sd_lun *un) 26270 { 26271 int ret_val; 26272 struct scsi_pkt *rqs_pktp; 26273 struct buf *rqs_bp; 26274 26275 ASSERT(un != NULL); 26276 ASSERT(!mutex_owned(SD_MUTEX(un))); 26277 26278 ret_val = SD_SUCCESS; 26279 26280 rqs_pktp = un->un_rqs_pktp; 26281 rqs_bp = un->un_rqs_bp; 26282 26283 mutex_enter(SD_MUTEX(un)); 26284 26285 if (un->un_sense_isbusy) { 26286 ret_val = SD_FAILURE; 26287 mutex_exit(SD_MUTEX(un)); 26288 return (ret_val); 26289 } 26290 26291 /* 26292 * If the request sense buffer (and packet) is not in use, 26293 * let's set the un_sense_isbusy and send our packet 26294 */ 26295 un->un_sense_isbusy = 1; 26296 rqs_pktp->pkt_resid = 0; 26297 rqs_pktp->pkt_reason = 0; 26298 rqs_pktp->pkt_flags |= FLAG_NOINTR; 26299 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 26300 26301 mutex_exit(SD_MUTEX(un)); 26302 26303 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 26304 " 0x%p\n", rqs_bp->b_un.b_addr); 26305 26306 /* 26307 * Can't send this to sd_scsi_poll, we wrap ourselves around the 26308 * axle - it has a call into us! 26309 */ 26310 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 26311 SD_INFO(SD_LOG_COMMON, un, 26312 "sd_send_polled_RQS: RQS failed\n"); 26313 } 26314 26315 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 26316 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 26317 26318 mutex_enter(SD_MUTEX(un)); 26319 un->un_sense_isbusy = 0; 26320 mutex_exit(SD_MUTEX(un)); 26321 26322 return (ret_val); 26323 } 26324 26325 /* 26326 * Defines needed for localized version of the scsi_poll routine. 26327 */ 26328 #define CSEC 10000 /* usecs */ 26329 #define SEC_TO_CSEC (1000000/CSEC) 26330 26331 /* 26332 * Function: sd_ddi_scsi_poll() 26333 * 26334 * Description: Localized version of the scsi_poll routine. The purpose is to 26335 * send a scsi_pkt to a device as a polled command. This version 26336 * is to ensure more robust handling of transport errors. 26337 * Specifically this routine cures not ready, coming ready 26338 * transition for power up and reset of sonoma's. This can take 26339 * up to 45 seconds for power-on and 20 seconds for reset of a 26340 * sonoma lun. 26341 * 26342 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 26343 * 26344 * Return Code: 0 - Command completed successfully with good status 26345 * -1 - Command failed. 26346 * 26347 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can 26348 * be fixed (removing this code), we need to determine how to handle the 26349 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 26350 * 26351 * NOTE: This code is only called off sddump(). 26352 */ 26353 static int 26354 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 26355 { 26356 int rval = -1; 26357 int savef; 26358 long savet; 26359 void (*savec)(); 26360 int timeout; 26361 int busy_count; 26362 int poll_delay; 26363 int rc; 26364 uint8_t *sensep; 26365 struct scsi_arq_status *arqstat; 26366 extern int do_polled_io; 26367 26368 ASSERT(pkt->pkt_scbp); 26369 26370 /* 26371 * save old flags.. 
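 *
 * (and restore them before returning; pkt_flags, pkt_comp and
 * pkt_time are saved below). For the polling budget, note that
 * CSEC is 10000 usec (one centisecond) and SEC_TO_CSEC is 100, so
 * with the default pkt_time of SCSI_POLL_TIMEOUT (60 seconds):
 *
 *	timeout = 60 * SEC_TO_CSEC = 6000 iterations
 *
 * where a quick retry costs one iteration (10 msec) and each busy
 * or not-ready retry adds SEC_TO_CSEC - 1 more, i.e. a full second
 * of the budget.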
26372 */ 26373 savef = pkt->pkt_flags; 26374 savec = pkt->pkt_comp; 26375 savet = pkt->pkt_time; 26376 26377 pkt->pkt_flags |= FLAG_NOINTR; 26378 26379 /* 26380 * XXX there is nothing in the SCSA spec that states that we should not 26381 * do a callback for polled cmds; however, removing this will break sd 26382 * and probably other target drivers 26383 */ 26384 pkt->pkt_comp = NULL; 26385 26386 /* 26387 * we don't like a polled command without timeout. 26388 * 60 seconds seems long enough. 26389 */ 26390 if (pkt->pkt_time == 0) 26391 pkt->pkt_time = SCSI_POLL_TIMEOUT; 26392 26393 /* 26394 * Send polled cmd. 26395 * 26396 * We do some error recovery for various errors. Tran_busy, 26397 * queue full, and non-dispatched commands are retried every 10 msec. 26398 * as they are typically transient failures. Busy status and Not 26399 * Ready are retried every second as this status takes a while to 26400 * change. 26401 */ 26402 timeout = pkt->pkt_time * SEC_TO_CSEC; 26403 26404 for (busy_count = 0; busy_count < timeout; busy_count++) { 26405 /* 26406 * Initialize pkt status variables. 26407 */ 26408 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 26409 26410 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 26411 if (rc != TRAN_BUSY) { 26412 /* Transport failed - give up. */ 26413 break; 26414 } else { 26415 /* Transport busy - try again. */ 26416 poll_delay = 1 * CSEC; /* 10 msec. */ 26417 } 26418 } else { 26419 /* 26420 * Transport accepted - check pkt status. 26421 */ 26422 rc = (*pkt->pkt_scbp) & STATUS_MASK; 26423 if ((pkt->pkt_reason == CMD_CMPLT) && 26424 (rc == STATUS_CHECK) && 26425 (pkt->pkt_state & STATE_ARQ_DONE)) { 26426 arqstat = 26427 (struct scsi_arq_status *)(pkt->pkt_scbp); 26428 sensep = (uint8_t *)&arqstat->sts_sensedata; 26429 } else { 26430 sensep = NULL; 26431 } 26432 26433 if ((pkt->pkt_reason == CMD_CMPLT) && 26434 (rc == STATUS_GOOD)) { 26435 /* No error - we're done */ 26436 rval = 0; 26437 break; 26438 26439 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 26440 /* Lost connection - give up */ 26441 break; 26442 26443 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 26444 (pkt->pkt_state == 0)) { 26445 /* Pkt not dispatched - try again. */ 26446 poll_delay = 1 * CSEC; /* 10 msec. */ 26447 26448 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26449 (rc == STATUS_QFULL)) { 26450 /* Queue full - try again. */ 26451 poll_delay = 1 * CSEC; /* 10 msec. */ 26452 26453 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26454 (rc == STATUS_BUSY)) { 26455 /* Busy - try again. */ 26456 poll_delay = 100 * CSEC; /* 1 sec. */ 26457 busy_count += (SEC_TO_CSEC - 1); 26458 26459 } else if ((sensep != NULL) && 26460 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 26461 /* 26462 * Unit Attention - try again. 26463 * Pretend it took 1 sec. 26464 * NOTE: 'continue' avoids poll_delay 26465 */ 26466 busy_count += (SEC_TO_CSEC - 1); 26467 continue; 26468 26469 } else if ((sensep != NULL) && 26470 (scsi_sense_key(sensep) == KEY_NOT_READY) && 26471 (scsi_sense_asc(sensep) == 0x04) && 26472 (scsi_sense_ascq(sensep) == 0x01)) { 26473 /* 26474 * Not ready -> ready - try again. 26475 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 26476 * ...same as STATUS_BUSY 26477 */ 26478 poll_delay = 100 * CSEC; /* 1 sec. */ 26479 busy_count += (SEC_TO_CSEC - 1); 26480 26481 } else { 26482 /* BAD status - give up. 
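 *
 * For reference, the retry policy of the dispatch above in one
 * table (added summary; delays are the poll_delay settings):
 *
 *	TRAN_BUSY, not dispatched, QFULL	retry in 10 msec
 *	STATUS_BUSY, NOT READY (04h/01h)	retry in 1 sec
 *	UNIT ATTENTION				retry now, costed 1 sec
 *	STATUS_GOOD				done, rval = 0
 *	CMD_DEV_GONE or anything else		give up, rval = -1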
*/ 26483 break; 26484 } 26485 } 26486 26487 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 26488 !do_polled_io) { 26489 delay(drv_usectohz(poll_delay)); 26490 } else { 26491 /* we busy wait during cpr_dump or interrupt threads */ 26492 drv_usecwait(poll_delay); 26493 } 26494 } 26495 26496 pkt->pkt_flags = savef; 26497 pkt->pkt_comp = savec; 26498 pkt->pkt_time = savet; 26499 26500 /* return on error */ 26501 if (rval) 26502 return (rval); 26503 26504 /* 26505 * This is not a performance critical code path. 26506 * 26507 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 26508 * issues associated with looking at DMA memory prior to 26509 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 26510 */ 26511 scsi_sync_pkt(pkt); 26512 return (0); 26513 } 26514 26515 26516 26517 /* 26518 * Function: sd_persistent_reservation_in_read_keys 26519 * 26520 * Description: This routine is the driver entry point for handling CD-ROM 26521 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 26522 * by sending the SCSI-3 PRIN commands to the device. 26523 * Processes the read keys command response by copying the 26524 * reservation key information into the user provided buffer. 26525 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 26526 * 26527 * Arguments: un - Pointer to soft state struct for the target. 26528 * usrp - user provided pointer to multihost Persistent In Read 26529 * Keys structure (mhioc_inkeys_t) 26530 * flag - this argument is a pass through to ddi_copyxxx() 26531 * directly from the mode argument of ioctl(). 26532 * 26533 * Return Code: 0 - Success 26534 * EACCES 26535 * ENOTSUP 26536 * errno return code from sd_send_scsi_cmd() 26537 * 26538 * Context: Can sleep. Does not return until command is completed. 26539 */ 26540 26541 static int 26542 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 26543 mhioc_inkeys_t *usrp, int flag) 26544 { 26545 #ifdef _MULTI_DATAMODEL 26546 struct mhioc_key_list32 li32; 26547 #endif 26548 sd_prin_readkeys_t *in; 26549 mhioc_inkeys_t *ptr; 26550 mhioc_key_list_t li; 26551 uchar_t *data_bufp = NULL; 26552 int data_len = 0; 26553 int rval = 0; 26554 size_t copysz = 0; 26555 sd_ssc_t *ssc; 26556 26557 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 26558 return (EINVAL); 26559 } 26560 bzero(&li, sizeof (mhioc_key_list_t)); 26561 26562 ssc = sd_ssc_init(un); 26563 26564 /* 26565 * Get the listsize from user 26566 */ 26567 #ifdef _MULTI_DATAMODEL 26568 switch (ddi_model_convert_from(flag & FMODELS)) { 26569 case DDI_MODEL_ILP32: 26570 copysz = sizeof (struct mhioc_key_list32); 26571 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 26572 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26573 "sd_persistent_reservation_in_read_keys: " 26574 "failed ddi_copyin: mhioc_key_list32_t\n"); 26575 rval = EFAULT; 26576 goto done; 26577 } 26578 li.listsize = li32.listsize; 26579 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 26580 break; 26581 26582 case DDI_MODEL_NONE: 26583 copysz = sizeof (mhioc_key_list_t); 26584 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26585 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26586 "sd_persistent_reservation_in_read_keys: " 26587 "failed ddi_copyin: mhioc_key_list_t\n"); 26588 rval = EFAULT; 26589 goto done; 26590 } 26591 break; 26592 } 26593 26594 #else /* ! 
_MULTI_DATAMODEL */ 26595 copysz = sizeof (mhioc_key_list_t); 26596 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26597 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26598 "sd_persistent_reservation_in_read_keys: " 26599 "failed ddi_copyin: mhioc_key_list_t\n"); 26600 rval = EFAULT; 26601 goto done; 26602 } 26603 #endif 26604 26605 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 26606 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 26607 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26608 26609 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 26610 data_len, data_bufp); 26611 if (rval != 0) { 26612 if (rval == EIO) 26613 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 26614 else 26615 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 26616 goto done; 26617 } 26618 in = (sd_prin_readkeys_t *)data_bufp; 26619 ptr->generation = BE_32(in->generation); 26620 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 26621 26622 /* 26623 * Return the min(listsize, listlen) keys 26624 */ 26625 #ifdef _MULTI_DATAMODEL 26626 26627 switch (ddi_model_convert_from(flag & FMODELS)) { 26628 case DDI_MODEL_ILP32: 26629 li32.listlen = li.listlen; 26630 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 26631 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26632 "sd_persistent_reservation_in_read_keys: " 26633 "failed ddi_copyout: mhioc_key_list32_t\n"); 26634 rval = EFAULT; 26635 goto done; 26636 } 26637 break; 26638 26639 case DDI_MODEL_NONE: 26640 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26641 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26642 "sd_persistent_reservation_in_read_keys: " 26643 "failed ddi_copyout: mhioc_key_list_t\n"); 26644 rval = EFAULT; 26645 goto done; 26646 } 26647 break; 26648 } 26649 26650 #else /* ! _MULTI_DATAMODEL */ 26651 26652 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26653 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26654 "sd_persistent_reservation_in_read_keys: " 26655 "failed ddi_copyout: mhioc_key_list_t\n"); 26656 rval = EFAULT; 26657 goto done; 26658 } 26659 26660 #endif /* _MULTI_DATAMODEL */ 26661 26662 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 26663 li.listsize * MHIOC_RESV_KEY_SIZE); 26664 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 26665 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26666 "sd_persistent_reservation_in_read_keys: " 26667 "failed ddi_copyout: keylist\n"); 26668 rval = EFAULT; 26669 } 26670 done: 26671 sd_ssc_fini(ssc); 26672 kmem_free(data_bufp, data_len); 26673 return (rval); 26674 } 26675 26676 26677 /* 26678 * Function: sd_persistent_reservation_in_read_resv 26679 * 26680 * Description: This routine is the driver entry point for handling CD-ROM 26681 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 26682 * by sending the SCSI-3 PRIN commands to the device. 26683 * Process the read persistent reservations command response by 26684 * copying the reservation information into the user provided 26685 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 26686 * 26687 * Arguments: un - Pointer to soft state struct for the target. 26688 * usrp - user provided pointer to multihost Persistent In Read 26689 * Keys structure (mhioc_inkeys_t) 26690 * flag - this argument is a pass through to ddi_copyxxx() 26691 * directly from the mode argument of ioctl(). 26692 * 26693 * Return Code: 0 - Success 26694 * EACCES 26695 * ENOTSUP 26696 * errno return code from sd_send_scsi_cmd() 26697 * 26698 * Context: Can sleep. Does not return until command is completed. 
26699 */ 26700 26701 static int 26702 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 26703 mhioc_inresvs_t *usrp, int flag) 26704 { 26705 #ifdef _MULTI_DATAMODEL 26706 struct mhioc_resv_desc_list32 resvlist32; 26707 #endif 26708 sd_prin_readresv_t *in; 26709 mhioc_inresvs_t *ptr; 26710 sd_readresv_desc_t *readresv_ptr; 26711 mhioc_resv_desc_list_t resvlist; 26712 mhioc_resv_desc_t resvdesc; 26713 uchar_t *data_bufp = NULL; 26714 int data_len; 26715 int rval = 0; 26716 int i; 26717 size_t copysz = 0; 26718 mhioc_resv_desc_t *bufp; 26719 sd_ssc_t *ssc; 26720 26721 if ((ptr = usrp) == NULL) { 26722 return (EINVAL); 26723 } 26724 26725 ssc = sd_ssc_init(un); 26726 26727 /* 26728 * Get the listsize from user 26729 */ 26730 #ifdef _MULTI_DATAMODEL 26731 switch (ddi_model_convert_from(flag & FMODELS)) { 26732 case DDI_MODEL_ILP32: 26733 copysz = sizeof (struct mhioc_resv_desc_list32); 26734 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 26735 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26736 "sd_persistent_reservation_in_read_resv: " 26737 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26738 rval = EFAULT; 26739 goto done; 26740 } 26741 resvlist.listsize = resvlist32.listsize; 26742 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 26743 break; 26744 26745 case DDI_MODEL_NONE: 26746 copysz = sizeof (mhioc_resv_desc_list_t); 26747 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26748 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26749 "sd_persistent_reservation_in_read_resv: " 26750 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26751 rval = EFAULT; 26752 goto done; 26753 } 26754 break; 26755 } 26756 #else /* ! _MULTI_DATAMODEL */ 26757 copysz = sizeof (mhioc_resv_desc_list_t); 26758 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26759 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26760 "sd_persistent_reservation_in_read_resv: " 26761 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26762 rval = EFAULT; 26763 goto done; 26764 } 26765 #endif /* ! _MULTI_DATAMODEL */ 26766 26767 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 26768 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 26769 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26770 26771 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV, 26772 data_len, data_bufp); 26773 if (rval != 0) { 26774 if (rval == EIO) 26775 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 26776 else 26777 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 26778 goto done; 26779 } 26780 in = (sd_prin_readresv_t *)data_bufp; 26781 ptr->generation = BE_32(in->generation); 26782 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 26783 26784 /* 26785 * Return the min(listsize, listlen) reservation descriptors 26786 */ 26787 #ifdef _MULTI_DATAMODEL 26788 26789 switch (ddi_model_convert_from(flag & FMODELS)) { 26790 case DDI_MODEL_ILP32: 26791 resvlist32.listlen = resvlist.listlen; 26792 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 26793 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26794 "sd_persistent_reservation_in_read_resv: " 26795 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26796 rval = EFAULT; 26797 goto done; 26798 } 26799 break; 26800 26801 case DDI_MODEL_NONE: 26802 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26803 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26804 "sd_persistent_reservation_in_read_resv: " 26805 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26806 rval = EFAULT; 26807 goto done; 26808 } 26809 break; 26810 } 26811 26812 #else /* !
_MULTI_DATAMODEL */ 26813 26814 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26815 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26816 "sd_persistent_reservation_in_read_resv: " 26817 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26818 rval = EFAULT; 26819 goto done; 26820 } 26821 26822 #endif /* ! _MULTI_DATAMODEL */ 26823 26824 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 26825 bufp = resvlist.list; 26826 copysz = sizeof (mhioc_resv_desc_t); 26827 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 26828 i++, readresv_ptr++, bufp++) { 26829 26830 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 26831 MHIOC_RESV_KEY_SIZE); 26832 resvdesc.type = readresv_ptr->type; 26833 resvdesc.scope = readresv_ptr->scope; 26834 resvdesc.scope_specific_addr = 26835 BE_32(readresv_ptr->scope_specific_addr); 26836 26837 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 26838 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26839 "sd_persistent_reservation_in_read_resv: " 26840 "failed ddi_copyout: resvlist\n"); 26841 rval = EFAULT; 26842 goto done; 26843 } 26844 } 26845 done: 26846 sd_ssc_fini(ssc); 26847 /* only if data_bufp is allocated, we need to free it */ 26848 if (data_bufp) { 26849 kmem_free(data_bufp, data_len); 26850 } 26851 return (rval); 26852 } 26853 26854 26855 /* 26856 * Function: sr_change_blkmode() 26857 * 26858 * Description: This routine is the driver entry point for handling CD-ROM 26859 * block mode ioctl requests. Support for returning and changing 26860 * the current block size in use by the device is implemented. The 26861 * LBA size is changed via a MODE SELECT Block Descriptor. 26862 * 26863 * This routine issues a mode sense with an allocation length of 26864 * 12 bytes for the mode page header and a single block descriptor. 26865 * 26866 * Arguments: dev - the device 'dev_t' 26867 * cmd - the request type; one of CDROMGBLKMODE (get) or 26868 * CDROMSBLKMODE (set) 26869 * data - current block size or requested block size 26870 * flag - this argument is a pass through to ddi_copyxxx() directly 26871 * from the mode argument of ioctl(). 26872 * 26873 * Return Code: the code returned by sd_send_scsi_cmd() 26874 * EINVAL if invalid arguments are provided 26875 * EFAULT if ddi_copyxxx() fails 26876 * ENXIO if fail ddi_get_soft_state 26877 * EIO if invalid mode sense block descriptor length 26878 * 26879 */ 26880 26881 static int 26882 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 26883 { 26884 struct sd_lun *un = NULL; 26885 struct mode_header *sense_mhp, *select_mhp; 26886 struct block_descriptor *sense_desc, *select_desc; 26887 int current_bsize; 26888 int rval = EINVAL; 26889 uchar_t *sense = NULL; 26890 uchar_t *select = NULL; 26891 sd_ssc_t *ssc; 26892 26893 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 26894 26895 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26896 return (ENXIO); 26897 } 26898 26899 /* 26900 * The block length is changed via the Mode Select block descriptor, the 26901 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 26902 * required as part of this routine. Therefore the mode sense allocation 26903 * length is specified to be the length of a mode page header and a 26904 * block descriptor. 
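 *
 * As an illustration (header sizes assumed, not quoted from the
 * headers): with a 4-byte mode header and an 8-byte block
 * descriptor, BUFLEN_CHG_BLK_MODE comes to the 12 bytes noted
 * above, laid out as
 *
 *	sense[0..3]	struct mode_header, ending in bdesc_length
 *	sense[4..11]	struct block_descriptor, with blksize_hi,
 *			blksize_mid and blksize_lo in the last bytes
 *
 * and the 24-bit block size is reassembled below as
 * (blksize_hi << 16) | (blksize_mid << 8) | blksize_lo.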
26905 */ 26906 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26907 26908 ssc = sd_ssc_init(un); 26909 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 26910 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD); 26911 sd_ssc_fini(ssc); 26912 if (rval != 0) { 26913 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26914 "sr_change_blkmode: Mode Sense Failed\n"); 26915 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26916 return (rval); 26917 } 26918 26919 /* Check the block descriptor len to handle only 1 block descriptor */ 26920 sense_mhp = (struct mode_header *)sense; 26921 if ((sense_mhp->bdesc_length == 0) || 26922 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 26923 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26924 "sr_change_blkmode: Mode Sense returned invalid block" 26925 " descriptor length\n"); 26926 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26927 return (EIO); 26928 } 26929 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 26930 current_bsize = ((sense_desc->blksize_hi << 16) | 26931 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 26932 26933 /* Process command */ 26934 switch (cmd) { 26935 case CDROMGBLKMODE: 26936 /* Return the block size obtained during the mode sense */ 26937 if (ddi_copyout(&current_bsize, (void *)data, 26938 sizeof (int), flag) != 0) 26939 rval = EFAULT; 26940 break; 26941 case CDROMSBLKMODE: 26942 /* Validate the requested block size */ 26943 switch (data) { 26944 case CDROM_BLK_512: 26945 case CDROM_BLK_1024: 26946 case CDROM_BLK_2048: 26947 case CDROM_BLK_2056: 26948 case CDROM_BLK_2336: 26949 case CDROM_BLK_2340: 26950 case CDROM_BLK_2352: 26951 case CDROM_BLK_2368: 26952 case CDROM_BLK_2448: 26953 case CDROM_BLK_2646: 26954 case CDROM_BLK_2647: 26955 break; 26956 default: 26957 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26958 "sr_change_blkmode: " 26959 "Block Size '%ld' Not Supported\n", data); 26960 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26961 return (EINVAL); 26962 } 26963 26964 /* 26965 * The current block size matches the requested block size so 26966 * there is no need to send the mode select to change the size 26967 */ 26968 if (current_bsize == data) { 26969 break; 26970 } 26971 26972 /* Build the select data for the requested block size */ 26973 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26974 select_mhp = (struct mode_header *)select; 26975 select_desc = 26976 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 26977 /* 26978 * The LBA size is changed via the block descriptor, so the 26979 * descriptor is built according to the user data 26980 */ 26981 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 26982 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 26983 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 26984 select_desc->blksize_lo = (char)((data) & 0x000000ff); 26985 26986 /* Send the mode select for the requested block size */ 26987 ssc = sd_ssc_init(un); 26988 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 26989 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 26990 SD_PATH_STANDARD); 26991 sd_ssc_fini(ssc); 26992 if (rval != 0) { 26993 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26994 "sr_change_blkmode: Mode Select Failed\n"); 26995 /* 26996 * The mode select failed for the requested block size, 26997 * so reset the data for the original block size and 26998 * send it to the target. The error is indicated by the 26999 * return value for the failed mode select.
27000 */ 27001 select_desc->blksize_hi = sense_desc->blksize_hi; 27002 select_desc->blksize_mid = sense_desc->blksize_mid; 27003 select_desc->blksize_lo = sense_desc->blksize_lo; 27004 ssc = sd_ssc_init(un); 27005 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 27006 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 27007 SD_PATH_STANDARD); 27008 sd_ssc_fini(ssc); 27009 } else { 27010 ASSERT(!mutex_owned(SD_MUTEX(un))); 27011 mutex_enter(SD_MUTEX(un)); 27012 sd_update_block_info(un, (uint32_t)data, 0); 27013 mutex_exit(SD_MUTEX(un)); 27014 } 27015 break; 27016 default: 27017 /* should not reach here, but check anyway */ 27018 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27019 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 27020 rval = EINVAL; 27021 break; 27022 } 27023 27024 if (select) { 27025 kmem_free(select, BUFLEN_CHG_BLK_MODE); 27026 } 27027 if (sense) { 27028 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 27029 } 27030 return (rval); 27031 } 27032 27033 27034 /* 27035 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 27036 * implement driver support for getting and setting the CD speed. The command 27037 * set used will be based on the device type. If the device has not been 27038 * identified as MMC the Toshiba vendor specific mode page will be used. If 27039 * the device is MMC but does not support the Real Time Streaming feature 27040 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 27041 * be used to read the speed. 27042 */ 27043 27044 /* 27045 * Function: sr_change_speed() 27046 * 27047 * Description: This routine is the driver entry point for handling CD-ROM 27048 * drive speed ioctl requests for devices supporting the Toshiba 27049 * vendor specific drive speed mode page. Support for returning 27050 * and changing the current drive speed in use by the device is 27051 * implemented. 27052 * 27053 * Arguments: dev - the device 'dev_t' 27054 * cmd - the request type; one of CDROMGDRVSPEED (get) or 27055 * CDROMSDRVSPEED (set) 27056 * data - current drive speed or requested drive speed 27057 * flag - this argument is a pass through to ddi_copyxxx() directly 27058 * from the mode argument of ioctl(). 27059 * 27060 * Return Code: the code returned by sd_send_scsi_cmd() 27061 * EINVAL if invalid arguments are provided 27062 * EFAULT if ddi_copyxxx() fails 27063 * ENXIO if fail ddi_get_soft_state 27064 * EIO if invalid mode sense block descriptor length 27065 */ 27066 27067 static int 27068 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 27069 { 27070 struct sd_lun *un = NULL; 27071 struct mode_header *sense_mhp, *select_mhp; 27072 struct mode_speed *sense_page, *select_page; 27073 int current_speed; 27074 int rval = EINVAL; 27075 int bd_len; 27076 uchar_t *sense = NULL; 27077 uchar_t *select = NULL; 27078 sd_ssc_t *ssc; 27079 27080 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 27081 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27082 return (ENXIO); 27083 } 27084 27085 /* 27086 * Note: The drive speed is being modified here according to a Toshiba 27087 * vendor specific mode page (0x31). 
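 *
 * Sketch of the page as this routine uses it (field names from
 * struct mode_speed; added for illustration):
 *
 *	mode_page.code   = CDROM_MODE_SPEED	page 0x31
 *	mode_page.length = 2
 *	speed            = 0x2 for 12x, otherwise one of the
 *			   CDROM_*_SPEED codes passed through as-is
 *
 * Only the speed byte varies on CDROMSDRVSPEED; the select buffer
 * built below is otherwise zero-filled, with bdesc_length = 0.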
27088 */ 27089 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 27090 27091 ssc = sd_ssc_init(un); 27092 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 27093 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 27094 SD_PATH_STANDARD); 27095 sd_ssc_fini(ssc); 27096 if (rval != 0) { 27097 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27098 "sr_change_speed: Mode Sense Failed\n"); 27099 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 27100 return (rval); 27101 } 27102 sense_mhp = (struct mode_header *)sense; 27103 27104 /* Check the block descriptor len to handle only 1 block descriptor */ 27105 bd_len = sense_mhp->bdesc_length; 27106 if (bd_len > MODE_BLK_DESC_LENGTH) { 27107 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27108 "sr_change_speed: Mode Sense returned invalid block " 27109 "descriptor length\n"); 27110 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 27111 return (EIO); 27112 } 27113 27114 sense_page = (struct mode_speed *) 27115 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 27116 current_speed = sense_page->speed; 27117 27118 /* Process command */ 27119 switch (cmd) { 27120 case CDROMGDRVSPEED: 27121 /* Return the drive speed obtained during the mode sense */ 27122 if (current_speed == 0x2) { 27123 current_speed = CDROM_TWELVE_SPEED; 27124 } 27125 if (ddi_copyout(&current_speed, (void *)data, 27126 sizeof (int), flag) != 0) { 27127 rval = EFAULT; 27128 } 27129 break; 27130 case CDROMSDRVSPEED: 27131 /* Validate the requested drive speed */ 27132 switch ((uchar_t)data) { 27133 case CDROM_TWELVE_SPEED: 27134 data = 0x2; 27135 /*FALLTHROUGH*/ 27136 case CDROM_NORMAL_SPEED: 27137 case CDROM_DOUBLE_SPEED: 27138 case CDROM_QUAD_SPEED: 27139 case CDROM_MAXIMUM_SPEED: 27140 break; 27141 default: 27142 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27143 "sr_change_speed: " 27144 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 27145 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 27146 return (EINVAL); 27147 } 27148 27149 /* 27150 * The current drive speed matches the requested drive speed so 27151 * there is no need to send the mode select to change the speed 27152 */ 27153 if (current_speed == data) { 27154 break; 27155 } 27156 27157 /* Build the select data for the requested drive speed */ 27158 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 27159 select_mhp = (struct mode_header *)select; 27160 select_mhp->bdesc_length = 0; 27161 select_page = 27162 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 27165 select_page->mode_page.code = CDROM_MODE_SPEED; 27166 select_page->mode_page.length = 2; 27167 select_page->speed = (uchar_t)data; 27168 27169 /* Send the mode select for the requested drive speed */ 27170 ssc = sd_ssc_init(un); 27171 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 27172 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 27173 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 27174 sd_ssc_fini(ssc); 27175 if (rval != 0) { 27176 /* 27177 * The mode select failed for the requested drive speed, 27178 * so reset the data for the original drive speed and 27179 * send it to the target. The error is indicated by the 27180 * return value for the failed mode select.
27181 */ 27182 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27183 "sr_change_speed: Mode Select Failed\n"); 27184 select_page->speed = sense_page->speed; 27185 ssc = sd_ssc_init(un); 27186 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 27187 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 27188 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 27189 sd_ssc_fini(ssc); 27190 } 27191 break; 27192 default: 27193 /* should not reach here, but check anyway */ 27194 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27195 "sr_change_speed: Command '%x' Not Supported\n", cmd); 27196 rval = EINVAL; 27197 break; 27198 } 27199 27200 if (select) { 27201 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 27202 } 27203 if (sense) { 27204 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 27205 } 27206 27207 return (rval); 27208 } 27209 27210 27211 /* 27212 * Function: sr_atapi_change_speed() 27213 * 27214 * Description: This routine is the driver entry point for handling CD-ROM 27215 * drive speed ioctl requests for MMC devices that do not support 27216 * the Real Time Streaming feature (0x107). 27217 * 27218 * Note: This routine will use the SET SPEED command which may not 27219 * be supported by all devices. 27220 * 27221 * Arguments: dev - the device 'dev_t' 27222 * cmd - the request type; one of CDROMGDRVSPEED (get) or 27223 * CDROMSDRVSPEED (set) 27224 * data - current drive speed or requested drive speed 27225 * flag - this argument is a pass through to ddi_copyxxx() directly 27226 * from the mode argument of ioctl(). 27227 * 27228 * Return Code: the code returned by sd_send_scsi_cmd() 27229 * EINVAL if invalid arguments are provided 27230 * EFAULT if ddi_copyxxx() fails 27231 * ENXIO if fail ddi_get_soft_state 27232 * EIO if invalid mode sense block descriptor length 27233 */ 27234 27235 static int 27236 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 27237 { 27238 struct sd_lun *un; 27239 struct uscsi_cmd *com = NULL; 27240 struct mode_header_grp2 *sense_mhp; 27241 uchar_t *sense_page; 27242 uchar_t *sense = NULL; 27243 char cdb[CDB_GROUP5]; 27244 int bd_len; 27245 int current_speed = 0; 27246 int max_speed = 0; 27247 int rval; 27248 sd_ssc_t *ssc; 27249 27250 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 27251 27252 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27253 return (ENXIO); 27254 } 27255 27256 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 27257 27258 ssc = sd_ssc_init(un); 27259 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 27260 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 27261 SD_PATH_STANDARD); 27262 sd_ssc_fini(ssc); 27263 if (rval != 0) { 27264 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27265 "sr_atapi_change_speed: Mode Sense Failed\n"); 27266 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27267 return (rval); 27268 } 27269 27270 /* Check the block descriptor len to handle only 1 block descriptor */ 27271 sense_mhp = (struct mode_header_grp2 *)sense; 27272 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 27273 if (bd_len > MODE_BLK_DESC_LENGTH) { 27274 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27275 "sr_atapi_change_speed: Mode Sense returned invalid " 27276 "block descriptor length\n"); 27277 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27278 return (EIO); 27279 } 27280 27281 /* Calculate the current and maximum drive speeds */ 27282 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 27283 current_speed = (sense_page[14] << 8) | sense_page[15]; 27284 max_speed = (sense_page[8] << 8) | sense_page[9]; 27285 27286
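	/*
	 * Worked example (illustrative; assumes SD_SPEED_1X is the 1x
	 * CD data rate of 176 KB/sec): page 0x2A reports speeds in
	 * KB/sec, so a drive returning
	 *
	 *	sense_page[8..9]   = 0x1b 0x90	max_speed = 7056 (40x)
	 *	sense_page[14..15] = 0x06 0xe0	current_speed = 1760
	 *
	 * reports 1760 / 176 = 10 from the CDROMGDRVSPEED path below.
	 */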
/* Process the command */ 27287 switch (cmd) { 27288 case CDROMGDRVSPEED: 27289 current_speed /= SD_SPEED_1X; 27290 if (ddi_copyout(&current_speed, (void *)data, 27291 sizeof (int), flag) != 0) 27292 rval = EFAULT; 27293 break; 27294 case CDROMSDRVSPEED: 27295 /* Convert the speed code to KB/sec */ 27296 switch ((uchar_t)data) { 27297 case CDROM_NORMAL_SPEED: 27298 current_speed = SD_SPEED_1X; 27299 break; 27300 case CDROM_DOUBLE_SPEED: 27301 current_speed = 2 * SD_SPEED_1X; 27302 break; 27303 case CDROM_QUAD_SPEED: 27304 current_speed = 4 * SD_SPEED_1X; 27305 break; 27306 case CDROM_TWELVE_SPEED: 27307 current_speed = 12 * SD_SPEED_1X; 27308 break; 27309 case CDROM_MAXIMUM_SPEED: 27310 current_speed = 0xffff; 27311 break; 27312 default: 27313 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27314 "sr_atapi_change_speed: invalid drive speed %d\n", 27315 (uchar_t)data); 27316 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27317 return (EINVAL); 27318 } 27319 27320 /* Check the request against the drive's max speed. */ 27321 if (current_speed != 0xffff) { 27322 if (current_speed > max_speed) { 27323 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27324 return (EINVAL); 27325 } 27326 } 27327 27328 /* 27329 * Build and send the SET SPEED command 27330 * 27331 * Note: The SET SPEED (0xBB) command used in this routine is 27332 * obsolete per the SCSI MMC spec but still supported in the 27333 * MT FUJI vendor spec. Most equipment adheres to MT FUJI, 27334 * therefore the command is still implemented in this routine. 27335 */ 27336 bzero(cdb, sizeof (cdb)); 27337 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 27338 cdb[2] = (uchar_t)(current_speed >> 8); 27339 cdb[3] = (uchar_t)current_speed; 27340 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27341 com->uscsi_cdb = (caddr_t)cdb; 27342 com->uscsi_cdblen = CDB_GROUP5; 27343 com->uscsi_bufaddr = NULL; 27344 com->uscsi_buflen = 0; 27345 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27346 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 27347 break; 27348 default: 27349 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27350 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 27351 rval = EINVAL; 27352 } 27353 27354 if (sense) { 27355 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27356 } 27357 if (com) { 27358 kmem_free(com, sizeof (*com)); 27359 } 27360 return (rval); 27361 } 27362 27363 27364 /* 27365 * Function: sr_pause_resume() 27366 * 27367 * Description: This routine is the driver entry point for handling CD-ROM 27368 * pause/resume ioctl requests. This only affects the audio play 27369 * operation. 27370 * 27371 * Arguments: dev - the device 'dev_t' 27372 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 27373 * for setting the resume bit of the cdb.
27374 * 27375 * Return Code: the code returned by sd_send_scsi_cmd() 27376 * EINVAL if invalid mode specified 27377 * 27378 */ 27379 27380 static int 27381 sr_pause_resume(dev_t dev, int cmd) 27382 { 27383 struct sd_lun *un; 27384 struct uscsi_cmd *com; 27385 char cdb[CDB_GROUP1]; 27386 int rval; 27387 27388 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27389 return (ENXIO); 27390 } 27391 27392 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27393 bzero(cdb, CDB_GROUP1); 27394 cdb[0] = SCMD_PAUSE_RESUME; 27395 switch (cmd) { 27396 case CDROMRESUME: 27397 cdb[8] = 1; 27398 break; 27399 case CDROMPAUSE: 27400 cdb[8] = 0; 27401 break; 27402 default: 27403 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 27404 " Command '%x' Not Supported\n", cmd); 27405 rval = EINVAL; 27406 goto done; 27407 } 27408 27409 com->uscsi_cdb = cdb; 27410 com->uscsi_cdblen = CDB_GROUP1; 27411 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27412 27413 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27414 SD_PATH_STANDARD); 27415 27416 done: 27417 kmem_free(com, sizeof (*com)); 27418 return (rval); 27419 } 27420 27421 27422 /* 27423 * Function: sr_play_msf() 27424 * 27425 * Description: This routine is the driver entry point for handling CD-ROM 27426 * ioctl requests to output the audio signals at the specified 27427 * starting address and continue the audio play until the specified 27428 * ending address (CDROMPLAYMSF) The address is in Minute Second 27429 * Frame (MSF) format. 27430 * 27431 * Arguments: dev - the device 'dev_t' 27432 * data - pointer to user provided audio msf structure, 27433 * specifying start/end addresses. 27434 * flag - this argument is a pass through to ddi_copyxxx() 27435 * directly from the mode argument of ioctl(). 
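 *
 * For reference (standard CD addressing, not specific to this
 * driver): an MSF address corresponds to a logical block address as
 *
 *	lba = ((minute * 60 + second) * 75 + frame) - 150
 *
 * with 75 frames per second and a 150-frame (2 second) lead-in
 * offset, so MSF 00:02:00 is LBA 0. The CDB built below carries the
 * raw MSF bytes, BCD-encoded first when un_f_cfg_playmsf_bcd is set
 * for the device.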
27436 * 27437 * Return Code: the code returned by sd_send_scsi_cmd() 27438 * EFAULT if ddi_copyxxx() fails 27439 * ENXIO if fail ddi_get_soft_state 27440 * EINVAL if data pointer is NULL 27441 */ 27442 27443 static int 27444 sr_play_msf(dev_t dev, caddr_t data, int flag) 27445 { 27446 struct sd_lun *un; 27447 struct uscsi_cmd *com; 27448 struct cdrom_msf msf_struct; 27449 struct cdrom_msf *msf = &msf_struct; 27450 char cdb[CDB_GROUP1]; 27451 int rval; 27452 27453 if (data == NULL) { 27454 return (EINVAL); 27455 } 27456 27457 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27458 return (ENXIO); 27459 } 27460 27461 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 27462 return (EFAULT); 27463 } 27464 27465 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27466 bzero(cdb, CDB_GROUP1); 27467 cdb[0] = SCMD_PLAYAUDIO_MSF; 27468 if (un->un_f_cfg_playmsf_bcd == TRUE) { 27469 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 27470 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 27471 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 27472 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 27473 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 27474 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 27475 } else { 27476 cdb[3] = msf->cdmsf_min0; 27477 cdb[4] = msf->cdmsf_sec0; 27478 cdb[5] = msf->cdmsf_frame0; 27479 cdb[6] = msf->cdmsf_min1; 27480 cdb[7] = msf->cdmsf_sec1; 27481 cdb[8] = msf->cdmsf_frame1; 27482 } 27483 com->uscsi_cdb = cdb; 27484 com->uscsi_cdblen = CDB_GROUP1; 27485 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27486 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27487 SD_PATH_STANDARD); 27488 kmem_free(com, sizeof (*com)); 27489 return (rval); 27490 } 27491 27492 27493 /* 27494 * Function: sr_play_trkind() 27495 * 27496 * Description: This routine is the driver entry point for handling CD-ROM 27497 * ioctl requests to output the audio signals at the specified 27498 * starting address and continue the audio play until the specified 27499 * ending address (CDROMPLAYTRKIND). The address is in Track Index 27500 * format. 27501 * 27502 * Arguments: dev - the device 'dev_t' 27503 * data - pointer to user provided audio track/index structure, 27504 * specifying start/end addresses. 27505 * flag - this argument is a pass through to ddi_copyxxx() 27506 * directly from the mode argument of ioctl(). 
27507 * 27508 * Return Code: the code returned by sd_send_scsi_cmd() 27509 * EFAULT if ddi_copyxxx() fails 27510 * ENXIO if fail ddi_get_soft_state 27511 * EINVAL if data pointer is NULL 27512 */ 27513 27514 static int 27515 sr_play_trkind(dev_t dev, caddr_t data, int flag) 27516 { 27517 struct cdrom_ti ti_struct; 27518 struct cdrom_ti *ti = &ti_struct; 27519 struct uscsi_cmd *com = NULL; 27520 char cdb[CDB_GROUP1]; 27521 int rval; 27522 27523 if (data == NULL) { 27524 return (EINVAL); 27525 } 27526 27527 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 27528 return (EFAULT); 27529 } 27530 27531 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27532 bzero(cdb, CDB_GROUP1); 27533 cdb[0] = SCMD_PLAYAUDIO_TI; 27534 cdb[4] = ti->cdti_trk0; 27535 cdb[5] = ti->cdti_ind0; 27536 cdb[7] = ti->cdti_trk1; 27537 cdb[8] = ti->cdti_ind1; 27538 com->uscsi_cdb = cdb; 27539 com->uscsi_cdblen = CDB_GROUP1; 27540 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27541 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27542 SD_PATH_STANDARD); 27543 kmem_free(com, sizeof (*com)); 27544 return (rval); 27545 } 27546 27547 27548 /* 27549 * Function: sr_read_all_subcodes() 27550 * 27551 * Description: This routine is the driver entry point for handling CD-ROM 27552 * ioctl requests to return raw subcode data while the target is 27553 * playing audio (CDROMSUBCODE). 27554 * 27555 * Arguments: dev - the device 'dev_t' 27556 * data - pointer to user provided cdrom subcode structure, 27557 * specifying the transfer length and address. 27558 * flag - this argument is a pass through to ddi_copyxxx() 27559 * directly from the mode argument of ioctl(). 27560 * 27561 * Return Code: the code returned by sd_send_scsi_cmd() 27562 * EFAULT if ddi_copyxxx() fails 27563 * ENXIO if fail ddi_get_soft_state 27564 * EINVAL if data pointer is NULL 27565 */ 27566 27567 static int 27568 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 27569 { 27570 struct sd_lun *un = NULL; 27571 struct uscsi_cmd *com = NULL; 27572 struct cdrom_subcode *subcode = NULL; 27573 int rval; 27574 size_t buflen; 27575 char cdb[CDB_GROUP5]; 27576 27577 #ifdef _MULTI_DATAMODEL 27578 /* To support ILP32 applications in an LP64 world */ 27579 struct cdrom_subcode32 cdrom_subcode32; 27580 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 27581 #endif 27582 if (data == NULL) { 27583 return (EINVAL); 27584 } 27585 27586 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27587 return (ENXIO); 27588 } 27589 27590 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 27591 27592 #ifdef _MULTI_DATAMODEL 27593 switch (ddi_model_convert_from(flag & FMODELS)) { 27594 case DDI_MODEL_ILP32: 27595 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 27596 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27597 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27598 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27599 return (EFAULT); 27600 } 27601 /* Convert the ILP32 uscsi data from the application to LP64 */ 27602 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 27603 break; 27604 case DDI_MODEL_NONE: 27605 if (ddi_copyin(data, subcode, 27606 sizeof (struct cdrom_subcode), flag)) { 27607 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27608 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27609 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27610 return (EFAULT); 27611 } 27612 break; 27613 } 27614 #else /* ! 


/*
 * Function:	sr_read_all_subcodes()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return raw subcode data while the target is
 *		playing audio (CDROMSUBCODE).
 *
 * Arguments:	dev	- the device 'dev_t'
 *		data	- pointer to user provided cdrom subcode structure,
 *			  specifying the transfer length and address.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state() fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_all_subcodes(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun *un = NULL;
	struct uscsi_cmd *com = NULL;
	struct cdrom_subcode *subcode = NULL;
	int rval;
	size_t buflen;
	char cdb[CDB_GROUP5];

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_subcode32 cdrom_subcode32;
	struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32;
#endif
	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_all_subcodes: ddi_copyin Failed\n");
			kmem_free(subcode, sizeof (struct cdrom_subcode));
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_subcode32tocdrom_subcode(cdsc32, subcode);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, subcode,
		    sizeof (struct cdrom_subcode), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_all_subcodes: ddi_copyin Failed\n");
			kmem_free(subcode, sizeof (struct cdrom_subcode));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: ddi_copyin Failed\n");
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects a maximum of 3 bytes for the length, check
	 * whether the requested length fits in 3 bytes.
	 */
	if ((subcode->cdsc_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    subcode->cdsc_length, 0xFFFFFF);
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EINVAL);
	}

	buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);

	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (char)0xff;
		cdb[3] = (char)0xff;
		cdb[4] = (char)0xff;
		cdb[5] = (char)0xff;
		cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
		cdb[10] = 1;
	} else {
		/*
		 * Note: A vendor specific command (0xDF) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
		cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
		cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(subcode, sizeof (struct cdrom_subcode));
	kmem_free(com, sizeof (*com));
	return (rval);
}
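
/*
 * Illustrative only: the 24-bit length guard above in miniature.  MMC
 * READ CD carries the transfer length in three bytes, so any value with
 * bits set above bit 23 cannot be encoded.  A minimal sketch; the
 * function name is ours, not the driver's:
 *
 *	#include <sys/types.h>
 *
 *	static int
 *	mmc_len_ok(uint32_t nblks)
 *	{
 *		return ((nblks & 0xFF000000) == 0);
 *	}
 *
 * mmc_len_ok(0x00FFFFFF) is the largest acceptable count; 0x01000000
 * and above are rejected, which is exactly the EINVAL case above.
 */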


/*
 * Function:	sr_read_subchannel()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return the Q sub-channel data of the CD
 *		current position block (CDROMSUBCHNL). The data includes the
 *		track number, index number, absolute CD-ROM address (LBA or
 *		MSF format per the user), track relative CD-ROM address (LBA
 *		or MSF format per the user), control data and audio status.
 *
 * Arguments:	dev	- the device 'dev_t'
 *		data	- pointer to user provided cdrom sub-channel structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state() fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_subchannel(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun *un;
	struct uscsi_cmd *com;
	struct cdrom_subchnl subchnl_struct;
	struct cdrom_subchnl *subchnl = &subchnl_struct;
	char cdb[CDB_GROUP1];
	caddr_t buffer;
	int rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) {
		return (EFAULT);
	}

	buffer = kmem_zalloc((size_t)16, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_SUBCHANNEL;
	/* Set the MSF bit based on the user requested address format */
	cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02;
	/*
	 * Set the Q bit in byte 2 to indicate that Q sub-channel data be
	 * returned
	 */
	cdb[2] = 0x40;
	/*
	 * Set byte 3 to specify the return data format. A value of 0x01
	 * indicates that the CD-ROM current position should be returned.
	 */
	cdb[3] = 0x01;
	cdb[8] = 0x10;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 16;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, 16);
		kmem_free(com, sizeof (*com));
		return (rval);
	}

	/* Process the returned Q sub-channel data */
	subchnl->cdsc_audiostatus = buffer[1];
	subchnl->cdsc_adr = (buffer[5] & 0xF0) >> 4;
	subchnl->cdsc_ctrl = (buffer[5] & 0x0F);
	subchnl->cdsc_trk = buffer[6];
	subchnl->cdsc_ind = buffer[7];
	if (subchnl->cdsc_format & CDROM_LBA) {
		subchnl->cdsc_absaddr.lba =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
		subchnl->cdsc_reladdr.lba =
		    ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) +
		    ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]);
	} else if (un->un_f_cfg_readsub_bcd == TRUE) {
		subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]);
		subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]);
		subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]);
		subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]);
		subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]);
		subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]);
	} else {
		subchnl->cdsc_absaddr.msf.minute = buffer[9];
		subchnl->cdsc_absaddr.msf.second = buffer[10];
		subchnl->cdsc_absaddr.msf.frame = buffer[11];
		subchnl->cdsc_reladdr.msf.minute = buffer[13];
		subchnl->cdsc_reladdr.msf.second = buffer[14];
		subchnl->cdsc_reladdr.msf.frame = buffer[15];
	}
	kmem_free(buffer, 16);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag)
	    != 0) {
		return (EFAULT);
	}
	return (rval);
}
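
/*
 * Illustrative only: drives that report BCD positions return minute 59
 * as 0x59.  A sketch of the conversion the BCD_TO_BYTE() macro performs;
 * the helper name is ours, the driver uses the macro:
 *
 *	#include <sys/types.h>
 *
 *	static uint8_t
 *	bcd_to_byte(uint8_t b)
 *	{
 *		return (((b >> 4) & 0xF) * 10 + (b & 0xF));
 *	}
 *
 * bcd_to_byte(0x59) == 59 and bcd_to_byte(0x07) == 7.  The inverse,
 * BYTE_TO_BCD(), packs 59 back into 0x59 (((59 / 10) << 4) | (59 % 10)).
 */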


/*
 * Function:	sr_read_tocentry()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read from the Table of Contents (TOC)
 *		(CDROMREADTOCENTRY). This routine provides the ADR and CTRL
 *		fields, the starting address (LBA or MSF format per the user)
 *		and the data mode if the user specified track is a data track.
 *
 *		Note: The READ HEADER (0x44) command used in this routine is
 *		obsolete per the SCSI MMC spec but still supported in the
 *		MT FUJI vendor spec. Most equipment still adheres to MT FUJI,
 *		so the command is still implemented in this routine.
 *
 * Arguments:	dev	- the device 'dev_t'
 *		data	- pointer to user provided toc entry structure,
 *			  specifying the track # and the address format
 *			  (LBA or MSF).
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state() fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tocentry(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun *un = NULL;
	struct uscsi_cmd *com;
	struct cdrom_tocentry toc_entry;
	struct cdrom_tocentry *entry = &toc_entry;
	caddr_t buffer;
	int rval;
	char cdb[CDB_GROUP1];

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
		return (EFAULT);
	}

	/* Validate the requested track and address format */
	if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
		return (EINVAL);
	}

	if (entry->cdte_track == 0) {
		return (EINVAL);
	}

	buffer = kmem_zalloc((size_t)12, KM_SLEEP);
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);

	cdb[0] = SCMD_READ_TOC;
	/* Set the MSF bit based on the user requested address format */
	cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		cdb[6] = BYTE_TO_BCD(entry->cdte_track);
	} else {
		cdb[6] = entry->cdte_track;
	}

	/*
	 * Bytes 7 & 8 hold the 12 byte allocation length for a single entry
	 * (4 byte TOC response header + 8 byte track descriptor).
	 */
	cdb[8] = 12;
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x0C;
	com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, 12);
		kmem_free(com, sizeof (*com));
		return (rval);
	}

	/* Process the toc entry */
	entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
	entry->cdte_ctrl = (buffer[5] & 0x0F);
	if (entry->cdte_format & CDROM_LBA) {
		entry->cdte_addr.lba =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
	} else {
		if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
			entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
			entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
			entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
		} else {
			entry->cdte_addr.msf.minute = buffer[9];
			entry->cdte_addr.msf.second = buffer[10];
			entry->cdte_addr.msf.frame = buffer[11];
		}
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	}

	/*
	 * Build and send the READ HEADER command to determine the data mode of
	 * the user specified track.
	 */
	if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
	    (entry->cdte_track != CDROM_LEADOUT)) {
		bzero(cdb, CDB_GROUP1);
		cdb[0] = SCMD_READ_HEADER;
		cdb[2] = buffer[8];
		cdb[3] = buffer[9];
		cdb[4] = buffer[10];
		cdb[5] = buffer[11];
		cdb[8] = 0x08;
		com->uscsi_buflen = 0x08;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval == 0) {
			entry->cdte_datamode = buffer[0];
		} else {
			/*
			 * The READ HEADER command failed.  Since the command
			 * is obsolete in one spec, it is better to return -1
			 * as the data mode for an invalid track so that we
			 * can still return the rest of the TOC data.
			 */
			entry->cdte_datamode = (uchar_t)-1;
		}
	} else {
		entry->cdte_datamode = (uchar_t)-1;
	}

	kmem_free(buffer, 12);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
		return (EFAULT);

	return (rval);
}
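
/*
 * Illustrative only: a userland sketch walking the TOC with this entry
 * point and the TOC header ioctl handled by sr_read_tochdr() below.
 * The file descriptor is assumed to be an open CD-ROM device node.
 *
 *	#include <sys/cdio.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	static void
 *	dump_toc(int fd)
 *	{
 *		struct cdrom_tochdr hdr;
 *		struct cdrom_tocentry te;
 *		int trk;
 *
 *		if (ioctl(fd, CDROMREADTOCHDR, &hdr) < 0)
 *			return;
 *		for (trk = hdr.cdth_trk0; trk <= hdr.cdth_trk1; trk++) {
 *			te.cdte_track = trk;
 *			te.cdte_format = CDROM_MSF;
 *			if (ioctl(fd, CDROMREADTOCENTRY, &te) < 0)
 *				continue;
 *			(void) printf("track %d: %02d:%02d.%02d\n", trk,
 *			    te.cdte_addr.msf.minute,
 *			    te.cdte_addr.msf.second,
 *			    te.cdte_addr.msf.frame);
 *		}
 *	}
 */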


/*
 * Function:	sr_read_tochdr()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read the Table of Contents (TOC) header
 *		(CDROMREADTOCHDR). The TOC header consists of the disk
 *		starting and ending track numbers.
 *
 * Arguments:	dev	- the device 'dev_t'
 *		data	- pointer to user provided toc header structure,
 *			  specifying the starting and ending track numbers.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state() fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tochdr(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun *un;
	struct uscsi_cmd *com;
	struct cdrom_tochdr toc_header;
	struct cdrom_tochdr *hdr = &toc_header;
	char cdb[CDB_GROUP1];
	int rval;
	caddr_t buffer;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc(4, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Specifying a track number of 0x00 in the READ TOC command indicates
	 * that the TOC header should be returned
	 */
	cdb[6] = 0x00;
	/*
	 * Bytes 7 & 8 are the 4 byte allocation length for the TOC header.
	 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
	 */
	cdb[8] = 0x04;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x04;
	com->uscsi_timeout = 300;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
		hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
	} else {
		hdr->cdth_trk0 = buffer[2];
		hdr->cdth_trk1 = buffer[3];
	}
	kmem_free(buffer, 4);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
		return (EFAULT);
	}
	return (rval);
}


/*
 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
 * digital audio, and extended architecture digital audio. These modes are
 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
 * MMC specs.
 *
 * In addition to support for the various data formats, these routines also
 * include support for devices that implement only the direct access READ
 * commands (0x08, 0x28), devices that implement the READ_CD commands
 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and
 * READ CDXA commands (0xD8, 0xDB).
 */

/*
 * Function:	sr_read_mode1()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl read mode1 requests (CDROMREADMODE1).
 *
 * Arguments:	dev	- the device 'dev_t'
 *		data	- pointer to user provided cd read structure specifying
 *			  the lba buffer address and length.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state() fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_mode1(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun *un;
	struct cdrom_read mode1_struct;
	struct cdrom_read *mode1 = &mode1_struct;
	int rval;
	sd_ssc_t *ssc;

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32 cdrom_read32;
	struct cdrom_read32 *cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sr_read_mode1: entry: un:0x%p\n", un);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode1);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) {
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr,
	    mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);

	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sr_read_mode1: exit: un:0x%p\n", un);

	return (rval);
}
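
/*
 * Illustrative only: CDROMREADMODE1 reads 2048-byte mode 1 user data at
 * a caller supplied LBA.  A userland sketch; the buffer size and helper
 * name are example assumptions:
 *
 *	#include <sys/cdio.h>
 *	#include <unistd.h>
 *
 *	static int
 *	read_one_block(int fd, int lba, char *buf2048)
 *	{
 *		struct cdrom_read cr;
 *
 *		cr.cdread_lba = lba;
 *		cr.cdread_bufaddr = buf2048;
 *		cr.cdread_buflen = 2048;
 *		return (ioctl(fd, CDROMREADMODE1, &cr));
 *	}
 */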


/*
 * Function:	sr_read_cd_mode2()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl read mode2 requests (CDROMREADMODE2) for devices that
 *		support the READ CD (0xBE) command or the 1st generation
 *		READ CD (0xD4) command.
 *
 * Arguments:	dev	- the device 'dev_t'
 *		data	- pointer to user provided cd read structure specifying
 *			  the lba buffer address and length.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state() fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun *un;
	struct uscsi_cmd *com;
	struct cdrom_read mode2_struct;
	struct cdrom_read *mode2 = &mode2_struct;
	uchar_t cdb[CDB_GROUP5];
	int nblocks;
	int rval;
#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32 cdrom_read32;
	struct cdrom_read32 *cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode2);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	bzero(cdb, sizeof (cdb));
	if (un->un_f_cfg_read_cd_xd4 == TRUE) {
		/* Read command supported by 1st generation atapi drives */
		cdb[0] = SCMD_READ_CDD4;
	} else {
		/* Universal CD Access Command */
		cdb[0] = SCMD_READ_CD;
	}

	/*
	 * Set the expected sector type to 2336 byte, Mode 2 (Yellow Book)
	 */
	cdb[1] = CDROM_SECTOR_TYPE_MODE2;

	/* set the start address */
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF);
	cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF);
	cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[6] = (uchar_t)(nblocks >> 16);
	cdb[7] = (uchar_t)(nblocks >> 8);
	cdb[8] = (uchar_t)nblocks;

	/* set the filter bits */
	cdb[9] = CDROM_READ_CD_USERDATA;

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(com, sizeof (*com));
	return (rval);
}
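
/*
 * Illustrative only: the GROUP5 READ CD CDB carries a 32-bit starting
 * LBA in bytes 2-5 and a 24-bit transfer length in bytes 6-8, exactly
 * as the shifts above build them.  The same packing as a helper; the
 * function is ours, not the driver's:
 *
 *	#include <sys/types.h>
 *
 *	static void
 *	pack_read_cd(uchar_t *cdb, uint32_t lba, uint32_t nblks)
 *	{
 *		cdb[2] = (lba >> 24) & 0xFF;
 *		cdb[3] = (lba >> 16) & 0xFF;
 *		cdb[4] = (lba >> 8) & 0xFF;
 *		cdb[5] = lba & 0xFF;
 *		cdb[6] = (nblks >> 16) & 0xFF;
 *		cdb[7] = (nblks >> 8) & 0xFF;
 *		cdb[8] = nblks & 0xFF;
 *	}
 */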


/*
 * Function:	sr_read_mode2()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl read mode2 requests (CDROMREADMODE2) for devices that
 *		do not support the READ CD (0xBE) command.
 *
 * Arguments:	dev	- the device 'dev_t'
 *		data	- pointer to user provided cd read structure specifying
 *			  the lba buffer address and length.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state() fails
 *		EINVAL if data pointer is NULL
 *		EIO if the driver fails to restore the block size
 *		EAGAIN if commands are in progress in the driver
 */

static int
sr_read_mode2(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun *un;
	struct cdrom_read mode2_struct;
	struct cdrom_read *mode2 = &mode2_struct;
	int rval;
	uint32_t restore_blksize;
	struct uscsi_cmd *com;
	uchar_t cdb[CDB_GROUP0];
	int nblocks;

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32 cdrom_read32;
	struct cdrom_read32 *cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	/*
	 * Because this routine will update the device and driver block size
	 * being used, we want to make sure there are no commands in progress.
	 * If commands are in progress the user will have to try again.
	 *
	 * We check for 1 instead of 0 because we increment un_ncmds_in_driver
	 * in sdioctl to protect commands from sdioctl through to the top of
	 * sd_uscsi_strategy. See sdioctl for details.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_ncmds_in_driver != 1) {
		mutex_exit(SD_MUTEX(un));
		return (EAGAIN);
	}
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sr_read_mode2: entry: un:0x%p\n", un);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode2);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/* Store the current target block size for restoration later */
	restore_blksize = un->un_tgt_blocksize;

	/* Change the device and soft state target block size to 2336 */
	if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
		rval = EIO;
		goto done;
	}

	bzero(cdb, sizeof (cdb));

	/* set READ operation */
	cdb[0] = SCMD_READ;

	/* convert the lba from 512 byte blocks to 2 Kbyte blocks */
	mode2->cdread_lba >>= 2;

	/* set the start address */
	cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F);
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[4] = (uchar_t)nblocks & 0xFF;

	/* build command */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	/*
	 * Issue SCSI command with user space address for read buffer.
	 *
	 * This sends the command through the main channel in the driver.
	 *
	 * Since this is accessed via an IOCTL call, we go through the
	 * standard path, so that if the device was powered down, then
	 * it would be 'awakened' to handle the command.
	 */
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);

	kmem_free(com, sizeof (*com));

	/* Restore the device and soft state target block size */
	if (sr_sector_mode(dev, restore_blksize) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "can't do switch back to mode 1\n");
		/*
		 * If the READ command succeeded we still need to report
		 * an error because we failed to restore the block size.
		 */
		if (rval == 0) {
			rval = EIO;
		}
	}

done:
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sr_read_mode2: exit: un:0x%p\n", un);

	return (rval);
}
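
/*
 * Illustrative only: the cdread_lba >>= 2 above converts an LBA given
 * in 512-byte units into 2-Kbyte units, since four 512-byte blocks fit
 * in one 2048-byte block.  For example, a user LBA of 100 (512-byte
 * blocks) becomes 100 >> 2 == 25 (2-Kbyte blocks); the byte offset is
 * the same either way: 100 * 512 == 25 * 2048 == 51200.
 */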


/*
 * Function:	sr_sector_mode()
 *
 * Description: This utility function is used by sr_read_mode2() to set the
 *		target block size based on the user specified size. This is
 *		a legacy implementation based upon a vendor specific mode
 *		page.
 *
 * Arguments:	dev	- the device 'dev_t'
 *		blksize	- the block size to be set, either 2336 or 512.
 *
 * Return Code: the code returned by sd_send_scsi_MODE_SENSE/MODE_SELECT
 *		ENXIO if ddi_get_soft_state() fails
 */

static int
sr_sector_mode(dev_t dev, uint32_t blksize)
{
	struct sd_lun *un;
	uchar_t *sense;
	uchar_t *select;
	int rval;
	sd_ssc_t *ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	sense = kmem_zalloc(20, KM_SLEEP);

	/* Note: This is a vendor specific mode page (0x81) */
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sr_sector_mode: Mode Sense failed\n");
		kmem_free(sense, 20);
		return (rval);
	}
	select = kmem_zalloc(20, KM_SLEEP);
	select[3] = 0x08;
	select[10] = ((blksize >> 8) & 0xff);
	select[11] = (blksize & 0xff);
	select[12] = 0x01;
	select[13] = 0x06;
	select[14] = sense[14];
	select[15] = sense[15];
	if (blksize == SD_MODE2_BLKSIZE) {
		select[14] |= 0x01;
	}

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20,
	    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sr_sector_mode: Mode Select failed\n");
	} else {
		/*
		 * Only update the softstate block size if we successfully
		 * changed the device block mode.
		 */
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, blksize, 0);
		mutex_exit(SD_MUTEX(un));
	}
	kmem_free(sense, 20);
	kmem_free(select, 20);
	return (rval);
}
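
/*
 * Illustrative only: the 20-byte mode select buffer built above, read
 * off the assignments rather than from any published spec, so treat
 * the interpretation as an assumption:
 *
 *	offset	value		use (as written above)
 *	3	0x08		block descriptor length
 *	10-11	blksize		target block size, big-endian
 *	12	0x01		page code (the sensed 0x81 with the
 *				PS bit cleared, as mode select requires)
 *	13	0x06		page length
 *	14	sense[14]	density byte; low bit set for 2336
 *	15	sense[15]	preserved from the mode sense data
 */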


/*
 * Function:	sr_read_cdda()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return CD-DA or subcode data (CDROMCDDA).
 *		If the target supports CDDA these requests are handled via a
 *		vendor specific command (0xD8). If the target does not
 *		support CDDA these requests are handled via the READ CD
 *		command (0xBE).
 *
 * Arguments:	dev	- the device 'dev_t'
 *		data	- pointer to user provided CD-DA structure specifying
 *			  the track starting address, transfer length, and
 *			  subcode options.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state() fails
 *		EINVAL if invalid arguments are provided
 *		ENOTTY
 */

static int
sr_read_cdda(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun *un;
	struct uscsi_cmd *com;
	struct cdrom_cdda *cdda;
	int rval;
	size_t buflen;
	char cdb[CDB_GROUP5];

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_cdda32 cdrom_cdda32;
	struct cdrom_cdda32 *cdda32 = &cdrom_cdda32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_cdda: ddi_copyin Failed\n");
			kmem_free(cdda, sizeof (struct cdrom_cdda));
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_cdda32tocdrom_cdda(cdda32, cdda);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_cdda: ddi_copyin Failed\n");
			kmem_free(cdda, sizeof (struct cdrom_cdda));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdda: ddi_copyin Failed\n");
		kmem_free(cdda, sizeof (struct cdrom_cdda));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects a maximum of 3 bytes for the length, check
	 * whether the requested length fits in 3 bytes.
	 */
	if ((cdda->cdda_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    cdda->cdda_length, 0xFFFFFF);
		kmem_free(cdda, sizeof (struct cdrom_cdda));
		return (EINVAL);
	}

	switch (cdda->cdda_subcode) {
	case CDROM_DA_NO_SUBCODE:
		buflen = CDROM_BLK_2352 * cdda->cdda_length;
		break;
	case CDROM_DA_SUBQ:
		buflen = CDROM_BLK_2368 * cdda->cdda_length;
		break;
	case CDROM_DA_ALL_SUBCODE:
		buflen = CDROM_BLK_2448 * cdda->cdda_length;
		break;
	case CDROM_DA_SUBCODE_ONLY:
		buflen = CDROM_BLK_SUBCODE * cdda->cdda_length;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdda: Subcode '0x%x' Not Supported\n",
		    cdda->cdda_subcode);
		kmem_free(cdda, sizeof (struct cdrom_cdda));
		return (EINVAL);
	}

	/* Build and send the command */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);

	if (un->un_f_cfg_cdda == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[1] = 0x04;
		cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
		cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdda->cdda_length) & 0x000000ff);
		cdb[9] = 0x10;
		switch (cdda->cdda_subcode) {
		case CDROM_DA_NO_SUBCODE:
			cdb[10] = 0x0;
			break;
		case CDROM_DA_SUBQ:
			cdb[10] = 0x2;
			break;
		case CDROM_DA_ALL_SUBCODE:
			cdb[10] = 0x1;
			break;
		case CDROM_DA_SUBCODE_ONLY:
			/* FALLTHROUGH */
		default:
			kmem_free(cdda, sizeof (struct cdrom_cdda));
			kmem_free(com, sizeof (*com));
			return (ENOTTY);
		}
	} else {
		cdb[0] = (char)SCMD_READ_CDDA;
		cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
		cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24);
		cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
		cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
		cdb[9] = ((cdda->cdda_length) & 0x000000ff);
		cdb[10] = cdda->cdda_subcode;
	}

	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)cdda->cdda_data;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);

	kmem_free(cdda, sizeof (struct cdrom_cdda));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function:	sr_read_cdxa()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return CD-XA (Extended Architecture) data
 *		(CDROMCDXA).
 *
 * Arguments:	dev	- the device 'dev_t'
 *		data	- pointer to user provided CD-XA structure specifying
 *			  the data starting address, transfer length, and
 *			  format
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state() fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_cdxa(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun *un;
	struct uscsi_cmd *com;
	struct cdrom_cdxa *cdxa;
	int rval;
	size_t buflen;
	char cdb[CDB_GROUP5];
	uchar_t read_flags;

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_cdxa32 cdrom_cdxa32;
	struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		/*
		 * Convert the ILP32 uscsi data from the
		 * application to LP64 for internal use.
		 */
		cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects a maximum of 3 bytes for the length, check
	 * whether the requested length fits in 3 bytes.
	 */
	if ((cdxa->cdxa_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    cdxa->cdxa_length, 0xFFFFFF);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	switch (cdxa->cdxa_format) {
	case CDROM_XA_DATA:
		buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
		read_flags = 0x10;
		break;
	case CDROM_XA_SECTOR_DATA:
		buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
		read_flags = 0xf8;
		break;
	case CDROM_XA_DATA_W_ERROR:
		buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
		read_flags = 0xfc;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdxa: Format '0x%x' Not Supported\n",
		    cdxa->cdxa_format);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);
	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[9] = (char)read_flags;
	} else {
		/*
		 * Note: A vendor specific command (0xDB) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_CDXA;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24);
		cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[9] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[10] = cdxa->cdxa_format;
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(cdxa, sizeof (struct cdrom_cdxa));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function:	sr_eject()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT)
 *
 * Arguments:	dev	- the device 'dev_t'
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 */

static int
sr_eject(dev_t dev)
{
	struct sd_lun *un;
	int rval;
	sd_ssc_t *ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	/*
	 * To prevent race conditions with the eject command, keep track
	 * of an eject command as it progresses. If we are already handling
	 * an eject command in the driver for the given unit and another
	 * request to eject is received, immediately return EAGAIN so we
	 * don't lose the command if the current eject command fails.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_ejecting == TRUE) {
		mutex_exit(SD_MUTEX(un));
		return (EAGAIN);
	}
	un->un_f_ejecting = TRUE;
	mutex_exit(SD_MUTEX(un));

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);

	if (rval != 0) {
		mutex_enter(SD_MUTEX(un));
		un->un_f_ejecting = FALSE;
		mutex_exit(SD_MUTEX(un));
		return (rval);
	}

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
	    SD_TARGET_EJECT, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);

	if (rval == 0) {
		mutex_enter(SD_MUTEX(un));
		sr_ejected(un);
		un->un_mediastate = DKIO_EJECTED;
		un->un_f_ejecting = FALSE;
		cv_broadcast(&un->un_state_cv);
		mutex_exit(SD_MUTEX(un));
	} else {
		mutex_enter(SD_MUTEX(un));
		un->un_f_ejecting = FALSE;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}


/*
 * Function:	sr_ejected()
 *
 * Description: This routine updates the soft state structure to invalidate
 *		the geometry information after the media has been ejected or
 *		a media eject has been detected.
 *
 * Arguments:	un	- driver soft state (unit) structure
 */

static void
sr_ejected(struct sd_lun *un)
{
	struct sd_errstats *stp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	un->un_f_blockcount_is_valid = FALSE;
	un->un_f_tgt_blocksize_is_valid = FALSE;
	mutex_exit(SD_MUTEX(un));
	cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
	mutex_enter(SD_MUTEX(un));

	if (un->un_errstats != NULL) {
		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		stp->sd_capacity.value.ui64 = 0;
	}
}


/*
 * Function:	sr_check_wp()
 *
 * Description: This routine checks the write protection of a removable
 *		media disk and hotpluggable devices via the write protect
 *		bit of the Mode Page Header device specific field. Some
 *		devices choke on an unsupported mode page. To work around
 *		this issue, this routine has been implemented to use the
 *		0x3f mode page (request all pages) for all device types.
 *
 * Arguments:	dev	- the device 'dev_t'
 *
 * Return Code: int indicating if the device is write protected (1) or not (0)
 *
 * Context:	Kernel thread.
 *
 */

static int
sr_check_wp(dev_t dev)
{
	struct sd_lun *un;
	uchar_t device_specific;
	uchar_t *sense;
	int hdrlen;
	int rval = FALSE;
	int status;
	sd_ssc_t *ssc;

	/*
	 * Note: The return codes for this routine should be reworked to
	 * properly handle the case of a NULL softstate.
	 */
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (FALSE);
	}

	if (un->un_f_cfg_is_atapi == TRUE) {
		/*
		 * The mode page contents are not required; set the allocation
		 * length for the mode page header only
		 */
		hdrlen = MODE_HEADER_LENGTH_GRP2;
		sense = kmem_zalloc(hdrlen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen,
		    MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (status != 0)
			goto err_exit;
		device_specific =
		    ((struct mode_header_grp2 *)sense)->device_specific;
	} else {
		hdrlen = MODE_HEADER_LENGTH;
		sense = kmem_zalloc(hdrlen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen,
		    MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (status != 0)
			goto err_exit;
		device_specific =
		    ((struct mode_header *)sense)->device_specific;
	}

	/*
	 * Report the device as write protected if the WP bit is set in
	 * the device specific field of the mode page header.  If the mode
	 * sense failed above, we jumped straight to err_exit instead: not
	 * all devices understand this query, and those are assumed not to
	 * be write protected (rval remains FALSE).
	 */
	if (device_specific & WRITE_PROTECT) {
		rval = TRUE;
	}

err_exit:
	kmem_free(sense, hdrlen);
	return (rval);
}
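
/*
 * Illustrative only: the write protect decision above in miniature.
 * The WP flag is conventionally bit 7 (0x80) of the device specific
 * byte in the mode header; the helper name is ours, the driver uses
 * the WRITE_PROTECT macro:
 *
 *	#include <sys/types.h>
 *
 *	static int
 *	dev_specific_wp(uint8_t device_specific)
 *	{
 *		return ((device_specific & 0x80) != 0);
 *	}
 *
 * dev_specific_wp(0x80) and dev_specific_wp(0x90) report protected;
 * dev_specific_wp(0x10) does not.
 */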

/*
 * Function:	sr_volume_ctrl()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		audio output volume ioctl requests (CDROMVOLCTRL).
 *
 * Arguments:	dev	- the device 'dev_t'
 *		data	- pointer to user audio volume control structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state() fails
 *		EINVAL if data pointer is NULL
 *
 */

static int
sr_volume_ctrl(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun *un;
	struct cdrom_volctrl volume;
	struct cdrom_volctrl *vol = &volume;
	uchar_t *sense_page;
	uchar_t *select_page;
	uchar_t *sense;
	uchar_t *select;
	int sense_buflen;
	int select_buflen;
	int rval;
	sd_ssc_t *ssc;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) {
		return (EFAULT);
	}

	if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
		struct mode_header_grp2 *sense_mhp;
		struct mode_header_grp2 *select_mhp;
		int bd_len;

		sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN;
		select_buflen = MODE_HEADER_LENGTH_GRP2 +
		    MODEPAGE_AUDIO_CTRL_LEN;
		sense = kmem_zalloc(sense_buflen, KM_SLEEP);
		select = kmem_zalloc(select_buflen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
		    sense_buflen, MODEPAGE_AUDIO_CTRL,
		    SD_PATH_STANDARD);
		sd_ssc_fini(ssc);

		if (rval != 0) {
			SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
			    "sr_volume_ctrl: Mode Sense Failed\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (rval);
		}
		sense_mhp = (struct mode_header_grp2 *)sense;
		select_mhp = (struct mode_header_grp2 *)select;
		bd_len = (sense_mhp->bdesc_length_hi << 8) |
		    sense_mhp->bdesc_length_lo;
		if (bd_len > MODE_BLK_DESC_LENGTH) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_volume_ctrl: Mode Sense returned invalid "
			    "block descriptor length\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (EIO);
		}
		sense_page = (uchar_t *)
		    (sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
		select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2);
		select_mhp->length_msb = 0;
		select_mhp->length_lsb = 0;
		select_mhp->bdesc_length_hi = 0;
		select_mhp->bdesc_length_lo = 0;
	} else {
		struct mode_header *sense_mhp, *select_mhp;

		sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
		select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
		sense = kmem_zalloc(sense_buflen, KM_SLEEP);
		select = kmem_zalloc(select_buflen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
		    sense_buflen, MODEPAGE_AUDIO_CTRL,
		    SD_PATH_STANDARD);
		sd_ssc_fini(ssc);

		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_volume_ctrl: Mode Sense Failed\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (rval);
		}
		sense_mhp = (struct mode_header *)sense;
		select_mhp = (struct mode_header *)select;
		if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_volume_ctrl: Mode Sense returned invalid "
			    "block descriptor length\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (EIO);
		}
		sense_page = (uchar_t *)
		    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
		select_page = (uchar_t *)(select + MODE_HEADER_LENGTH);
		select_mhp->length = 0;
		select_mhp->bdesc_length = 0;
	}
	/*
	 * Note: An audio control data structure could be created and overlaid
	 * on the following in place of the array indexing method implemented.
	 */

	/* Build the select data for the user volume data */
	select_page[0] = MODEPAGE_AUDIO_CTRL;
	select_page[1] = 0xE;
	/* Set the immediate bit */
	select_page[2] = 0x04;
	/* Zero out reserved fields */
	select_page[3] = 0x00;
	select_page[4] = 0x00;
	/* Return sense data for fields not to be modified */
	select_page[5] = sense_page[5];
	select_page[6] = sense_page[6];
	select_page[7] = sense_page[7];
	/* Set the user specified volume levels for channel 0 and 1 */
	select_page[8] = 0x01;
	select_page[9] = vol->channel0;
	select_page[10] = 0x02;
	select_page[11] = vol->channel1;
	/* Channel 2 and 3 are currently unsupported so return the sense data */
	select_page[12] = sense_page[12];
	select_page[13] = sense_page[13];
	select_page[14] = sense_page[14];
	select_page[15] = sense_page[15];

	ssc = sd_ssc_init(un);
	if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	} else {
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	}
	sd_ssc_fini(ssc);

	kmem_free(sense, sense_buflen);
	kmem_free(select, select_buflen);
	return (rval);
}
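
/*
 * Illustrative only: a userland sketch of CDROMVOLCTRL.  Channel values
 * are 0-255; the level argument and helper name are example assumptions.
 *
 *	#include <sys/cdio.h>
 *	#include <unistd.h>
 *
 *	static int
 *	set_volume(int fd, uchar_t level)
 *	{
 *		struct cdrom_volctrl vc;
 *
 *		vc.channel0 = level;
 *		vc.channel1 = level;
 *		vc.channel2 = 0;
 *		vc.channel3 = 0;
 *		return (ioctl(fd, CDROMVOLCTRL, &vc));
 *	}
 *
 * As the code above notes, only channels 0 and 1 are honored; channels
 * 2 and 3 are passed through from the sensed data.
 */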


/*
 * Function:	sr_read_sony_session_offset()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests for session offset information
 *		(CDROMREADOFFSET). The address of the first track in the
 *		last session of a multi-session CD-ROM is returned.
 *
 *		Note: This routine uses a vendor specific key value in the
 *		command control field without implementing any vendor check
 *		here or in the ioctl routine.
 *
 * Arguments:	dev	- the device 'dev_t'
 *		data	- pointer to an int to hold the requested address
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state() fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun *un;
	struct uscsi_cmd *com;
	caddr_t buffer;
	char cdb[CDB_GROUP1];
	int session_offset = 0;
	int rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Bytes 7 & 8 hold the 12 byte allocation length for a single entry
	 * (4 byte TOC response header + 8 byte response data).
	 */
	cdb[8] = SONY_SESSION_OFFSET_LEN;
	/* Byte 9 is the control byte. A vendor specific value is used */
	cdb[9] = SONY_SESSION_OFFSET_KEY;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
		kmem_free(com, sizeof (*com));
		return (rval);
	}
	if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
		session_offset =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
		/*
		 * The offset is returned in units of the current target
		 * block size; convert it to 2K blocks before returning it
		 * to the user.
		 */
		if (un->un_tgt_blocksize == CDROM_BLK_512) {
			session_offset >>= 2;
		} else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
			session_offset >>= 1;
		}
	}

	if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
		rval = EFAULT;
	}

	kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function:	sd_wm_cache_constructor()
 *
 * Description: Cache Constructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments:	wm	- A pointer to the sd_w_map to be initialized.
 *		un	- sd_lun structure for the device.
 *		flags	- the KM flags passed to the constructor
 *
 * Return Code: 0 on success.
 *		-1 on failure.
 */

/*ARGSUSED*/
static int
sd_wm_cache_constructor(void *wm, void *un, int flags)
{
	bzero(wm, sizeof (struct sd_w_map));
	cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
	return (0);
}


/*
 * Function:	sd_wm_cache_destructor()
 *
 * Description: Cache destructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments:	wm	- A pointer to the sd_w_map to be destroyed.
 *		un	- sd_lun structure for the device.
 */
/*ARGSUSED*/
static void
sd_wm_cache_destructor(void *wm, void *un)
{
	cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
}
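
/*
 * Illustrative only: how a constructor/destructor pair like the one
 * above is typically wired to a kmem cache.  The cache name, alignment,
 * and the surrounding attach-time context are example assumptions, not
 * the driver's actual attach code.
 *
 *	un->un_wm_cache = kmem_cache_create("example_wm_cache",
 *	    sizeof (struct sd_w_map), 8, sd_wm_cache_constructor,
 *	    sd_wm_cache_destructor, NULL, (void *)un, NULL, 0);
 *
 * kmem_cache_alloc() then hands out objects that have already been
 * through the constructor, and the destructor runs only when cached
 * objects are eventually reclaimed or the cache is destroyed.
 */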


/*
 * Function:	sd_range_lock()
 *
 * Description: Lock the specified range of blocks to ensure that a
 *		read-modify-write is atomic and that no other I/O writes to
 *		the same location. The range is specified in terms of start
 *		and end blocks. Block numbers are the actual media block
 *		numbers, not system block numbers.
 *
 * Arguments:	un	- sd_lun structure for the device.
 *		startb	- The starting block number
 *		endb	- The end block number
 *		typ	- type of i/o - simple/read_modify_write
 *
 * Return Code: wm	- pointer to the wmap structure.
 *
 * Context:	This routine can sleep.
 */

static struct sd_w_map *
sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
{
	struct sd_w_map *wmp = NULL;
	struct sd_w_map *sl_wmp = NULL;
	struct sd_w_map *tmp_wmp;
	wm_state state = SD_WM_CHK_LIST;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	while (state != SD_WM_DONE) {

		switch (state) {
		case SD_WM_CHK_LIST:
			/*
			 * This is the starting state. Check the wmap list
			 * to see if the range is currently available.
			 */
			if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
				/*
				 * If this is a simple write and no rmw
				 * i/o is pending then try to lock the
				 * range as the range should be available.
				 */
				state = SD_WM_LOCK_RANGE;
			} else {
				tmp_wmp = sd_get_range(un, startb, endb);
				if (tmp_wmp != NULL) {
					if ((wmp != NULL) && ONLIST(un, wmp)) {
						/*
						 * Should not keep onlist
						 * wmps while waiting; this
						 * macro also sets wmp to
						 * NULL.
						 */
						FREE_ONLIST_WMAP(un, wmp);
					}
					/*
					 * sl_wmp is the wmap on which wait
					 * is done. Since tmp_wmp points to
					 * the in-use wmap, set sl_wmp to
					 * tmp_wmp and change the state to
					 * wait.
					 */
					sl_wmp = tmp_wmp;
					state = SD_WM_WAIT_MAP;
				} else {
					state = SD_WM_LOCK_RANGE;
				}

			}
			break;

		case SD_WM_LOCK_RANGE:
			ASSERT(un->un_wm_cache);
			/*
			 * The range needs to be locked, so try to get a wmap.
			 * First attempt it with KM_NOSLEEP: we want to avoid
			 * sleeping if possible, since we would have to
			 * release the sd mutex in order to sleep.
			 */
			if (wmp == NULL)
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_NOSLEEP);
			if (wmp == NULL) {
				mutex_exit(SD_MUTEX(un));
				_NOTE(DATA_READABLE_WITHOUT_LOCK
				    (sd_lun::un_wm_cache))
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_SLEEP);
				mutex_enter(SD_MUTEX(un));
				/*
				 * We released the mutex, so recheck by going
				 * back to the check list state.
				 */
				state = SD_WM_CHK_LIST;
			} else {
				/*
				 * We exit out of the state machine since we
				 * have the wmap. Do the housekeeping first:
				 * place the wmap on the wmap list if it is
				 * not on it already, and then set the state
				 * to done.
				 */
				wmp->wm_start = startb;
				wmp->wm_end = endb;
				wmp->wm_flags = typ | SD_WM_BUSY;
				if (typ & SD_WTYPE_RMW) {
					un->un_rmw_count++;
				}
				/*
				 * If not already on the list then link
				 */
				if (!ONLIST(un, wmp)) {
					wmp->wm_next = un->un_wm;
					wmp->wm_prev = NULL;
					if (wmp->wm_next)
						wmp->wm_next->wm_prev = wmp;
					un->un_wm = wmp;
				}
				state = SD_WM_DONE;
			}
			break;

		case SD_WM_WAIT_MAP:
			ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
			/*
			 * Wait is done on sl_wmp, which is set in the
			 * check_list state.
			 */
			sl_wmp->wm_wanted_count++;
			cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
			sl_wmp->wm_wanted_count--;
			/*
			 * We can reuse the memory from the completed sl_wmp
			 * lock range for our new lock, but only if no one
			 * else is waiting for it.
			 */
			ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
			if (sl_wmp->wm_wanted_count == 0) {
				if (wmp != NULL) {
					CHK_N_FREEWMP(un, wmp);
				}
				wmp = sl_wmp;
			}
			sl_wmp = NULL;
			/*
			 * After waking up, need to recheck for availability
			 * of the range.
			 */
			state = SD_WM_CHK_LIST;
			break;

		default:
			panic("sd_range_lock: "
			    "Unknown state %d in sd_range_lock", state);
			/*NOTREACHED*/
		} /* switch(state) */

	} /* while(state != SD_WM_DONE) */

	mutex_exit(SD_MUTEX(un));

	ASSERT(wmp != NULL);

	return (wmp);
}
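
/*
 * Illustrative only: the intended pairing with sd_range_unlock() below,
 * sketched for a read-modify-write covering four media blocks.  The
 * block numbers are example values.
 *
 *	struct sd_w_map *wm;
 *
 *	wm = sd_range_lock(un, 8, 11, SD_WTYPE_RMW);
 *	(read blocks 8-11, merge in the new data, write them back)
 *	sd_range_unlock(un, wm);
 *
 * Any other write that touches blocks 8-11 in the meantime parks in
 * SD_WM_WAIT_MAP until the cv_broadcast() in sd_range_unlock() wakes it.
 */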
			 */
			ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
			if (sl_wmp->wm_wanted_count == 0) {
				if (wmp != NULL) {
					CHK_N_FREEWMP(un, wmp);
				}
				wmp = sl_wmp;
			}
			sl_wmp = NULL;
			/*
			 * After waking up, we need to recheck the
			 * availability of the range.
			 */
			state = SD_WM_CHK_LIST;
			break;

		default:
			panic("sd_range_lock: "
			    "Unknown state %d in sd_range_lock", state);
			/*NOTREACHED*/
		} /* switch(state) */

	} /* while(state != SD_WM_DONE) */

	mutex_exit(SD_MUTEX(un));

	ASSERT(wmp != NULL);

	return (wmp);
}


/*
 * Function: sd_get_range()
 *
 * Description: Find out if there is any I/O overlapping this one.
 *		Returns the write map of the first such I/O, NULL otherwise.
 *
 * Arguments:	un	- sd_lun structure for the device.
 *		startb	- The starting block number
 *		endb	- The end block number
 *
 * Return Code: wm	- pointer to the wmap structure.
 */

static struct sd_w_map *
sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
{
	struct sd_w_map *wmp;

	ASSERT(un != NULL);

	for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
		if (!(wmp->wm_flags & SD_WM_BUSY)) {
			continue;
		}
		if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
			break;
		}
		if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
			break;
		}
	}

	return (wmp);
}


/*
 * Function: sd_free_inlist_wmap()
 *
 * Description: Unlink and free a write map struct.
 *
 * Arguments:	un	- sd_lun structure for the device.
 *		wmp	- sd_w_map which needs to be unlinked.
 */

static void
sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
{
	ASSERT(un != NULL);

	if (un->un_wm == wmp) {
		un->un_wm = wmp->wm_next;
	} else {
		wmp->wm_prev->wm_next = wmp->wm_next;
	}

	if (wmp->wm_next) {
		wmp->wm_next->wm_prev = wmp->wm_prev;
	}

	wmp->wm_next = wmp->wm_prev = NULL;

	kmem_cache_free(un->un_wm_cache, wmp);
}


/*
 * Function: sd_range_unlock()
 *
 * Description: Unlock the range locked by wm.
 *		Free the write map if nobody else is waiting on it.
 *
 * Arguments:	un	- sd_lun structure for the device.
 *		wm	- sd_w_map which needs to be unlocked.
 */

static void
sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
{
	ASSERT(un != NULL);
	ASSERT(wm != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	if (wm->wm_flags & SD_WTYPE_RMW) {
		un->un_rmw_count--;
	}

	if (wm->wm_wanted_count) {
		wm->wm_flags = 0;
		/*
		 * Broadcast that the wmap is available now.
		 */
		cv_broadcast(&wm->wm_avail);
	} else {
		/*
		 * If no one is waiting on the map, it should be freed.
		 */
		sd_free_inlist_wmap(un, wm);
	}

	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_read_modify_write_task
 *
 * Description: Called from a taskq thread to initiate the write phase of
 *		a read-modify-write request. This is used for targets where
 *		un->un_sys_blocksize != un->un_tgt_blocksize.
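 *		For example (an illustrative case, not from the original
 *		comment): with a 512-byte system block size and a 2048-byte
 *		target block size, a 512-byte write covers only part of one
 *		target block, so the enclosing target block is read first
 *		(that read completes in interrupt context), and this
 *		taskq-driven routine then sends the write of the merged
 *		block.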
 *
 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
 *
 * Context: Called under taskq thread context.
 */

static void
sd_read_modify_write_task(void *arg)
{
	struct sd_mapblocksize_info	*bsp;
	struct buf	*bp;
	struct sd_xbuf	*xp;
	struct sd_lun	*un;

	bp = arg;	/* The bp is given in arg */
	ASSERT(bp != NULL);

	/* Get the pointer to the layer-private data struct */
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: entry: buf:0x%p\n", bp);

	/*
	 * This is the write phase of a read-modify-write request, called
	 * under the context of a taskq thread in response to the completion
	 * of the read portion of the rmw request, which completed under
	 * interrupt context. The write request must be sent from here down
	 * the iostart chain as if it were being sent from
	 * sd_mapblocksize_iostart(), so we use the layer index saved in the
	 * layer-private data area.
	 */
	SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
}


/*
 * Function: sddump_do_read_of_rmw()
 *
 * Description: This routine is called from sddump. If sddump is called
 *		with an I/O that is not aligned on a device blocksize
 *		boundary, then the write has to be converted to a
 *		read-modify-write. Do the read part here in order to keep
 *		sddump simple. Note that the sd_mutex is held across the
 *		call to this routine.
 *
 * Arguments:	un	- sd_lun
 *		blkno	- block number in terms of media block size.
 *		nblk	- number of blocks.
 *		bpp	- pointer to pointer to the buf structure. On return
 *			from this function, *bpp points to the valid buffer
 *			to which the write has to be done.
 *
 * Return Code: 0 for success or errno-type return code
 */

static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
    struct buf **bpp)
{
	int err;
	int i;
	int rval;
	struct buf *bp;
	struct scsi_pkt *pkt = NULL;
	uint32_t target_blocksize;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	target_blocksize = un->un_tgt_blocksize;

	mutex_exit(SD_MUTEX(un));

	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
	if (bp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
	    blkno, nblk);
	if (rval != 0) {
		scsi_free_consistent_buf(bp);
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	pkt->pkt_flags |= FLAG_NOINTR;

	err = EIO;
	for (i = 0; i < SD_NDUMP_RETRIES; i++) {

		/*
		 * sd_scsi_poll() returns 0 (success) if the command completes
		 * and the status block is STATUS_GOOD.
		 * We should only check for errors if this condition is not
		 * true. Even then we should send our own request sense
		 * packet only if we have a check condition and auto request
		 * sense has not been performed by the HBA.
		 */
		SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n");

		if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) {
			err = 0;
			break;
		}

		/*
		 * Check CMD_DEV_GONE first; give up if the device is gone,
		 * as there is no need to read RQS data.
		 */
		if (pkt->pkt_reason == CMD_DEV_GONE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Error while dumping state with rmw... "
			    "Device is gone\n");
			break;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with CHECK, try # %d\n", i);
			if ((pkt->pkt_state & STATE_ARQ_DONE) == 0) {
				(void) sd_send_polled_RQS(un);
			}

			continue;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) {
			int reset_retval = 0;

			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with BUSY, try # %d\n", i);

			if (un->un_f_lun_reset_enabled == TRUE) {
				reset_retval = scsi_reset(SD_ADDRESS(un),
				    RESET_LUN);
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
			}
			(void) sd_send_polled_RQS(un);

		} else {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with 0x%x, try # %d\n",
			    SD_GET_PKT_STATUS(pkt), i);
			mutex_enter(SD_MUTEX(un));
			sd_reset_target(un, pkt);
			mutex_exit(SD_MUTEX(un));
		}

		/*
		 * If we are not getting anywhere with lun/target resets,
		 * let's reset the bus.
		 */
		if (i > SD_NDUMP_RETRIES / 2) {
			(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			(void) sd_send_polled_RQS(un);
		}

	}
	scsi_destroy_pkt(pkt);

	if (err != 0) {
		scsi_free_consistent_buf(bp);
		*bpp = NULL;
	} else {
		*bpp = bp;
	}

done:
	mutex_enter(SD_MUTEX(un));
	return (err);
}


/*
 * Function: sd_failfast_flushq
 *
 * Description: Take all bp's on the wait queue that have B_FAILFAST set
 *		in b_flags and move them onto the failfast queue, then kick
 *		off a thread to return all bp's on the failfast queue to
 *		their owners with an error set.
 *
 * Arguments: un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
 */

static void
sd_failfast_flushq(struct sd_lun *un)
{
	struct buf *bp;
	struct buf *next_waitq_bp;
	struct buf *prev_waitq_bp = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
	ASSERT(un->un_failfast_bp == NULL);

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: entry: un:0x%p\n", un);

	/*
	 * Check if we should flush all bufs when entering failfast state, or
	 * just those with B_FAILFAST set.
	 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
		/*
		 * Move *all* bp's on the wait queue to the failfast flush
		 * queue, including those that do NOT have B_FAILFAST set.
29798 */ 29799 if (un->un_failfast_headp == NULL) { 29800 ASSERT(un->un_failfast_tailp == NULL); 29801 un->un_failfast_headp = un->un_waitq_headp; 29802 } else { 29803 ASSERT(un->un_failfast_tailp != NULL); 29804 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 29805 } 29806 29807 un->un_failfast_tailp = un->un_waitq_tailp; 29808 29809 /* update kstat for each bp moved out of the waitq */ 29810 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 29811 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 29812 } 29813 29814 /* empty the waitq */ 29815 un->un_waitq_headp = un->un_waitq_tailp = NULL; 29816 29817 } else { 29818 /* 29819 * Go thru the wait queue, pick off all entries with 29820 * B_FAILFAST set, and move these onto the failfast queue. 29821 */ 29822 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 29823 /* 29824 * Save the pointer to the next bp on the wait queue, 29825 * so we get to it on the next iteration of this loop. 29826 */ 29827 next_waitq_bp = bp->av_forw; 29828 29829 /* 29830 * If this bp from the wait queue does NOT have 29831 * B_FAILFAST set, just move on to the next element 29832 * in the wait queue. Note, this is the only place 29833 * where it is correct to set prev_waitq_bp. 29834 */ 29835 if ((bp->b_flags & B_FAILFAST) == 0) { 29836 prev_waitq_bp = bp; 29837 continue; 29838 } 29839 29840 /* 29841 * Remove the bp from the wait queue. 29842 */ 29843 if (bp == un->un_waitq_headp) { 29844 /* The bp is the first element of the waitq. */ 29845 un->un_waitq_headp = next_waitq_bp; 29846 if (un->un_waitq_headp == NULL) { 29847 /* The wait queue is now empty */ 29848 un->un_waitq_tailp = NULL; 29849 } 29850 } else { 29851 /* 29852 * The bp is either somewhere in the middle 29853 * or at the end of the wait queue. 29854 */ 29855 ASSERT(un->un_waitq_headp != NULL); 29856 ASSERT(prev_waitq_bp != NULL); 29857 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 29858 == 0); 29859 if (bp == un->un_waitq_tailp) { 29860 /* bp is the last entry on the waitq. */ 29861 ASSERT(next_waitq_bp == NULL); 29862 un->un_waitq_tailp = prev_waitq_bp; 29863 } 29864 prev_waitq_bp->av_forw = next_waitq_bp; 29865 } 29866 bp->av_forw = NULL; 29867 29868 /* 29869 * update kstat since the bp is moved out of 29870 * the waitq 29871 */ 29872 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 29873 29874 /* 29875 * Now put the bp onto the failfast queue. 29876 */ 29877 if (un->un_failfast_headp == NULL) { 29878 /* failfast queue is currently empty */ 29879 ASSERT(un->un_failfast_tailp == NULL); 29880 un->un_failfast_headp = 29881 un->un_failfast_tailp = bp; 29882 } else { 29883 /* Add the bp to the end of the failfast q */ 29884 ASSERT(un->un_failfast_tailp != NULL); 29885 ASSERT(un->un_failfast_tailp->b_flags & 29886 B_FAILFAST); 29887 un->un_failfast_tailp->av_forw = bp; 29888 un->un_failfast_tailp = bp; 29889 } 29890 } 29891 } 29892 29893 /* 29894 * Now return all bp's on the failfast queue to their owners. 29895 */ 29896 while ((bp = un->un_failfast_headp) != NULL) { 29897 29898 un->un_failfast_headp = bp->av_forw; 29899 if (un->un_failfast_headp == NULL) { 29900 un->un_failfast_tailp = NULL; 29901 } 29902 29903 /* 29904 * We want to return the bp with a failure error code, but 29905 * we do not want a call to sd_start_cmds() to occur here, 29906 * so use sd_return_failed_command_no_restart() instead of 29907 * sd_return_failed_command(). 29908 */ 29909 sd_return_failed_command_no_restart(un, bp, EIO); 29910 } 29911 29912 /* Flush the xbuf queues if required. 
	 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}


/*
 * Function: sd_failfast_flushq_callback
 *
 * Description: Return TRUE if the given bp meets the criteria for failfast
 *		flushing. Used with ddi_xbuf_flushq(9F).
 *
 * Arguments: bp - ptr to buf struct to be examined.
 *
 * Context: Any
 */

static int
sd_failfast_flushq_callback(struct buf *bp)
{
	/*
	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
	 * state is entered; OR (2) the given bp has B_FAILFAST set.
	 */
	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
}



/*
 * Function: sd_setup_next_xfer
 *
 * Description: Prepare the next I/O operation using DMA_PARTIAL.
 *
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	ssize_t	num_blks_not_xfered;
	daddr_t	strt_blk_num;
	ssize_t	bytes_not_xfered;
	int	rval;

	ASSERT(pkt->pkt_resid == 0);

	/*
	 * Calculate the next block number and amount to be transferred.
	 *
	 * How much data has NOT been transferred to the HBA yet.
	 */
	bytes_not_xfered = xp->xb_dma_resid;

	/*
	 * Figure out how many blocks have NOT been transferred to the
	 * HBA yet.
	 */
	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

	/*
	 * Set the starting block number to the end of what WAS transferred.
	 */
	strt_blk_num = xp->xb_blkno +
	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

	/*
	 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
	 * will call scsi_init_pkt with NULL_FUNC so we do not have to
	 * release the disk mutex here.
	 */
	rval = sd_setup_next_rw_pkt(un, pkt, bp,
	    strt_blk_num, num_blks_not_xfered);

	if (rval == 0) {

		/*
		 * Success.
		 *
		 * Adjust things if there are still more blocks to be
		 * transferred.
		 */
		xp->xb_dma_resid = pkt->pkt_resid;
		pkt->pkt_resid = 0;

		return (1);
	}

	/*
	 * There's really only one possible error return value from
	 * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt
	 * returns NULL.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	bp->b_resid = bp->b_bcount;
	bp->b_flags |= B_ERROR;

	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
	    "Error setting up next portion of DMA transfer\n");

	return (0);
}

/*
 * Function: sd_panic_for_res_conflict
 *
 * Description: Call panic with a string formatted with "Reservation Conflict"
 *		and a human readable identifier indicating the SD instance
 *		that experienced the reservation conflict.
 *
 * Arguments: un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
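 *
 * Note (added for clarity): the panic string buffer below is sized as
 * SD_RESV_CONFLICT_FMT_LEN + MAXPATHLEN so that the fixed message text
 * plus a full ddi_pathname() of the device always fit.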
 */

#define	SD_RESV_CONFLICT_FMT_LEN 40
void
sd_panic_for_res_conflict(struct sd_lun *un)
{
	char panic_str[SD_RESV_CONFLICT_FMT_LEN + MAXPATHLEN];
	char path_str[MAXPATHLEN];

	(void) snprintf(panic_str, sizeof (panic_str),
	    "Reservation Conflict\nDisk: %s",
	    ddi_pathname(SD_DEVINFO(un), path_str));

	panic(panic_str);
}

/*
 * Note: The following sd_faultinjection_ioctl() routines implement
 * driver support for fault injection, used for error analysis by
 * causing faults in multiple layers of the driver.
 */

#ifdef SD_FAULT_INJECTION
static uint_t   sd_fault_injection_on = 0;

/*
 * Function: sd_faultinjection_ioctl()
 *
 * Description: This routine is the driver entry point for handling
 *		fault-injection ioctls, used to inject errors into the
 *		layer model.
 *
 * Arguments:	cmd	- the ioctl cmd received
 *		arg	- the argument passed in from userland
 */

static void
sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
{
	uint_t i = 0;
	uint_t rval;

	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");

	mutex_enter(SD_MUTEX(un));

	switch (cmd) {
	case SDIOCRUN:
		/* Allow pushed faults to be injected */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Run\n");

		sd_fault_injection_on = 1;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: run finished\n");
		break;

	case SDIOCSTART:
		/* Start Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Start\n");

		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0xFFFFFFFF;
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		mutex_enter(&(un->un_fi_mutex));
		un->sd_fi_log[0] = '\0';
		un->sd_fi_buf_len = 0;
		mutex_exit(&(un->un_fi_mutex));

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: start finished\n");
		break;

	case SDIOCSTOP:
		/* Stop Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");
		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0x0;

		/* Empty stray or unused structs from the fifo */
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			if (un->sd_fi_fifo_pkt[i] != NULL) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
			}
			if (un->sd_fi_fifo_xb[i] != NULL) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
			}
			if (un->sd_fi_fifo_un[i] != NULL) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
			}
			if (un->sd_fi_fifo_arq[i] != NULL) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
			}
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: stop finished\n");
		break;

	case SDIOCINSERTPKT:
		/* Store a packet struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		/* No more than SD_FI_MAX_ERROR allowed in the queue */
		if (un->sd_fi_fifo_pkt[i] != NULL) {
			kmem_free(un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt));
		}
		if (arg != NULL) {
			un->sd_fi_fifo_pkt[i] =
			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
			if (un->sd_fi_fifo_pkt[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
				un->sd_fi_fifo_pkt[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: pkt null\n");
		}
		break;

	case SDIOCINSERTXB:
		/* Store an xb struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_xb[i] != NULL) {
			kmem_free(un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb));
			un->sd_fi_fifo_xb[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_xb[i] =
			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
			if (un->sd_fi_fifo_xb[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb), 0);

			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
				un->sd_fi_fifo_xb[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: xb null\n");
		}
		break;

	case SDIOCINSERTUN:
		/* Store a un struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_un[i] != NULL) {
			kmem_free(un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un));
			un->sd_fi_fifo_un[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_un[i] =
			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
			if (un->sd_fi_fifo_un[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
				un->sd_fi_fifo_un[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: un null\n");
		}

		break;

	case SDIOCINSERTARQ:
		/* Store an arq struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_arq[i] != NULL) {
			kmem_free(un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq));
			un->sd_fi_fifo_arq[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_arq[i] =
			    kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}

		break;

	case SDIOCPUSH:
		/* Push the stored xb, pkt, un, and arq onto the fifo */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return the log buffer from the injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len + 1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid; set it to NULL
			 * for the return.
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: exit\n");
}


/*
 * Function: sd_injection_log()
 *
 * Description: This routine appends buf to the existing injection log
 *		for later retrieval via sd_faultinjection_ioctl(), for use
 *		in fault detection and recovery.
 *
 * Arguments: buf - the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	len = min(strlen(buf), 255);
	/* Add the logged value to the injection log to be returned later */
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		uint_t	offset = strlen((char *)un->sd_fi_log);
		char *destp = (char *)un->sd_fi_log + offset;
		int i;
		for (i = 0; i < len; i++) {
			*destp++ = *buf++;
		}
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}


/*
 * Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
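 *
 *		An illustrative test session (a sketch only; the userland
 *		wrapper issuing these ioctls is not part of this file, and
 *		fd, fipkt, count and logbuf are hypothetical names):
 *
 *			ioctl(fd, SDIOCSTART);             init session
 *			ioctl(fd, SDIOCINSERTPKT, &fipkt); stage a pkt fault
 *			ioctl(fd, SDIOCPUSH, &count);      push staged faults
 *			ioctl(fd, SDIOCRUN);               arm injection
 *			... issue I/O; sdintr() calls this routine ...
 *			ioctl(fd, SDIOCRETRIEVE, logbuf);  fetch the log
 *			ioctl(fd, SDIOCSTOP);              end session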
 *
 * Arguments: pktp	- packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* pull bp, xb, and un from pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* if injection is off, return */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	SD_INFO(SD_LOG_SDTEST, un,
	    "sd_faultinjection: injecting fault from the fifo\n");

	/* take the next set off the fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];


	/* set variables accordingly */
	/* set pkt if it was on the fifo */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		if (fi_pkt->pkt_cdbp != 0xff)
			SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

	}
	/* set xb if it was on the fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		if (fi_xb->xb_retry_count != 0)
			SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		/* copy in block data from sense */
		/*
		 * if (fi_xb->xb_sense_data[0] != -1) {
		 *	bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
		 *	SENSE_LENGTH);
		 * }
		 */
		bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH);

		/* copy in extended sense codes */
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_code, "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_key, "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_add_code, "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_qual_code, "es_qual_code");
		struct scsi_extended_sense *esp;
		esp = (struct scsi_extended_sense *)xb->xb_sense_data;
		esp->es_class = CLASS_EXTENDED_SENSE;
	}

	/* set un if it was on the fifo */
	if (fi_un != NULL) {
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un,
		    un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

	}

	/* copy in the auto request sense if it was on the fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* free structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free() does not guarantee that the pointer is set to NULL.
	 * Since we use these pointers to determine whether values were
	 * set, make sure they are always NULL after the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */

/*
 * This routine is invoked in sd_unit_attach(). Before calling it, the
 * properties in the conf file, including the "hotpluggable" property,
 * must already have been processed.
 *
 * The sd driver distinguishes three different types of devices: removable
 * media, non-removable media, and hotpluggable. The differences are
 * defined below:
 *
 * 1. Device ID
 *
 *    The device ID of a device is used to identify this device. Refer to
 *    ddi_devid_register(9F).
 *
 *    For a non-removable media disk device which can provide a 0x80 or 0x83
 *    VPD page (refer to the INQUIRY command of the SCSI SPC specification),
 *    a unique device ID is created to identify this device. For other
 *    non-removable media devices, a default device ID is created only if
 *    this device has at least two alternate cylinders. Otherwise, this
 *    device has no devid.
 *
 *    -------------------------------------------------------
 *    removable media   hotpluggable  | Can Have Device ID
 *    -------------------------------------------------------
 *        false             false     |     Yes
 *        false             true      |     Yes
 *        true                x       |     No
 *    -------------------------------------------------------
 *
 *
 * 2. SCSI group 4 commands
 *
 *    In the SCSI specs, only some commands in the group 4 command set can
 *    use 8-byte addresses, which can be used to access >2TB storage spaces.
 *    Other commands have no such capability. Without supporting group 4,
 *    it is impossible to make full use of the storage space of a disk with
 *    a capacity larger than 2TB.
 *
 *    -----------------------------------------------
 *    removable media   hotpluggable   LP64  |  Group
 *    -----------------------------------------------
 *          false           false     false  |   1
 *          false           false     true   |   4
 *          false           true      false  |   1
 *          false           true      true   |   4
 *          true              x         x    |   5
 *    -----------------------------------------------
 *
 *
 * 3. Check for VTOC Label
 *
 *    If a direct-access disk has no EFI label, sd will check if it has a
 *    valid VTOC label. Now, sd also does that check for removable media
 *    and hotpluggable devices.
 *
 *    --------------------------------------------------------------
 *    Direct-Access   removable media    hotpluggable |  Check Label
 *    --------------------------------------------------------------
 *        false          false           false        |   No
 *        false          false           true         |   No
 *        false          true            false        |   Yes
 *        false          true            true         |   Yes
 *        true            x                x          |   Yes
 *    --------------------------------------------------------------
 *
 *
 * 4. Building a default VTOC label
 *
 *    As section 3 says, sd checks whether some kinds of devices have a VTOC
 *    label. If those devices have no valid VTOC label, sd(7d) will attempt
 *    to create a default VTOC for them. Currently sd creates a default VTOC
 *    label for all devices on the x86 platform (VTOC_16), but only for
 *    removable media devices on SPARC (VTOC_8).
 *
 *    -----------------------------------------------------------
 *      removable media hotpluggable platform   |   Default Label
 *    -----------------------------------------------------------
 *          false          false        x86     |     Yes
 *          false          false       sparc    |     No
 *          false          true         x86     |     Yes
 *          false          true        sparc    |     Yes
 *          true             x           x      |     Yes
 *    -----------------------------------------------------------
 *
 *
 * 5. Supported blocksizes of target devices
 *
 *    Sd supports non-512-byte blocksizes for removable media devices only.
 *    For other devices, only a 512-byte blocksize is supported. This may be
 *    changed in the near future because some RAID devices require a
 *    non-512-byte blocksize.
 *
 *    -----------------------------------------------------------
 *      removable media    hotpluggable   | non-512-byte blocksize
 *    -----------------------------------------------------------
 *           false              false     |   No
 *           false              true      |   No
 *           true                 x       |   Yes
 *    -----------------------------------------------------------
 *
 *
 * 6. Automatic mount & unmount
 *
 *    The sd(7d) driver provides the DKIOCREMOVABLE ioctl. This ioctl is
 *    used to query whether a device is a removable media device. It returns
 *    1 for removable media devices, and 0 for others.
 *
 *    The automatic mounting subsystem should distinguish between the types
 *    of devices and apply automounting policies to each.
 *
 *
 * 7. fdisk partition management
 *
 *    Fdisk is the traditional partitioning method on the x86 platform. The
 *    sd(7d) driver supports fdisk partitions only on x86; on SPARC, sd
 *    doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize
 *    fdisk partitions on both the x86 and SPARC platforms.
 *
 *    -----------------------------------------------------------
 *        platform   removable media  USB/1394  |  fdisk supported
 *    -----------------------------------------------------------
 *        x86           X               X       |       true
 *    -----------------------------------------------------------
 *        sparc         X               X       |       false
 *    -----------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 *    Although sd(7d) doesn't support fdisk on the SPARC platform, it does
 *    support read/write mboot for removable media devices on SPARC.
 *
 *    -----------------------------------------------------------
 *       platform  removable media  USB/1394  |  mboot supported
 *    -----------------------------------------------------------
 *        x86         X                 X     |       true
 *    -----------------------------------------------------------
 *        sparc       false           false   |       false
 *        sparc       false           true    |       true
 *        sparc       true            false   |       true
 *        sparc       true            true    |       true
 *    -----------------------------------------------------------
 *
 *
 * 9. Error handling during opening of a device
 *
 *    If opening a disk device fails, an errno is returned. For some kinds
 *    of errors, a different errno is returned depending on whether the
 *    device is a removable media device. This brings USB/1394 hard disks
 *    in line with expected hard disk behavior. It is not expected that
 *    this breaks any application.
 *
 *    ------------------------------------------------------
 *      removable media    hotpluggable   |  errno
 *    ------------------------------------------------------
 *          false            false        |   EIO
 *          false            true         |   EIO
 *          true              x           |   ENXIO
 *    ------------------------------------------------------
 *
 *
 * 10. ioctls: DKIOCEJECT, CDROMEJECT
 *
 *    These IOCTLs are applicable only to removable media devices.
 *
 *    -----------------------------------------------------------
 *      removable media    hotpluggable   | DKIOCEJECT, CDROMEJECT
 *    -----------------------------------------------------------
 *          false            false        |     No
 *          false            true         |     No
 *          true              x           |     Yes
 *    -----------------------------------------------------------
 *
 *
 * 11. Kstats for partitions
 *
 *    sd creates partition kstats for non-removable media devices. USB and
 *    Firewire hard disks now have partition kstats.
 *
 *    ------------------------------------------------------
 *      removable media    hotpluggable   |   kstat
 *    ------------------------------------------------------
 *          false            false        |    Yes
 *          false            true         |    Yes
 *          true              x           |    No
 *    ------------------------------------------------------
 *
 *
 * 12. Removable media & hotpluggable properties
 *
 *    The sd driver creates a "removable-media" property for removable media
 *    devices. Parent nexus drivers create a "hotpluggable" property if they
 *    support hotplugging.
 *
 *    ---------------------------------------------------------------------
 *    removable media   hotpluggable |  "removable-media"   "hotpluggable"
 *    ---------------------------------------------------------------------
 *      false            false       |    No                    No
 *      false            true        |    No                    Yes
 *      true             false       |    Yes                   No
 *      true             true        |    Yes                   Yes
 *    ---------------------------------------------------------------------
 *
 *
 * 13. Power Management
 *
 *    sd only power manages removable media devices or devices that support
 *    LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
 *
 *    A parent nexus that supports hotplugging can also set "pm-capable"
 *    if the disk can be power managed.
 *
 *    ------------------------------------------------------------
 *     removable media hotpluggable pm-capable  |   power manage
 *    ------------------------------------------------------------
 *        false          false       false      |     No
 *        false          false       true       |     Yes
 *        false          true        false      |     No
 *        false          true        true       |     Yes
 *        true             x           x        |     Yes
 *    ------------------------------------------------------------
 *
 *     USB and Firewire hard disks can now be power managed independently
 *     of the framebuffer.
 *
 *
 * 14. Support for USB disks with capacity larger than 1TB
 *
 *     Currently, sd doesn't permit a fixed disk device with a capacity
 *     larger than 1TB to be used in a 32-bit operating system environment.
 *     However, sd doesn't apply that restriction to removable media
 *     devices. Instead, it assumes that removable media devices cannot
 *     have a capacity larger than 1TB. Therefore, using those devices on
 *     a 32-bit system is partially supported, which can cause some
 *     unexpected results.
 *
 *    ---------------------------------------------------------------------
 *     removable media    USB/1394 | Capacity > 1TB |  Used in 32-bit env
 *    ---------------------------------------------------------------------
 *        false              false  |   true         |     No
 *        false              true   |   true         |     No
 *        true               false  |   true         |     Yes
 *        true               true   |   true         |     Yes
 *    ---------------------------------------------------------------------
 *
 *
 * 15. Check write-protection at open time
 *
 *     When a removable media device is opened for writing without the
 *     NDELAY flag, sd checks whether the device is writable. An attempt
 *     to open a write-protected device for writing without the NDELAY
 *     flag will fail.
 *
 *    ------------------------------------------------------------
 *      removable media    USB/1394   |   WP Check
 *    ------------------------------------------------------------
 *          false              false  |     No
 *          false              true   |     No
 *          true               false  |     Yes
 *          true               true   |     Yes
 *    ------------------------------------------------------------
 *
 *
 * 16. syslog when a corrupted VTOC is encountered
 *
 *    Currently, if an invalid VTOC is encountered, sd prints a syslog
 *    message only for fixed SCSI disks.
 *
 *    ------------------------------------------------------------
 *      removable media    USB/1394   |   print syslog
 *    ------------------------------------------------------------
 *          false              false  |     Yes
 *          false              true   |     No
 *          true               false  |     No
 *          true               true   |     No
 *    ------------------------------------------------------------
 */
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int	pm_cap;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	/*
	 * Set the sync cache required flag to false. This ensures that
	 * no SYNC CACHE command is sent when there have been no writes.
	 */
	un->un_f_sync_cache_required = FALSE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable, and for this kind
		 * of device it is possible to change the medium after the
		 * device has been opened. Thus we should support this
		 * operation.
		 */
		un->un_f_has_removable_media = TRUE;

		/*
		 * Support a non-512-byte blocksize for removable media
		 * devices.
		 */
		un->un_f_non_devbsize_supported = TRUE;

		/*
		 * Assume that all removable media devices support DOOR_LOCK.
		 */
		un->un_f_doorlock_supported = TRUE;

		/*
		 * A removable media device may be opened with the NDELAY
		 * flag when there is no media in the drive; in this case we
		 * don't care whether the device is writable. But without
		 * the NDELAY flag, we need to check whether the media is
		 * write-protected.
		 */
		un->un_f_chk_wp_open = TRUE;

		/*
		 * Need to start a SCSI watch thread to monitor the media
		 * state; when media is inserted or ejected, notify syseventd.
		 */
		un->un_f_monitor_media_state = TRUE;

		/*
		 * Some devices don't support the START_STOP_UNIT command.
		 * Therefore, we'd better check whether a device supports it
		 * before sending it.
		 */
		un->un_f_check_start_stop = TRUE;

		/*
		 * Support the eject media ioctls:
		 *		FDEJECT, DKIOCEJECT, CDROMEJECT
		 */
		un->un_f_eject_media_supported = TRUE;

		/*
		 * Because many removable-media devices don't support
		 * LOG_SENSE, we can't use this command to check whether
		 * a removable media device supports power management.
		 * We assume that they support power management via the
		 * START_STOP_UNIT command and can be spun up and down
		 * without limitations.
		 */
		un->un_f_pm_supported = TRUE;

		/*
		 * Need to create a zero-length (boolean) property
		 * removable-media for the removable media devices.
		 * Note that the return value of the property creation is
		 * not checked: if the property cannot be created, we do not
		 * want the attach to fail altogether. This is consistent
		 * with other property creation in attach.
		 */
		(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
		    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

	} else {
		/*
		 * Create a device ID for the device.
		 */
		un->un_f_devid_supported = TRUE;

		/*
		 * Spin up non-removable-media devices once they are attached.
		 */
		un->un_f_attach_spinup = TRUE;

		/*
		 * According to the SCSI specification, sense data has two
		 * kinds of formats: fixed format and descriptor format. At
		 * present, we don't support descriptor format sense data
		 * for removable media.
		 */
		if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
			un->un_f_descr_format_supported = TRUE;
		}

		/*
		 * kstats are created only for non-removable media devices.
		 *
		 * Set this in sd.conf to 0 in order to disable kstats. The
		 * default is 1, so they are enabled by default.
		 */
		un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
		    SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "enable-partition-kstats", 1));

		/*
		 * Check if the HBA has set the "pm-capable" property.
		 * If "pm-capable" exists and is non-zero, then we can
		 * power manage the device without checking the start/stop
		 * cycle count log sense page.
		 *
		 * If "pm-capable" exists and is set to false (0),
		 * then we should not power manage the device.
		 *
		 * If "pm-capable" doesn't exist, then pm_cap will
		 * be set to SD_PM_CAPABLE_UNDEFINED (-1).
		 * In this case, sd will check the start/stop cycle count
		 * log sense page and power manage the device if the cycle
		 * count limit has not been exceeded.
		 */
		pm_cap = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
		if (SD_PM_CAPABLE_IS_UNDEFINED(pm_cap)) {
			un->un_f_log_sense_supported = TRUE;
			if (!un->un_f_power_condition_disabled &&
			    SD_INQUIRY(un)->inq_ansi == 6) {
				un->un_f_power_condition_supported = TRUE;
			}
		} else {
			/*
			 * The pm-capable property exists.
			 *
			 * Convert "TRUE" values for pm_cap to
			 * SD_PM_CAPABLE_IS_TRUE to make it easier to check
			 * later. "TRUE" values are any values defined in
			 * inquiry.h.
			 */
			if (SD_PM_CAPABLE_IS_FALSE(pm_cap)) {
				un->un_f_log_sense_supported = FALSE;
			} else {
				/* SD_PM_CAPABLE_IS_TRUE case */
				un->un_f_pm_supported = TRUE;
				if (!un->un_f_power_condition_disabled &&
				    SD_PM_CAPABLE_IS_SPC_4(pm_cap)) {
					un->un_f_power_condition_supported =
					    TRUE;
				}
				if (SD_PM_CAP_LOG_SUPPORTED(pm_cap)) {
					un->un_f_log_sense_supported = TRUE;
					un->un_f_pm_log_sense_smart =
					    SD_PM_CAP_SMART_LOG(pm_cap);
				}
			}

			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p pm-capable "
			    "property set to %d.\n", un, un->un_f_pm_supported);
		}
	}

	if (un->un_f_is_hotpluggable) {

		/*
		 * Have to watch hotpluggable devices as well, since
		 * that's the only way for userland applications to
		 * detect hot removal while the device is busy/mounted.
		 */
		un->un_f_monitor_media_state = TRUE;

		un->un_f_check_start_stop = TRUE;

	}
}

/*
 * sd_tg_rdwr:
 *	Provides rdwr access for cmlb via sd_tgops. start_block is in
 *	units of the system block size; reqlength is in bytes.
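 *
 *	Worked example (illustrative numbers, not from the original
 *	comment): with a 512-byte system block size and a 2048-byte target
 *	block size, a TG_READ of start_block = 3 and reqlength = 512 gives
 *	first_byte = 1536, real_addr = 0, end_block = 1 and
 *	buffer_size = 2048. Since 1536 is not a multiple of 2048, the
 *	request is unaligned: a 2048-byte bounce buffer (dkl) is read and
 *	the 512 requested bytes are copied out at target byte offset 1536.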
 *
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun *un;
	int path_flag = (int)(uintptr_t)tg_cookie;
	char *dkl = NULL;
	diskaddr_t real_addr = start_block;
	diskaddr_t first_byte, end_block;

	size_t	buffer_size = reqlength;
	int rval = 0;
	diskaddr_t	cap;
	uint32_t	lbasize;
	sd_ssc_t	*ssc;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	ssc = sd_ssc_init(un);
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			goto done1;
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if (un->un_f_tgt_blocksize_is_valid == FALSE) {
			mutex_exit(SD_MUTEX(un));
			rval = EIO;
			goto done;
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize: we need to re-adjust the
		 * blkno and save the index to the beginning of dk_label.
		 */
		first_byte  = SD_SYSBLOCKS2BYTES(start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to a multiple of target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un,
		    "sd_tg_rdwr: label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}

	/*
	 * The MMC standard allows READ CAPACITY to be
	 * inaccurate by a bounded amount (in the interest of
	 * response latency). As a result, failed READs are
	 * commonplace (due to the reading of metadata and not
	 * data). Depending on the per-Vendor/drive Sense data,
	 * the failed READ can cause many (unnecessary) retries.
	 */

	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(ssc, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if (dkl != NULL)
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			rval = sd_send_scsi_READ(ssc, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				goto done1;
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(ssc, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

done1:
	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
done:
	sd_ssc_fini(ssc);
	return (rval);
}


static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{

	struct sd_lun *un;
	diskaddr_t	cap;
	uint32_t	lbasize;
	int		path_flag = (int)(uintptr_t)tg_cookie;
	int		ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			sd_ssc_t	*ssc;
			mutex_exit(SD_MUTEX(un));
			ssc = sd_ssc_init(un);
			ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0) {
				if (ret == EIO)
					sd_ssc_assessment(ssc,
					    SD_FMT_STATUS_CHECK);
				else
					sd_ssc_assessment(ssc,
					    SD_FMT_IGNORE);
				sd_ssc_fini(ssc);
				return (ret);
			}
			sd_ssc_fini(ssc);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		((tg_attribute_t *)arg)->media_is_solid_state =
		    un->un_f_is_solid_state;
		((tg_attribute_t *)arg)->media_is_rotational =
		    un->un_f_is_rotational;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);

	}
}

/*
 * Function: sd_ssc_ereport_post
 *
 * Description: Called when the SD driver needs to post an ereport.
 *
 * Context: Kernel thread or interrupt context.
 */

#define	DEVID_IF_KNOWN(d) "devid", DATA_TYPE_STRING, (d) ? (d) : "unknown"

static void
sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess)
{
	int uscsi_path_instance = 0;
	uchar_t	uscsi_pkt_reason;
	uint32_t uscsi_pkt_state;
	uint32_t uscsi_pkt_statistics;
	uint64_t uscsi_ena;
	uchar_t op_code;
	uint8_t *sensep;
	union scsi_cdb *cdbp;
	uint_t cdblen = 0;
	uint_t senlen = 0;
	struct sd_lun *un;
	dev_info_t *dip;
	char *devid;
	int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON |
	    SSC_FLAGS_INVALID_STATUS |
	    SSC_FLAGS_INVALID_SENSE |
	    SSC_FLAGS_INVALID_DATA;
	char assessment[16];

	ASSERT(ssc != NULL);
	ASSERT(ssc->ssc_uscsi_cmd != NULL);
	ASSERT(ssc->ssc_uscsi_info != NULL);

	un = ssc->ssc_un;
	ASSERT(un != NULL);

	dip = un->un_sd->sd_dev;

	/*
	 * Get the devid:
	 *	devid will only be passed to non-transport error reports.
	 */
	devid = DEVI(dip)->devi_devid_str;

	/*
	 * If we are syncing or dumping, the command will not be executed,
	 * so we bypass this situation.
	 */
	if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_DUMPING))
		return;

	uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason;
	uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance;
	uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state;
	uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics;
	uscsi_ena = ssc->ssc_uscsi_info->ui_ena;

	sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
	cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb;

	/* In rare cases, e.g. DOORLOCK, the cdb could be NULL */
	if (cdbp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_ssc_ereport_post: empty cdb\n");
		return;
	}

	op_code = cdbp->scc_cmd;

	cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen;
	senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
	    ssc->ssc_uscsi_cmd->uscsi_rqresid);

	if (senlen > 0)
		ASSERT(sensep != NULL);

	/*
	 * Map drv_assess to the corresponding assessment string.
	 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending
	 * on the sense key returned.
	 */
	switch (drv_assess) {
	case SD_FM_DRV_RECOVERY:
		(void) sprintf(assessment, "%s", "recovered");
		break;
	case SD_FM_DRV_RETRY:
		(void) sprintf(assessment, "%s", "retry");
		break;
	case SD_FM_DRV_NOTICE:
		(void) sprintf(assessment, "%s", "info");
		break;
	case SD_FM_DRV_FATAL:
	default:
		(void) sprintf(assessment, "%s", "unknown");
	}
	/*
	 * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered
	 * command, and we will post ereport.io.scsi.cmd.disk.recovered.
	 * driver-assessment will always be "recovered" here.
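	 * (For reference, the ereport classes posted by this function are:
	 * ereport.io.scsi.cmd.disk.recovered, ...disk.dev.uderr,
	 * ...disk.tran, ...disk.dev.rqs.merr and ...disk.dev.rqs.derr.)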
#define	DEVID_IF_KNOWN(d) "devid", DATA_TYPE_STRING, (d) ? (d) : "unknown"

static void
sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess)
{
	int uscsi_path_instance = 0;
	uchar_t	uscsi_pkt_reason;
	uint32_t uscsi_pkt_state;
	uint32_t uscsi_pkt_statistics;
	uint64_t uscsi_ena;
	uchar_t op_code;
	uint8_t *sensep;
	union scsi_cdb *cdbp;
	uint_t cdblen = 0;
	uint_t senlen = 0;
	struct sd_lun *un;
	dev_info_t *dip;
	char *devid;
	int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON |
	    SSC_FLAGS_INVALID_STATUS |
	    SSC_FLAGS_INVALID_SENSE |
	    SSC_FLAGS_INVALID_DATA;
	char assessment[16];

	ASSERT(ssc != NULL);
	ASSERT(ssc->ssc_uscsi_cmd != NULL);
	ASSERT(ssc->ssc_uscsi_info != NULL);

	un = ssc->ssc_un;
	ASSERT(un != NULL);

	dip = un->un_sd->sd_dev;

	/*
	 * Get the devid:
	 *	devid will only be passed to non-transport error reports.
	 */
	devid = DEVI(dip)->devi_devid_str;

	/*
	 * If we are syncing or dumping, the command will not be executed,
	 * so we bypass this situation.
	 */
	if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_DUMPING))
		return;

	uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason;
	uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance;
	uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state;
	uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics;
	uscsi_ena = ssc->ssc_uscsi_info->ui_ena;

	sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
	cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb;

	/* In rare cases, e.g. DOORLOCK, the cdb could be NULL. */
	if (cdbp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_ssc_ereport_post meet empty cdb\n");
		return;
	}

	op_code = cdbp->scc_cmd;

	cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen;
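	/*
	 * The count of valid sense bytes is the requested sense length
	 * minus the residual the HBA reported back.
	 */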
	senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
	    ssc->ssc_uscsi_cmd->uscsi_rqresid);

	if (senlen > 0)
		ASSERT(sensep != NULL);

	/*
	 * Initialize the driver-assessment string from drv_assess.
	 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending
	 * on the sense key returned.
	 */
	switch (drv_assess) {
	case SD_FM_DRV_RECOVERY:
		(void) sprintf(assessment, "%s", "recovered");
		break;
	case SD_FM_DRV_RETRY:
		(void) sprintf(assessment, "%s", "retry");
		break;
	case SD_FM_DRV_NOTICE:
		(void) sprintf(assessment, "%s", "info");
		break;
	case SD_FM_DRV_FATAL:
	default:
		(void) sprintf(assessment, "%s", "unknown");
	}

	/*
	 * If drv_assess == SD_FM_DRV_RECOVERY, this is a recovered
	 * command and we post ereport.io.scsi.cmd.disk.recovered.
	 * driver-assessment will always be "recovered" here.
	 */
	if (drv_assess == SD_FM_DRV_RECOVERY) {
		scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
		    "cmd.disk.recovered", uscsi_ena, devid, NULL,
		    DDI_NOSLEEP, NULL,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
		    DEVID_IF_KNOWN(devid),
		    "driver-assessment", DATA_TYPE_STRING, assessment,
		    "op-code", DATA_TYPE_UINT8, op_code,
		    "cdb", DATA_TYPE_UINT8_ARRAY,
		    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
		    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
		    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
		    "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
		    NULL);
		return;
	}

	/*
	 * If there is unexpected/undecodable data, we post
	 * ereport.io.scsi.cmd.disk.dev.uderr.
	 * driver-assessment will be set based on the parameter drv_assess.
	 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back.
	 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered.
	 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered.
	 * SSC_FLAGS_INVALID_DATA - invalid data sent back.
	 */
	if (ssc->ssc_flags & ssc_invalid_flags) {
		if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) {
			scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
			    NULL, "cmd.disk.dev.uderr", uscsi_ena, devid,
			    NULL, DDI_NOSLEEP, NULL,
			    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    DEVID_IF_KNOWN(devid),
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb", DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
			    "pkt-stats", DATA_TYPE_UINT32,
			    uscsi_pkt_statistics,
			    "stat-code", DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    "un-decode-info", DATA_TYPE_STRING,
			    ssc->ssc_info,
			    "un-decode-value", DATA_TYPE_UINT8_ARRAY,
			    senlen, sensep,
			    NULL);
		} else {
			/*
			 * For other types of invalid data, the
			 * un-decode-value field is left empty because the
			 * undecodable content can be seen from the upper
			 * level payload or inside un-decode-info.
			 */
			scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
			    NULL,
			    "cmd.disk.dev.uderr", uscsi_ena, devid,
			    NULL, DDI_NOSLEEP, NULL,
			    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    DEVID_IF_KNOWN(devid),
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb", DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
			    "pkt-stats", DATA_TYPE_UINT32,
			    uscsi_pkt_statistics,
			    "stat-code", DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    "un-decode-info", DATA_TYPE_STRING,
			    ssc->ssc_info,
			    "un-decode-value", DATA_TYPE_UINT8_ARRAY,
			    0, NULL,
			    NULL);
		}
		ssc->ssc_flags &= ~ssc_invalid_flags;
		return;
	}

	if (uscsi_pkt_reason != CMD_CMPLT ||
	    (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) {
		/*
		 * Either pkt-reason != CMD_CMPLT, or SSC_FLAGS_TRAN_ABORT
		 * was set inside sd_start_cmds due to an error (a bad
		 * packet or a fatal transport error).  Treat it as a
		 * transport error and post ereport.io.scsi.cmd.disk.tran.
		 * driver-assessment will be set based on drv_assess.
		 * devid is set to NULL because this is a transport error.
		 */
		if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)
			ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT;

		scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
		    "cmd.disk.tran", uscsi_ena, NULL, NULL, DDI_NOSLEEP, NULL,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
		    DEVID_IF_KNOWN(devid),
		    "driver-assessment", DATA_TYPE_STRING,
		    drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
		    "op-code", DATA_TYPE_UINT8, op_code,
		    "cdb", DATA_TYPE_UINT8_ARRAY,
		    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
		    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
		    "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state,
		    "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
		    NULL);
	} else {
		/*
		 * If we got here, we have a completed command, and we need
		 * to further investigate the sense data to see what kind
		 * of ereport we should post.
		 * No ereport is needed if sense-key is KEY_RECOVERABLE_ERROR
		 * and asc/ascq is "ATA PASS-THROUGH INFORMATION AVAILABLE".
		 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr if sense-key is
		 * KEY_MEDIUM_ERROR.
		 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise.
		 * driver-assessment will be set based on the parameter
		 * drv_assess.
		 */
		if (senlen > 0) {
			/*
			 * Here we have sense data available.
			 */
			uint8_t sense_key = scsi_sense_key(sensep);
			uint8_t sense_asc = scsi_sense_asc(sensep);
			uint8_t sense_ascq = scsi_sense_ascq(sensep);

			if (sense_key == KEY_RECOVERABLE_ERROR &&
			    sense_asc == 0x00 && sense_ascq == 0x1d)
				return;

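			/*
			 * Unlike the generic dev.rqs.derr report in the
			 * branch below, the medium-error report also
			 * carries the failing LBA.
			 */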
			if (sense_key == KEY_MEDIUM_ERROR) {
				/*
				 * driver-assessment should be "fatal" if
				 * drv_assess is SD_FM_DRV_FATAL.
				 */
				scsi_fm_ereport_post(un->un_sd,
				    uscsi_path_instance, NULL,
				    "cmd.disk.dev.rqs.merr",
				    uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL,
				    FM_VERSION, DATA_TYPE_UINT8,
				    FM_EREPORT_VERS0,
				    DEVID_IF_KNOWN(devid),
				    "driver-assessment",
				    DATA_TYPE_STRING,
				    drv_assess == SD_FM_DRV_FATAL ?
				    "fatal" : assessment,
				    "op-code",
				    DATA_TYPE_UINT8, op_code,
				    "cdb",
				    DATA_TYPE_UINT8_ARRAY, cdblen,
				    ssc->ssc_uscsi_cmd->uscsi_cdb,
				    "pkt-reason",
				    DATA_TYPE_UINT8, uscsi_pkt_reason,
				    "pkt-state",
				    DATA_TYPE_UINT8, uscsi_pkt_state,
				    "pkt-stats",
				    DATA_TYPE_UINT32,
				    uscsi_pkt_statistics,
				    "stat-code",
				    DATA_TYPE_UINT8,
				    ssc->ssc_uscsi_cmd->uscsi_status,
				    "key",
				    DATA_TYPE_UINT8,
				    scsi_sense_key(sensep),
				    "asc",
				    DATA_TYPE_UINT8,
				    scsi_sense_asc(sensep),
				    "ascq",
				    DATA_TYPE_UINT8,
				    scsi_sense_ascq(sensep),
				    "sense-data",
				    DATA_TYPE_UINT8_ARRAY,
				    senlen, sensep,
				    "lba",
				    DATA_TYPE_UINT64,
				    ssc->ssc_uscsi_info->ui_lba,
				    NULL);
			} else {
				/*
				 * If sense-key is 0x4 (hardware error),
				 * driver-assessment should be "fatal" if
				 * drv_assess is SD_FM_DRV_FATAL.
				 */
				scsi_fm_ereport_post(un->un_sd,
				    uscsi_path_instance, NULL,
				    "cmd.disk.dev.rqs.derr",
				    uscsi_ena, devid,
				    NULL, DDI_NOSLEEP, NULL,
				    FM_VERSION,
				    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
				    DEVID_IF_KNOWN(devid),
				    "driver-assessment",
				    DATA_TYPE_STRING,
				    drv_assess == SD_FM_DRV_FATAL ?
				    (sense_key == 0x4 ?
				    "fatal" : "fail") : assessment,
				    "op-code",
				    DATA_TYPE_UINT8, op_code,
				    "cdb",
				    DATA_TYPE_UINT8_ARRAY, cdblen,
				    ssc->ssc_uscsi_cmd->uscsi_cdb,
				    "pkt-reason",
				    DATA_TYPE_UINT8, uscsi_pkt_reason,
				    "pkt-state",
				    DATA_TYPE_UINT8, uscsi_pkt_state,
				    "pkt-stats",
				    DATA_TYPE_UINT32,
				    uscsi_pkt_statistics,
				    "stat-code",
				    DATA_TYPE_UINT8,
				    ssc->ssc_uscsi_cmd->uscsi_status,
				    "key",
				    DATA_TYPE_UINT8,
				    scsi_sense_key(sensep),
				    "asc",
				    DATA_TYPE_UINT8,
				    scsi_sense_asc(sensep),
				    "ascq",
				    DATA_TYPE_UINT8,
				    scsi_sense_ascq(sensep),
				    "sense-data",
				    DATA_TYPE_UINT8_ARRAY,
				    senlen, sensep,
				    NULL);
			}
		} else {
			/*
			 * For stat_code == STATUS_GOOD, this is not a
			 * hardware error.
			 */
			if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD)
				return;

			/*
			 * Post ereport.io.scsi.cmd.disk.dev.serr if we got
			 * a stat-code but sense data is unavailable.
			 * driver-assessment will be set based on the
			 * parameter drv_assess.
			 */
			scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
			    NULL,
			    "cmd.disk.dev.serr", uscsi_ena,
			    devid, NULL, DDI_NOSLEEP, NULL,
			    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    DEVID_IF_KNOWN(devid),
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb",
			    DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason",
			    DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state",
			    DATA_TYPE_UINT8, uscsi_pkt_state,
			    "pkt-stats",
			    DATA_TYPE_UINT32, uscsi_pkt_statistics,
			    "stat-code",
			    DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    NULL);
		}
	}
}

/*
 * Function: sd_ssc_extract_info
 *
 * Description: Extract the information available to help generate an
 *		ereport.
 *
 * Context: Kernel thread or interrupt context.
 */
static void
sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, struct sd_xbuf *xp)
{
	size_t senlen = 0;
	union scsi_cdb *cdbp;
	int path_instance;
	/*
	 * Need the scsi_cdb_size array to determine the cdb length.
	 */
	extern uchar_t scsi_cdb_size[];

	ASSERT(un != NULL);
	ASSERT(pktp != NULL);
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(ssc != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/*
	 * Transfer the cdb buffer pointer here.
	 */
	cdbp = (union scsi_cdb *)pktp->pkt_cdbp;

	ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)];
	ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp;

	/*
	 * Transfer the sense data buffer pointer if sense data is available;
	 * calculate the sense data length first.
	 */
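	/*
	 * With extended ARQ (XARQ) the HBA may return up to
	 * MAX_SENSE_LENGTH bytes of sense data, and the residual is
	 * subtracted to get the count actually transferred; with plain
	 * ARQ the fixed SENSE_LENGTH is assumed.
	 */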
	if ((xp->xb_sense_state & STATE_XARQ_DONE) ||
	    (xp->xb_sense_state & STATE_ARQ_DONE)) {
		/*
		 * We enter here for the arq case.
		 */
		if (xp->xb_sense_state & STATE_XARQ_DONE) {
			senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid;
		} else {
			senlen = SENSE_LENGTH;
		}
	} else {
		/*
		 * We enter this branch for the non-arq case.
		 */
		if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK &&
		    (xp->xb_sense_state & STATE_XFERRED_DATA)) {
			senlen = SENSE_LENGTH - xp->xb_sense_resid;
		}
	}

	ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff);
	ssc->ssc_uscsi_cmd->uscsi_rqresid = 0;
	ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data;

	ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);

	/*
	 * Only transfer path_instance when the scsi_pkt was properly
	 * allocated.
	 */
	path_instance = pktp->pkt_path_instance;
	if (scsi_pkt_allocated_correctly(pktp) && path_instance)
		ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance;
	else
		ssc->ssc_uscsi_cmd->uscsi_path_instance = 0;

	/*
	 * Copy in the other fields we may need when posting an ereport.
	 */
	ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason;
	ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state;
	ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics;
	ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp);

	/*
	 * Do not generate an ena for a command that completed successfully
	 * with no sense data; otherwise it could be mistakenly recognized
	 * as a recovered command.
	 */
	if ((pktp->pkt_reason == CMD_CMPLT) &&
	    (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) &&
	    (senlen == 0)) {
		return;
	}

	/*
	 * To associate the ereports of a single command execution flow, we
	 * need a shared ena for that specific command.
	 */
	if (xp->xb_ena == 0)
		xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
	ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
}


/*
 * Function: sd_check_bdc_vpd
 *
 * Description: Query the optional INQUIRY VPD page 0xb1. If the device
 *		supports VPD page 0xb1, sd examines the MEDIUM ROTATION
 *		RATE.
 *
 *		Set the following based on the RPM value:
 *		= 0	device is not solid state, non-rotational
 *		= 1	device is solid state, non-rotational
 *		> 1	device is not solid state, rotational
 *
 * Context: Kernel thread or interrupt context.
 */

static void
sd_check_bdc_vpd(sd_ssc_t *ssc)
{
	int rval = 0;
	uchar_t *inqb1 = NULL;
	size_t inqb1_len = MAX_INQUIRY_SIZE;
	size_t inqb1_resid = 0;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));
	un->un_f_is_rotational = TRUE;
	un->un_f_is_solid_state = FALSE;

	if (ISCD(un)) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	if (sd_check_vpd_page_support(ssc) == 0 &&
	    un->un_vpd_page_mask & SD_VPD_DEV_CHARACTER_PG) {
		mutex_exit(SD_MUTEX(un));
		/* collect page b1 data */
		inqb1 = kmem_zalloc(inqb1_len, KM_SLEEP);

		rval = sd_send_scsi_INQUIRY(ssc, inqb1, inqb1_len,
		    0x01, 0xB1, &inqb1_resid);

		if (rval == 0 && (inqb1_len - inqb1_resid > 5)) {
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_check_bdc_vpd: \
			    successfully get VPD page: %x \
			    PAGE LENGTH: %x BYTE 4: %x \
			    BYTE 5: %x", inqb1[1], inqb1[3], inqb1[4],
			    inqb1[5]);

			mutex_enter(SD_MUTEX(un));
			/*
			 * Check the MEDIUM ROTATION RATE.
			 */
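			/*
			 * The MEDIUM ROTATION RATE is a 16-bit big-endian
			 * value in bytes 4-5 of the page; per SBC-3, a
			 * value of 0x0001 denotes non-rotating (solid
			 * state) media.  Testing byte 4 first ensures the
			 * full 16-bit value is 0 or 1.
			 */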
			if (inqb1[4] == 0) {
				if (inqb1[5] == 0) {
					un->un_f_is_rotational = FALSE;
				} else if (inqb1[5] == 1) {
					un->un_f_is_rotational = FALSE;
					un->un_f_is_solid_state = TRUE;
					/*
					 * Solid state drives don't need
					 * disksort.
					 */
					un->un_f_disksort_disabled = TRUE;
				}
			}
			mutex_exit(SD_MUTEX(un));
		} else if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		kmem_free(inqb1, inqb1_len);
	} else {
		mutex_exit(SD_MUTEX(un));
	}
}

/*
 * Function: sd_check_emulation_mode
 *
 * Description: Check whether the SSD is in emulation mode by issuing
 *		READ_CAPACITY_16 to see whether we can get the physical
 *		block size of the drive.
 *
 * Context: Kernel thread or interrupt context.
 */

static void
sd_check_emulation_mode(sd_ssc_t *ssc)
{
	int rval = 0;
	uint64_t capacity;
	uint_t lbasize;
	uint_t pbsize;
	int i;
	int devid_len;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));
	if (ISCD(un)) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	if (un->un_f_descr_format_supported) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize,
		    &pbsize, SD_PATH_DIRECT);
		mutex_enter(SD_MUTEX(un));

		if (rval != 0) {
			un->un_phy_blocksize = DEV_BSIZE;
		} else {
			if (!ISP2(pbsize % DEV_BSIZE) || pbsize == 0) {
				un->un_phy_blocksize = DEV_BSIZE;
			} else if (pbsize > un->un_phy_blocksize) {
				/*
				 * Don't reset the physical blocksize
				 * unless we've detected a larger value.
				 */
				un->un_phy_blocksize = pbsize;
			}
		}
	}

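	/*
	 * Devices in sd_flash_dev_table are known flash devices that
	 * emulate a smaller logical block size.  Force their physical
	 * block size to SSD_SECSIZE and, when it differs from the target
	 * block size, enable read-modify-write.
	 */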
	for (i = 0; i < sd_flash_dev_table_size; i++) {
		devid_len = (int)strlen(sd_flash_dev_table[i]);
		if (sd_sdconf_id_match(un, sd_flash_dev_table[i], devid_len)
		    == SD_SUCCESS) {
			un->un_phy_blocksize = SSD_SECSIZE;
			if (un->un_f_is_solid_state &&
			    un->un_phy_blocksize != un->un_tgt_blocksize)
				un->un_f_enable_rmw = TRUE;
		}
	}

	mutex_exit(SD_MUTEX(un));
}